Dataset columns:
- query: string (9 to 3.4k characters)
- document: string (9 to 87.4k characters)
- metadata: dict
- negatives: sequence (4 to 101 items)
- negative_scores: sequence (4 to 101 items)
- document_score: string (3 to 10 characters)
- document_rank: string (102 distinct values)
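The columns above map directly onto rows as returned by the `datasets` library; sample rows are shown further down. Below is a minimal loading sketch — the repo id `org/code-search-triplets` is a placeholder assumption, not the dataset's actual name.

```python
# Minimal loading sketch. The repo id below is a placeholder assumption,
# not the dataset's actual name on the Hub.
from datasets import load_dataset

ds = load_dataset("org/code-search-triplets", split="train")

row = ds[0]
print(row["query"])             # natural-language description, e.g. "Starts WSGI application flow."
print(row["document"])          # the matching (positive) code snippet
print(len(row["negatives"]))    # 4 to 101 non-matching code snippets
print(row["negative_scores"])   # one retrieval score per negative, stored as strings
print(row["document_score"], row["document_rank"])
```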
query: Initializes indicating root Python module. The application will look for all `Resource` classes defined in the given root module.
document:
    def __init__(self, root):
        self._root = root
        if not self.get_resources():
            raise Exception('Your application has no Resource.')
metadata: { "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
negatives:
[ "def __init__(self, basedir=None):\n # ------------------------------------------------------------------------\n super(Resources, self).__init__()\n self.xInitialize(basedir or \"resources\")", "def __init__(self):\n self.modules = {}", "def init(self):\n\n self.loaded = False\n self.exports = NotImplemented\n self.exception = None\n self.namespace = self.create_namespace()\n self.namespace.__file__ = str(self.filename)\n self.namespace.module = self\n self.namespace.require = self.require", "def __init__(self, rootPath=None):\n self.rootPath = rootPath or '.'", "def init_root(path_to_ecore: str) -> None:\n logger.debug(\"Initializing mm_root from %s\", path_to_ecore)\n\n global mm_root\n mm_root = ResourceSet().get_resource(URI(path_to_ecore)).contents[0]", "def initialize_api(app, api):\n api.init_app(app=app) # Initialize api first\n _resources = getattr(app, \"api_registry\", None)\n if _resources and isinstance(_resources, (list, tuple,)):\n for cls, args, kwargs in _resources:\n api.add_resource(cls, *args, **kwargs)", "def init(app):\n from sirepo import feature_config\n from sirepo import simulation_db\n\n if _uri_to_route:\n return\n global _app\n _app = app\n for n in _REQUIRED_MODULES + feature_config.cfg.api_modules:\n register_api_module(importlib.import_module('sirepo.' + n))\n _init_uris(app, simulation_db)", "def init_rest(app_):\n\n rest_api = Api(app_)\n rest_api.add_resource(views.rest_resources.AppListResource,\n ActiveConfig.REST_URL_APPS_LIST,\n ActiveConfig.REST_URL_APPS_LIST + '/')\n rest_api.add_resource(views.rest_resources.AppResource,\n ActiveConfig.REST_URL_APPS_ITEM,\n ActiveConfig.REST_URL_APPS,\n ActiveConfig.REST_URL_APPS + '/')", "def register_root(cls):\n if RegisteredType._reg['root_class'] is None:\n\n del RegisteredType._reg\n RegisteredType._reg = {\n 'classes' : { 'classid_key' : 'type'},\n 'autoid' : 0,\n 'classids' : { 'type' : 'classid_key' },\n }\n RegisteredType._reg['root_class'] = cls \n cls.register_class()", "def __init__(self, root):\n FileHelper.ALL_PATHS = [os.path.join(dp, f) for dp, dn, filenames in os.walk(root) for f in filenames if os.path.splitext(f)[1] in Enums.App.VALID_FILE_TYPES]", "def static_init(cls):\n for path in sys.path:\n if os.path.isdir(path + \"/support_diagnostics\"):\n ImportModules.base_directory = path + \"/support_diagnostics\"", "def __init__(self, root, api, symlink_resource):\n assert root and isinstance(root, config_types.Path)\n self._root = root\n self._api = api\n self._resource = symlink_resource\n # dict[Path]list(Path): Maps target to a list of linknames.\n self._link_map = {}", "def root(self):\n return Resource()", "def initialize(self) -> typing.NoReturn:\n\t\tfor root, dirs, files in os.walk(INPUT_DIRECTORY, topdown=False):\n\t\t\tfor fileName in files:\n\t\t\t\tif fileName.endswith('.py'):\n\t\t\t\t\tself.moduleNameSet.add(os.path.join(root, fileName))", "def __setup_modules(self, config, db, rcontext):\n DEPTH_ROOT = 0\n DEPTH_TYPE = 1\n DEPTH_SUBTYPE = 2\n\n for root, sub_folders, files in os.walk(\"modules\"):\n nicepath = os.path.relpath(root, \"modules\")\n fullpath = root\n\n if nicepath == '.':\n depth = DEPTH_ROOT\n else:\n depth = nicepath.count(os.path.sep) + 1\n\n if depth > DEPTH_SUBTYPE:\n warnings.warn(\"sub-subdirectory in module (%s) \\\n ignored.\" % nicepath)\n\n modulenamebase = nicepath.replace(os.path.sep, '.')\n mimetype = nicepath.replace(os.path.sep, '/')\n\n if depth != DEPTH_ROOT:\n # Each folder should except root have an __init__.py,\n # otherwise the directory name 
be assigned as a module.\n if not \"__init__.py\" in files:\n warnings.warn(\"__init__.py not found in \\\n module folder '%s'.\" % nicepath)\n continue\n\n modulepath = fullpath + os.path.sep + \"__init__.py\"\n module = Module(modulepath, modulenamebase, mimetype)\n self.modules.append(module)\n\n # Now load each handler .py file\n for file in files:\n modulenameend, extension = os.path.splitext(file)\n if extension.lower() == \".py\":\n is_init = file == \"__init__.py\"\n modulepath = fullpath + os.path.sep + file\n modulename = None\n if is_init:\n modulename = modulenamebase\n elif depth == DEPTH_ROOT:\n modulename = modulenameend\n else:\n modulename = modulenamebase + '.' + modulenameend\n\n module = Module(modulepath, modulename, mimetype,\n is_global=(depth == DEPTH_ROOT),\n as_mime_handler=not is_init)\n if module.is_mime_handler and not rcontext.is_recursive:\n db.setup_module_table(module.md5_tablename,\n module.columndefinition)\n\n self.modules.append(module)", "def __init__(self,\n root: Path = None,\n resources_dir: Path = None,\n slave_configuration_path : Path = None,\n binaries_dir : Path = None,\n wrapper_win64 : Path = None,\n wrapper_linux64: Path = None,\n main_script_path : Path = None,\n model_description: Path = None,\n model_description_path : Path = None,\n main_script: Path = None,\n main_class : Path = None,\n pyfmu_dir : Path = None\n ):\n self.model_description = model_description\n\n self.main_script = main_script\n self.main_class = main_class\n self.slave_configuration = None\n\n # paths\n self.root = root\n self.resources_dir = resources_dir\n self.slave_configuration_path = slave_configuration_path\n self.main_script_path = main_script_path\n self.model_description_path = model_description_path\n self.binaries_dir = binaries_dir\n self.wrapper_win64 = wrapper_win64\n self.wrapper_linux64 = wrapper_linux64\n self.pyfmu_dir = pyfmu_dir", "def _load_modules(self):\n moduledocs = self._docset.get_compounds(xml.Group,\n lambda x: x.get_name().startswith('module_'))\n for moduledoc in moduledocs:\n moduleobj = self._modules.get(moduledoc.get_name())\n if not moduleobj:\n self._reporter.input_error(\n \"no matching directory for module: {0}\".format(moduledoc))\n continue\n moduleobj.set_doc_xml(moduledoc, self)\n self._docmap[moduledoc] = moduleobj", "def _setup_modules(self):\r\n module_registry = AppModule.module_registry()\r\n for bundle in topological_sort(AppModule.module_dependencies()):\r\n for module_label in bundle:\r\n assert module_label in module_registry\r\n module = module_registry[module_label]\r\n self._debug_log('Initializing: %s (%s)' % (module.label(), module.description()))\r\n try:\r\n module.setup_function()\r\n except AppModule.Unimplemented:\r\n pass\r\n self._init_modules.append(module.label())", "def _load_resources(self):\n puts = (getattr(self, 'project', None) or self).puts\n for resource_type, resource_cls in six.iteritems(AVAILABLE_RESOURCES):\n for name in self.settings.get(resource_type, {}):\n extra = {\n 'project': getattr(self, 'project', None) or self,\n 'app': self if hasattr(self, 'project') else None,\n }\n\n with indent(4 if hasattr(self, 'project') else 2):\n puts(colored.green(u\"✓ {}:{}\".format(resource_type, name)))\n\n self._resources[resource_type].append(\n resource_cls.factory(\n name=name,\n settings=self.settings.get(resource_type, {})[name],\n **extra\n )\n )", "def initialize(self, module_name):\n # Load.\n self._initialize(module_name)", "def __init__(__self__, *,\n root: str):\n pulumi.set(__self__, 
\"root\", root)", "def load_resource_map():\n # to avoid a circular dependency\n from coinbase_commerce.api_resources.base import APIResource\n global RESOURCE_MAP\n RESOURCE_MAP = {k.RESOURCE_NAME: k for k in APIResource.get_subclasses()\n if getattr(k, \"RESOURCE_NAME\", None)}", "def __init__(self):\n super(Modules, self).__init__()\n \n global superclasses\n superclasses['universe'] = []\n superclasses['actions'] = ['universe']\n superclasses['booleans'] = ['universe']\n\n global instances\n instances['universe'] = set()\n instances['actions'] = set()\n instances['booleans'] = set()", "def _createModuleObj(self):\n ModuleInitialCondition.__init__(self)", "def test_init(self):\n\n class TestResource(BaseResource):\n\n name = 'test_resource'\n\n def process(self, message):\n pass\n\n api = Mock()\n api.endpoint = 'http://an_endpoint'\n route = '/a_route'\n TestResource.init(api, route)\n\n # validate the attribute values of the class\n self.assertEqual(api, TestResource.api)\n self.assertEqual(route, TestResource.route)\n self.assertEqual(api.mongodb, TestResource.mongodb)\n self.assertEqual(api.conf, TestResource.conf)\n self.assertEqual('http://an_endpoint/a_route', TestResource.endpoint)\n self.assertEqual('test_resource', TestResource.logger.name)", "def __init__(self, root):\n self.root = root", "def __init__(self, root):\n self.root = root", "def __init__(self, root=None):\n self.set_root(root)", "def _sub_init(self):\n self._find_mod(\"init\", match_only=True)", "def __init__(self, root):\n self.root = root\n self.app = Home(root, self)", "def init_application(app, config):\n app.config.from_object(config)\n\n api = Api(app)\n api.add_resource(Build, config.WSPATH)\n api.add_resource(Request, config.WSPATH + '/<request_id>')\n api.add_resource(Result, config.WSPATH + '/<request_id>/result')\n api.add_resource(Image, config.WSPATH + '/<request_id>/result/image')\n api.add_resource(Output, config.WSPATH + '/<request_id>/result/output/<int:output_id>')\n api.add_resource(Log, config.WSPATH + '/<request_id>/result/log')\n\n AgroLogHandler(app).init()\n app.logger.info(\"Flask Application initialized\")", "def init(self):\n\n # create directories\n self.createDirectory(\"src\")\n list_directories_name = {\n \"Models\": True,\n \"DTOs\": True,\n \"Repositories\": True,\n \"Services\": True,\n \"Mappeurs\": True,\n \"Ressources\": True,\n \"Logs\": False,\n \"Parsers\": False,\n \"docker\": False,\n \"Enums\": False,\n \"Config\": False,\n }\n\n for directory_name in list_directories_name:\n self.createDirectory(\"src/\" + directory_name)\n\n self.createDirectory(\"src/Logs\")\n open(\"src/Logs/debug.log\", \"w\").close()\n open(\"src/Logs/info.log\", \"w\").close()\n open(\"src/Logs/error.log\", \"w\").close()\n\n # test directories\n self.createDirectory(\"Tests\")\n for directory_name in list_directories_name.keys():\n if list_directories_name[directory_name]:\n self.createDirectory(\"Tests/\" + directory_name)\n\n # helpers Test\n path = self.getPathFileInStatic(\"helpersTest.py\")\n shutil.copy(path, \"Tests/helpersTest.py\")\n\n # Security config\n path = self.getPathFileInStatic(\"security.py\")\n shutil.copy(path, \"src/Config/SecurityConfig.py\")\n\n # Logger\n path = self.getPathFileInStatic(\"logger.py\")\n shutil.copy(path, \"src/Config/Logger.py\")\n\n self.createDirectory(\"Tests/Mocks\")\n\n self.writeAppFile()\n\n path = self.getPathFileInStatic(\"config.py\")\n shutil.copy(path, \"src/Config/ApplicationConfig.py\")\n self.info(\"[x] create config.py\")\n # 
shutil.copy(getPathFileInStatic(\"__init__.py\"), \"src/__init__.py\")\n # info(\"[x] create __init__.py\")\n path = self.getPathFileInStatic(\"server.py\")\n shutil.copy(path, \"server.py\")\n self.info(\"[x] create server.py\")\n path = self.getPathFileInStatic(\"docker-compose.test.yml\")\n shutil.copy(path, \"src/docker/docker-compose.test.yml\")\n self.info(\"[x] create docker-compose.test.yml\")", "def __init__(self, request):\n super(Root, self).__init__(None, None, request=request)", "def init_controllers(self):\n if self.controllers == None:\n return\n controllers_namespace = self.__namespace + \".controllers\" # TODO: allow customize this\n try:\n controllers_package = import_module(controllers_namespace)\n except:\n return None\n\n from ron import Application\n controllers_modules = self._get_package_modules(controllers_package)\n for controller_name in controllers_modules:\n imported_controller = import_module('.' + controller_name, package=controllers_namespace)\n for i in dir(imported_controller):\n attribute = getattr(imported_controller, i)\n if inspect.isclass(attribute) and issubclass(attribute, Controller):\n controller_class = attribute(self)\n self.controllers[controllers_namespace+'.'+controller_name] = controller_class\n Application().controllers[controllers_namespace+'.'+controller_name] = controller_class", "def __init__(self, file_root):\n self.root = file_root", "def __init__(self, root_path):\n self._root = root_path\n if not os.path.exists(self._root):\n os.makedirs(self._root)", "def __init__(self):\n self._events = {}\n self.modules = {}", "def __init__(self, config=None, parent=None, catchall=True, autojson=True):\n\n Bottle.__init__(self, catchall, autojson)\n RonObject.__init__(self, config=config)\n\n if not self.view:\n self.view = View(config = {'module':self})\n\n if isinstance(config, dict):\n self.__namespace = self._get_module_namespace()\n if not self.base_path:\n self.__package = import_module(self.__namespace)\n self.base_path = os.path.dirname(self.__package.__file__)\n self.parent = parent\n\n self.load_components()", "def test_module_initialization(self):\n m = Module('foo')\n assert str(m) == 'foo'\n\n m = Module('foo.bar')\n assert str(m) == 'foo.bar'\n\n m = Module('foo.bar.qux')\n assert str(m) == 'foo.bar.qux'", "def __init__(self, root_path):\r\n self.root_path = root_path\r\n if not os.path.exists(root_path):\r\n os.makedirs(root_path)", "def initialize_routes(api):\n api.add_resource(WatchlistsApi, '/api/watchlists')\n api.add_resource(WatchlistApi, '/api/watchlist/<id>')\n api.add_resource(RegisterUserApi, '/api/auth/register')\n api.add_resource(LoginUserApi, '/api/auth/login')\n api.add_resource(ResetPassword, '/api/auth/reset')\n api.add_resource(ResetFogottenPassword, '/api/auth/reset/password')\n api.add_resource(ForgotPassword, '/api/auth/forgot')\n api.add_resource(ForgotPasswordReset, '/reset/password/<token>')\n api.add_resource(Home, '/')\n api.add_resource(Logout, '/logout')\n api.add_resource(Dashboard, '/dashboard')\n api.add_resource(DashboardSearch, '/dashboard/search')\n api.add_resource(SearchMovies, '/search/movies/<title>')\n api.add_resource(SearchMovieDetails, '/search/movie/details/<id>')\n api.add_resource(SearchTvShows, '/search/shows/<title>')\n api.add_resource(SearchShowDetails, '/search/show/details/<id>')\n api.add_resource(SearchTrendingMovies, '/search/trending/movies')\n api.add_resource(Recommend, '/recommend')", "def __init__(self, realm):\r\n Resource.__init__(self)\r\n\r\n verifyObject(IMasterRealm, 
realm)\r\n self._realm = realm", "def load(self):\n self.suite.load()\n self.resource_map = {}\n dirlist = os.listdir(self.resources)\n for resource_name in (name for name in dirlist\n if os.path.isfile(os.path.join(self.resources,name)) and\n os.path.splitext(name)[1].lower() == '.fbr'):\n try:\n f = open(os.path.join(self.resources,resource_name),'rU')\n expr = f.read()\n d = eval(expr)\n resource_id = os.path.splitext(resource_name)[0].lower()\n d['id'] = resource_id\n kind = d['kind']\n del d['kind']\n self.resource_map[resource_id] = Resource.create(kind,**d)\n finally:\n f.close()", "def __init__(self, path: Union[str, Path], package_root: Optional[Union[str, Path]] = None) -> None:\n if not path:\n raise ValueError(\"Empty path.\")\n path = Path(path).resolve()\n\n self.package_root = Path(package_root or '.').resolve()\n if not self.package_root.exists():\n raise ValueError(f\"Package root folder {package_root} does not exist.\")\n\n if not self.package_root.is_dir():\n raise ValueError(f\"Package root '{package_root}' is not a folder.\")\n\n if not (path.samefile(self.package_root) or str(self.package_root) in str(path.resolve())):\n raise ValueError(f\"Path '{path}' is not located in the package root '{package_root}'.\")\n\n self.file_path: Path = get_python_file_path(path)\n self.full_module_name = get_module_from_path(self.file_path, self.package_root)\n self.imports: List[ast.Import] = []\n self.importsFrom: List[ast.ImportFrom] = []\n self.modules: List[str] = []\n\n ast_file = ast.parse(self.file_path.read_text())\n self.visit(ast_file)\n self.add_module_parents()", "def __init__(self, paths=sys.path):\n self.paths = paths\n self.module = PythonModuleList()\n for path in paths:\n self.module.addModule( PythonModuleOnDisk(path) )\n\n self.moduleResolver = SimpleModuleResolver(self.module)\n\n self.needed_modules = {}", "def init_app(self):\n self.app.config.setdefault('MACL_DEFINITION', None)\n self.app.config.setdefault('MACL_CLASS', None)\n self.app.config.setdefault('MACL_ERROR_MESSAGE',\n 'You do not have access to this resource')\n\n self.app.miracle_acl_manager = self\n\n self.load_acl()", "def init():\n if not _module_init():\n _pypm.Initialize()\n _module_init(True)\n atexit.register(quit)", "def init(self):\n\n raise DevException(messages.module.error_init_method_required)", "def __init__(self, applicationRootDirs: java.util.Collection):\n ...", "def webinit():\n\troot = Root()\n\troot.player = Player()\n\troot.songs = Songs()\n\troot.albums = Albums()\n\troot.artists = Artists()\n\t\n\tapp = cherrypy.tree.mount(root, '/', 'data/cherrypy.config')\n\treturn app", "def __init__(self, resource_path):\n self.resource_path = resource_path", "def setup(base_path, root_module_name=\"caliper\"):\n if sys.modules.has_key(root_module_name):\n return\n _create_module_and_parents(root_module_name)\n imp.load_package(root_module_name, base_path)\n\n # allow locally installed third party packages to be found.\n sys.path.insert(0, os.path.join(base_path, \"site_packages\"))", "def __init__(self, root_path):\n self.root_path = root_path.replace('\\\\', '/') \n self.workspace_directory = self.root_path + '/workspaces'\n self.resource_directory = self.root_path + '/resources'\n \n self.log_directory = self.root_path + '/log'\n self.log_id = 'event_handler'\n \n self.include_status = ['editable', 'readable']\n self.all_status = ['editable', 'readable', 'deleted']\n \n # Add logger\n core.add_log(log_id=self.log_id, \n log_directory=self.log_directory, \n log_level='DEBUG', \n 
on_screen=True, \n prefix='main')\n \n # Test main logger\n self._logger = core.get_log(self.log_id)\n self._logger.debug('Start EventHandler: {}'.format(self.log_id))\n self._logger.debug('')\n self._logger.info('TEST info logger')\n self._logger.warning('TEST warning logger')\n self._logger.error('TEST error logger')\n self._logger.debug('TEST debug logger')\n \n self.workspaces = {}\n \n # Mapping objects\n self.mapping_objects = {}\n self.mapping_objects['water_body'] = core.WaterBody(file_path=self.root_path + '/resources/mappings/water_body_match.txt')\n self.mapping_objects['quality_element'] = core.QualityElement(file_path=self.root_path + '/resources/Quality_Elements.cfg')\n self.mapping_objects['display_mapping'] = core.ParameterMapping()\n self.mapping_objects['display_mapping'].load_mapping_settings(file_path=self.root_path + '/resources/mappings/display_mapping.txt')\n \n # Initiate uuid_mapping file for user if not present", "def get_root(self, *args, **kwargs):\n return self._resources_manager.get_root(*args, **kwargs)", "def init_collections(self):\n # Default paths to spec files relative to specs folder.\n self.specs = {\n 'BASE': '',\n 'variables': 'variables.xml',\n 'conditions': 'conditions.xml',\n 'dynamicvariables': 'dynamic_variables.xml',\n 'dynamic_variables': 'dynamic_variables.xml',\n 'resources': 'resources.xml',\n 'panels': 'panels.xml',\n 'packs': 'packs.xml',\n 'install': 'install.xml',\n 'ProcessPanel.Spec': 'ProcessPanel.Spec.xml',\n 'core-packs': 'packs.xml'\n }\n\n # Default paths to resource files relative to specs folder.\n self.resources = {\n 'BASE': '',\n 'userInputSpec': 'userInputSpec.xml',\n 'strings': 'CustomLangPack.xml'\n }\n\n self.langpacks = {}", "def initialize_namespace(name, objects=None, root_class=None, suffix=None):\n\n if root_class:\n base = collect_subclasses(root_class, suffix)\n else:\n base = {}\n\n if objects:\n base.update(objects)\n _namespaces[name] = base\n return base", "def get_init():\n return _module_init()", "def load_all_resources():\n\n # Load the fonts\n ResourcesManager._load_font(\"Munro.ttf\")\n\n # Load images\n ResourcesManager.HIBER_NATION_IMG = ResourcesManager._load_image(\"hiber_nation.png\")\n ResourcesManager.SHIP_IMG = ResourcesManager._load_image(\"ship.png\")\n ResourcesManager.MISSILE_IMG = ResourcesManager._load_image(\"missile.png\")\n\n # Load sounds\n # ResourcesManager.MENU_MUSIC = ResourcesManager._load_sound(\"menu.ogg\")", "def root(self, relative_dir):\n # load module provider\n provider_module = load(self.__module__)\n # get relative module path to package root\n relative_module_path = modularize(relative_dir)\n self.package.module_root = self.__module__[\n 0 : self.__module__.find(relative_module_path) + len(relative_module_path)\n ]\n module_root_path = as_filepath(self.package.module_root)\n self.package.abs_root = provider_module.__file__[\n 0 : provider_module.__file__.find(module_root_path) + len(module_root_path)\n ]\n return self", "def on_libRoot(self):\n self.rf_libTree()\n self.rf_libPath()\n self.rf_libFileName()\n self.rf_delInfo()", "def setUp(self):\n self.modules = {}", "def init():\n\n # Run the Installer's `init` function.\n Installer.init()", "def __init__(self, trestle_root: pathlib.Path, import_: prof.Import) -> None:\n self._trestle_root = trestle_root\n self._import = import_", "def __init__(self, name, config):\n Submodule.__init__(self, name, config)", "def init_app(app):\n api.add_namespace(ns)\n app.register_blueprint(bp, url_prefix='/api/v1')", "def Init(self):\n # 
First iteration over all the files in root searching for symlinks and\n # non-regular files.\n seen_inodes = {}\n for basepath, _, filenames in sorted(os.walk(self._root)):\n for filename in sorted(filenames):\n full_path = os.path.join(basepath, filename)\n rel_path = full_path[len(self._root):]\n st = os.lstat(full_path)\n\n file_data = {\n 'size': st.st_size,\n }\n self._files[rel_path] = file_data\n\n # Track symlinks.\n if stat.S_ISLNK(st.st_mode):\n link_path = os.readlink(full_path)\n # lddtree's normpath handles a little more cases than the os.path\n # version. In particular, it handles the '//' case.\n self._symlinks[rel_path] = (\n link_path.lstrip('/') if link_path and link_path[0] == '/' else\n lddtree.normpath(os.path.join(os.path.dirname(rel_path),\n link_path)))\n file_data['deps'] = {\n 'symlink': [self._symlinks[rel_path]]\n }\n\n # Track hardlinks.\n if st.st_ino in seen_inodes:\n self._hardlinks[rel_path] = seen_inodes[st.st_ino]\n continue\n seen_inodes[st.st_ino] = rel_path", "def __init__(self):\n if DynamicImporter._instance is not None:\n raise Exception(\"DynamicImporter instance already exists!\")\n DynamicImporter._instance = self\n\n current_path = Path(__file__).parent\n test_path = current_path / \"testdata\"\n files = test_path.rglob(\"*.py\")\n\n for file in files:\n\n if file.name in [\"__init__.py\", \"test_module.py\", \"test_registry.py\", \"connections.py\"]:\n continue\n\n name = file.stem\n module = import_module(f\"testdata.{name}\")\n class_title = f\"{name.title()}Test\"\n\n try:\n _class = getattr(module, class_title) # get the class\n self.class_list[class_title] = _class # add the class to the class list\n except AttributeError: # don't throw exceptions for files that don't have a test\n continue", "def scan_morepath_modules(cls: type[morepath.App]) -> None:\n for module in sorted(morepath_modules(cls)):\n morepath.scan(import_module(module))", "def __call__(self, spec, isfile=False):\n if spec.startswith('pkg:') or spec.startswith('mod:'):\n rsc = Resource.frommodule(spec[4:], isfile)\n elif spec.startswith('dir:'):\n rsc = Resource(spec[4:], self._config.ROOT, isfile)\n else:\n try:\n rsc = Resource.frommodule(spec, isfile)\n except (IOError, ImportError):\n rsc = Resource(spec, self._config.ROOT, isfile)\n return rsc", "def __init__(self, stream):\n try:\n self._root = os.path.split(stream.name)[0]\n except AttributeError:\n self._root = os.getcwd()\n super(ImportLoader, self).__init__(stream)", "def test_init_no_children(\n self,\n fx_deployments: YamlLoaderDeployment,\n mocker: MockerFixture,\n runway_context: MockRunwayContext,\n ) -> None:\n mock_async = mocker.patch.object(Module, \"_Module__async\")\n mock_sync = mocker.patch.object(Module, \"_Module__sync\")\n mock_run = mocker.patch.object(Module, \"run\")\n mod = Module(\n context=runway_context,\n definition=fx_deployments.load(\"min_required\").modules[0],\n )\n assert mod.init()\n mock_run.assert_called_once_with(\"init\")\n mock_async.assert_not_called()\n mock_sync.assert_not_called()", "def __init__(self, app, root):\n self.app = app\n self.root = root\n self.config = app.config()\n self._synth_editors = {} # Active synth editors. 
synth SID used as key", "def initialize(self, application):", "def rootMobileResource(self) -> BasicResource:\n return self.__rootMobileResource", "def __init__(self):\n thisType = type(self)\n if not thisType._initialized:\n thisType._initialized = True\n self._embedded_device_registry = {}\n self._root_device_registry = {}\n self._service_registry = {}\n self._scan_for_device_extensions_under_code_container(dynamic_extensions)\n self._scan_for_device_extensions_under_code_container(standard_extensions)\n self._scan_for_service_extensions_under_code_container(dynamic_extensions)\n self._scan_for_service_extensions_under_code_container(standard_extensions)\n return", "def mount(class_, root):\n # XXX class_ - in successors own _ready (?)\n if class_._ready:\n return\n else:\n p = VRoot(root).hpath(SchModule.DIR)\n SchModule.DIR = VRoot(p)\n class_._ready = True", "def get_resources():\r\n global __res\r\n if __res == None:\r\n __init_resources()\r\n return __res", "def get_resources():\r\n global __res\r\n if __res == None:\r\n __init_resources()\r\n return __res", "def get_resources():\r\n global __res\r\n if __res == None:\r\n __init_resources()\r\n return __res", "def get_resources():\n global __res\n if __res == None:\n __init_resources()\n return __res", "def get_resources():\n global __res\n if __res == None:\n __init_resources()\n return __res", "def get_resources():\n global __res\n if __res == None:\n __init_resources()\n return __res", "def pre_config_root_create(self, resource_dict):\n pass", "def __init__(self, specs, resources, properties=None):\n if not properties:\n properties = {}\n self.init_collections()\n self.properties = properties\n self.set_paths(specs, resources)\n self.parse_paths()\n self.find_resources()", "def __init__(self, doc_root):\n self.doc_root = doc_root\n site = yaml_load('data/site.yaml')['site']\n self.site = site", "def __init__(self, handler=None, root=None, extra_configs=None):\n self.settings = {}\n self.rw_settings = {}\n self.root = root\n self.scope = rw.scope.Scope()\n self.scope['app'] = self\n self.extra_configs = extra_configs\n if self.root:\n self.handler = handler if handler is not None else RequestHandler\n self.scope['settings'] = rw.cfg.read_configs(self.root.name,\n self.extra_configs)\n\n pkgs = self.scope['settings'].get('rw.templates', {}).get('pkgs', None)\n if not pkgs:\n pkgs = [root.name]\n\n self.scope['template_env'] = rw.template.create_template_env(pkgs)\n self.scope['template_env'].globals['app'] = self\n else:\n self.handler = handler\n self.scope['settings'] = {}\n assert handler is not None\n\n self._wsgi = False # wsgi is not supported\n # compatibility so we can mount tornado RequestHandlers\n self.ui_modules = {}\n self.ui_methods = {}\n rw.server.PHASE_CONFIGURATION.add(self.configure)", "def __init__(self):\n try:\n # Use pkg_resources to support Windows and Unix file paths\n # and find relative module path for file\n file_to_open = resource_string(__name__, self.FILE)\n self.errors = json.loads(file_to_open)\n\n except ResolutionError as e:\n print(e)\n self.errors = dict()", "def _initialize_all():\n registry.clear_checkers()\n registry.clear_contexts()\n cli._register_internal_plugins.has_run = ( # pylint: disable=protected-access\n False\n )\n cli._register_internal_plugins() # pylint: disable=protected-access", "def __init__(self, abspath, relpath, parent):\n self._abspath = abspath\n self._relpath = relpath\n self._name = os.path.basename(abspath)\n self._parent = parent\n self._rawdoc = None\n self._module 
= None\n self._is_test_dir = False\n if parent and parent.is_test_directory() or \\\n self._name == 'tests':\n self._is_test_dir = True\n self._is_external = False\n if parent and parent.is_external() or self._name == 'external':\n self._is_external = True\n self._subdirs = set()\n if parent:\n parent._subdirs.add(self)\n self._files = set()\n self._has_installed_files = None", "def morepath_modules(cls: type[morepath.App]) -> 'Iterator[str]':\n for base in cls.__mro__:\n if not issubclass(base, morepath.App):\n continue\n\n if base is morepath.App:\n continue\n\n module = '.'.join(base.__module__.split('.')[:2])\n\n if module.startswith('test'):\n continue\n\n yield module", "def __init__(self, root, branches=None):\n self.tree_dict = {}\n self.directory = Path(root)\n self.start = str(self.directory).rfind(os.sep) + 1\n self.branches = branches\n self.get()", "def __gitSubmodulesInit(self):\n self.vcs.gitSubmoduleInit(self.project.getProjectPath())", "def initialization(self):\n super().initialization()\n self.register_uniq_ids([self.id_modal, self.id_modal_close, self.id_wip_button])\n\n # Register modules\n self.modules = [self.mod_table, self.mod_cache, self.mod_upload]", "def _create_module(self, rootdir):\n name = 'module_' + rootdir.get_name()\n moduleobj = Module(name, rootdir)\n rootdir.set_module(moduleobj)\n self._modules[name] = moduleobj", "def initialize_app(app):\n # configure_app(app)\n # log.info(\"> Starting development server at http://%s/api/ <<<<<\" %\n # app.config[\"SERVER_NAME\"])\n\n blueprint_api = Blueprint('api', __name__, url_prefix='/api')\n api.init_app(blueprint_api)\n app.register_blueprint(blueprint_api)\n\n api.add_namespace(task_namespace)\n api.add_namespace(chain_namespace)\n\n Bootstrap(app)\n nav.init_app(app)\n app.register_blueprint(frontend_blueprint)\n app.register_blueprint(processors_blueprint)\n app.register_blueprint(chains_blueprint)\n app.register_blueprint(tasks_blueprint)\n app.register_blueprint(compare_blueprint)\n\n db.init_app(app)\n db.create_all(app=app)\n\n if not os.path.exists(app.config[\"OCRD_BUTLER_RESULTS\"]):\n os.makedirs(app.config[\"OCRD_BUTLER_RESULTS\"])", "def _prepare(self):\n\n # Set configuration defaults and save to the project document\n self.config.setdefault('PAGINATION', True)\n self.config.setdefault('PER_PAGE', 25)\n\n # Create and configure the Flask application\n self.app = self._create_app(self.config)\n\n # Add assets and routes\n self.assets = self._create_assets()\n self._register_routes()\n\n # Add module assets and routes\n self._module_assets = []\n for module in self.modules:\n try:\n module.register(self)\n except Exception as e:\n logger.error('Error while registering {} module: {}'.format(\n module.name, e))\n logger.error('Removing module {} from dashboard.'.format(\n module.name))\n self.modules.remove(module)\n\n # Clear dashboard and project caches.\n self.update_cache()", "def __init__(self, *args, **kwargs):\n super(ResourceFieldType, self).__init__(*args, **kwargs)\n\n resource = self.get_field_info_key('resource')\n\n if isinstance(resource, str):\n module, attr = resource.rsplit('.', 1)\n\n try:\n resource = getattr(import_module(module), attr)\n except (AttributeError, ImportError) as e:\n raise ImportError('Unable to load resource \"%s\": %s'\n % (resource, e))\n\n self.resource = resource", "def __init__(self, config_root):\n if not isinstance(TvbProfile.current, MATLABLibraryProfile):\n config_file_name = TvbProfile.current.LOGGER_CONFIG_FILE_NAME\n package = __import__(config_root, 
globals(), locals(), ['__init__'], 0)\n package_path = package.__path__[0]\n # Specify logging configuration file for current package.\n logging.config.fileConfig(os.path.join(package_path, config_file_name), disable_existing_loggers=False)\n else:\n logging.basicConfig(level=logging.DEBUG)\n self._loggers = weakref.WeakValueDictionary()", "def init():", "def find_packages(cls, chroot, log=None):\n base = os.path.join(chroot.path(), cls.SOURCE_ROOT)\n packages, namespace_packages = set(), set()\n resources = defaultdict(set)\n\n def iter_files():\n for root, _, files in safe_walk(base):\n module = os.path.relpath(root, base).replace(os.path.sep, '.')\n for filename in files:\n yield module, filename, os.path.join(root, filename)\n\n # establish packages, namespace packages in first pass\n for module, filename, real_filename in iter_files():\n if filename != '__init__.py':\n continue\n packages.add(module)\n if cls.declares_namespace_package(real_filename):\n namespace_packages.add(module)\n\n # second pass establishes non-source content (resources)\n for module, filename, real_filename in iter_files():\n if filename.endswith('.py'):\n if module not in packages:\n # TODO(wickman) Consider changing this to a full-on error as it\n # could indicate bad BUILD hygiene.\n # raise cls.UndefinedSource('%s is source but does not belong to a package!' % filename)\n if log:\n log.warn('%s is source but does not belong to a package.' % real_filename)\n else:\n continue\n submodule = cls.nearest_subpackage(module, packages)\n if submodule == module:\n resources[submodule].add(filename)\n else:\n assert module.startswith(submodule + '.')\n relative_module = module[len(submodule) + 1:]\n relative_filename = os.path.join(relative_module.replace('.', os.path.sep), filename)\n resources[submodule].add(relative_filename)\n\n return packages, namespace_packages, resources" ]
[ "0.6172543", "0.6130026", "0.58406955", "0.5820272", "0.5808664", "0.56877095", "0.5673514", "0.5664736", "0.5635875", "0.561636", "0.5615964", "0.559234", "0.5569267", "0.5522192", "0.5514438", "0.5474632", "0.54634255", "0.54632753", "0.5462733", "0.54501873", "0.5443123", "0.5411119", "0.5410135", "0.54009956", "0.536563", "0.5362878", "0.5362878", "0.53381944", "0.533463", "0.5330706", "0.53303945", "0.53294206", "0.5305333", "0.52954894", "0.5294248", "0.52882767", "0.5286567", "0.5285464", "0.5282438", "0.52616745", "0.5261363", "0.52438635", "0.5241392", "0.52405643", "0.52284807", "0.5209803", "0.5193848", "0.51868147", "0.5183952", "0.5178032", "0.5163693", "0.5162466", "0.5156346", "0.5155051", "0.51452214", "0.5141485", "0.51250005", "0.51189005", "0.5117076", "0.5117071", "0.51138234", "0.5108147", "0.51022106", "0.50968415", "0.5095639", "0.50874776", "0.50871664", "0.5082836", "0.507964", "0.50766915", "0.5072325", "0.5059384", "0.50515807", "0.5042892", "0.5035161", "0.5031436", "0.5026414", "0.5026414", "0.5026414", "0.50262535", "0.50262535", "0.50262535", "0.5019535", "0.5019085", "0.5018496", "0.50131917", "0.49928814", "0.49905455", "0.49803263", "0.49786696", "0.49780345", "0.49702847", "0.4965012", "0.49647132", "0.49603242", "0.49601963", "0.4959832", "0.495619", "0.49522877", "0.49504638" ]
document_score: 0.7221802
document_rank: 0
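The `objective` metadata above flags each row for triplet-style training over `(query, document, negatives)`. A sketch of expanding one row into individual training triples is shown below; it assumes `negatives` and `negative_scores` are aligned index-for-index, and the score threshold is an arbitrary illustration, not part of the dataset.

```python
# Sketch only: expand one row into (query, positive, negative) training triples.
# Assumes negatives and negative_scores are aligned; the threshold is illustrative.
from typing import Dict, List, Tuple

def row_to_triples(row: Dict, min_negative_score: float = 0.0) -> List[Tuple[str, str, str]]:
    triples = []
    for neg, score in zip(row["negatives"], row["negative_scores"]):
        if float(score) >= min_negative_score:  # scores are stored as strings
            triples.append((row["query"], row["document"], neg))
    return triples

# e.g. keep only the hardest negatives from the record above:
# triples = row_to_triples(row, min_negative_score=0.6)
```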
query: Starts WSGI application flow.
document:
    def __call__(self, environ, start_response):
        middleware = Middleware(environ, start_response)
        middleware.application = self
        return middleware
metadata: { "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
negatives:
[ "def main():\r\n run_wsgi_app(app)", "def main():\n run_wsgi_app(APP)", "def start():\n from paste.deploy import loadapp, loadserver\n from moksha.config.environment import load_environment\n from moksha.config.middleware import make_app\n ini = 'config:' + path('development.ini').abspath()\n wsgi_app = loadapp(ini)\n serve = loadserver(ini)\n serve(wsgi_app)", "def startapp():", "def Main():\n wsgiref.handlers.CGIHandler().run(application)", "def start():\n app.run()", "def start(self) -> None:\n if self.bolt_app.logger.level > logging.INFO:\n print(get_boot_message())\n else:\n self.bolt_app.logger.info(get_boot_message())\n\n web.run_app(self.web_app, host=\"0.0.0.0\", port=self.port)", "def main():\r\n LOG.info('Starting server build.')\r\n web.run_app(init_app(),\r\n host=os.environ.get('APP_HOST', CONFIG.registry.get('app_host', '0.0.0.0')),\r\n port=int(os.environ.get('APP_PORT', CONFIG.registry.get('app_port', 8080))),\r\n shutdown_timeout=0,\r\n ssl_context=application_security())", "def main():\n try:\n port = 8080\n ip = '0.0.0.0'\n http_server = WSGIServer((ip, port),\n app,\n log=logging,\n error_log=logging,\n )\n print(\"Server started at: {0}:{1}\".format(ip, port))\n http_server.serve_forever()\n except Exception as exc:\n logger.error(exc.message)\n logger.exception(traceback.format_exc())\n finally:\n # Do something here\n pass", "def run():\n\n # Construct a server.\n server = wsgiref.simple_server.make_server(\n _config[ 'address' ],\n _config[ 'port' ],\n application\n )\n\n # Run the server.\n server.serve_forever()\n\n # Return result.\n return 0", "def app(environ, start_response):\n status = '200 OK'\n response_headers = [('Content-Type', 'text/plain')]\n start_response(status, response_headers)\n return ['Hello world from a simple WSGI application!\\n']", "def start() -> None:\n from app import app\n app.run(debug = True, host = HOST, port = PORT)", "def start(self):\n\n self.app = Application()\n self.app._loop = self.loop\n self.add_routes()\n self.app.run(port=int(self.port),\n worker_num=None,\n reload=False,\n debug=False)\n # GZip support\n # Compress(self.app)\n # self.app.config['COMPRESS_MIMETYPES'] = {'text/html',\n # 'application/json'}\n # self.app.config['COMPRESS_LEVEL'] = 4\n # self.app.config['COMPRESS_MIN_SIZE'] = 300\n # Session support\n # self.session_interface = InMemorySessionInterface()\n # self.app.response_middleware.appendleft(self.save_session)\n # self.app.request_middleware.append(self.add_session_to_request)\n\n # self.add_routes()\n # return await self.app.create_server(loop=self.loop,\n # host='0.0.0.0',\n # port=self.port,\n # debug=False)", "def main() -> None:\n config = get_config()\n app = Application()\n web_config = config[\"web\"]\n webapp = WebApp(config)\n webapp.attach_to(app)\n\n run_config = keep(web_config, {\"host\", \"port\"})\n run_app(app, **run_config)", "def serve() -> None:\n uvicorn.run(\n \"bartender.web.application:get_app\",\n workers=settings.workers_count,\n host=settings.host,\n port=settings.port,\n reload=settings.reload,\n log_level=settings.log_level,\n factory=True,\n )", "def main():\n cfg.CONF(sys.argv[1:], project='blazar', prog='blazar-api')\n notifier.init()\n service_utils.prepare_service(sys.argv)\n if not CONF.enable_v1_api:\n app = v2_app.make_app()\n else:\n app = wsgi_app.VersionSelectorApplication()\n\n wsgi.server(eventlet.listen((CONF.host, CONF.port), backlog=500), app)", "def main(_, **settings):\n config = Configurator(settings=settings)\n register_includes(config)\n 
register_json_renderer(config)\n register_routes(config)\n\n config.scan()\n return config.make_wsgi_app()", "def Run(self):\n self.BuildWebAppSite()\n\n self.BuildRPCSite(self.env.umpire_cli_port, self.methods_for_cli, '0.0.0.0')\n self.BuildRPCSite(self.env.umpire_rpc_port, self.methods_for_dut)\n\n # Start services.\n reactor.callWhenRunning(self.OnStart)\n # And start reactor loop.\n reactor.run()", "def webserver_start():\n run(_webserver_command())", "def run():\n app.run()", "def run():\n app = Application()\n #app.sentry_client = AsyncSentryClient(app.settings['sentry_url'])\n http_server = HTTPServer(app, xheaders=True)\n http_server.listen(options.port)\n print('Running on port %d' % options.port)", "def start(self):\n\n self.app.go()", "def main():\n # Debug is enabled by default, can be disabled by environment variable\n debug = not os.environ.get(\"NO_DEBUG\", False)\n if debug:\n # Workaround for the werkzeug reloader removing the current directory\n # from the path. It's nasty, but it works! Inspired by:\n # https://github.com/mitsuhiko/flask/issues/1246\n os.environ[\"PYTHONPATH\"] = os.getcwd()\n # Enable PTVSD in werkzeug watched processes only\n # if \"WERKZEUG_RUN_MAIN\" in os.environ:\n # ptvsd.enable_attach()\n # Run the Flask app\n APP.run(host=\"0.0.0.0\", port=8000)", "def main(config=None):\n init = InitRepoPath(config)\n\n listen_address, port = init.get_listen_address()\n\n backend = DictBackend(init.get_backends())\n \n app = make_wsgi_chain(backend)\n server = make_server(listen_address, port, app,\n handler_class=WSGIRequestHandlerLogger,\n server_class=WSGIServerLogger)\n logger.info('Listening for HTTP connections on %s:%d',\n listen_address, port)\n server.serve_forever()", "def run(self):\n self.app.run()", "def run(self):\n self.app.run()", "def init_app():\r\n LOG.info('Initialising web server.')\r\n app = web.Application(middlewares=[api_key()])\r\n app.router.add_routes(routes)\r\n set_cors(app)\r\n app.on_startup.append(init_db)\r\n app.on_cleanup.append(close_db)\r\n return app", "def start(name, path):\n app.start(name, path)", "def start():\n _with_deploy_env(['./bin/paster serve src/remix/oerpub/rhaptoslabs/production.ini --daemon'])", "def run():\n\n @werkzeug.serving.run_with_reloader\n def runDebugServer():\n try:\n app.debug = True\n dapp = DebuggedApplication(app, evalex=True)\n appsocket.serve_forever()\n\n except Exception, err:\n app.logger.error(\"Error\", exc_info=True)\n\n runDebugServer()", "def start():\n # Import any local level utilities that may be used\n # before the web-server is initialized.\n from django.core.management import call_command\n from db.models import ApplicationState\n from db.utilities import generate_models\n\n # Run the migrate command within django.\n # Making sure our models are upto date.\n call_command(command_name=\"migrate\", app=\"titandash\")\n\n # Server is being started, it is safe for us\n # to update our active flag.\n ApplicationState.objects.set(state=True)\n\n # Generate any initial models that we expect\n # to be available by default.\n generate_models()\n\n _url = EEL_DASHBOARD if User.objects.valid() else EEL_LOGIN\n\n logger.info(\"starting titandash application with options: '{options}'\".format(options={\"path\": _url, **EEL_START_OPTIONS}))\n # Start eel, providing our start url defined above, the close callback\n # to deal with cleanup functionality, and default options.\n eel.start(_url, close_callback=close_callback, **EEL_START_OPTIONS)", "def start(self):\n self.serve_forever()", "def 
start(self):\n self.serve_forever()", "def application(environ, start_response, app=[]):\n if not app:\n app.append(make_application())\n return app[0](environ, start_response)", "def start_server(self):\n app.run(host=str(self.__constants.host),\n port=int(self.__constants.port),\n debug=bool(self.__constants.runindebug))", "def main():\n print(\"def main\")\n return APP.run()", "def start(self):\n run(self.app, host=self.host, port=self.port, server=AsyncServer,\n quiet=True, debug=False)", "def bootstrap_wsgi():\n return get_wsgi_application()", "def main():\n\n apps = [\n 'fires', 'hw6',\n 'imageapp',\n 'quixote_demo',\n 'quotes',\n 'chat',\n 'cookie'\n ]\n parser = argparse.ArgumentParser(\n description='A WSGI Server implemented for CSE491-001.',\n epilog='Please check the non-existent documentation for more info.',\n formatter_class=argparse.RawTextHelpFormatter\n )\n # Add the '-?' alias for '--help', which I prefer to use:\n parser.add_argument('-?',\n action='help',\n help='Alias for --help')\n # Add the application argument:\n parser.add_argument('--app',\n nargs='?',\n dest='app',\n default='fires',\n choices=apps,\n help='\\n'.join([\n 'Which WSGI application to run.',\n '(default: \"%(default)s\" - my homework 6)',\n 'Alias: -A'\n ]))\n parser.add_argument('-A',\n nargs='?',\n dest='app',\n default='fires',\n choices=apps,\n help=argparse.SUPPRESS)\n # Add the port argument:\n parser.add_argument('--port',\n nargs='?',\n default=random.randint(8000, 9999),\n type=int,\n help='\\n'.join([\n 'Which port to start the server on.',\n '(default: random integer between 8000 and 9999)',\n 'Alias: -p'\n ]))\n # After that, parse the command-line arguments.\n args = parser.parse_args()\n\n # Create a socket object\n sock = socket.socket()\n # Get local machine name\n host = socket.getfqdn()\n\n if host in ('magrathea', 'Thoth'):\n # For testing, I don't want to have to change my url all the damn time.\n port = 8080\n else:\n port = args.port\n # Bind to the port\n # TODO figure out how to immediately unbind when I'm done\n sock.bind((host, port))\n print 'Starting server at http://%s:%d/' % (host, port)\n # Now wait for client connection.\n sock.listen(5)\n\n # get this from commandline\n app_to_run = args.app\n if app_to_run == 'quixote_demo':\n # quixote stuff for testing with that\n p = create_publisher()\n # p.is_thread_safe = True # hack...\n wsgi_app = quixote.get_wsgi_app()\n elif app_to_run == 'imageapp':\n imageapp.setup()\n p = imageapp.create_publisher()\n wsgi_app = quixote.get_wsgi_app()\n elif app_to_run == 'quotes':\n wsgi_app = QuotesApp('./quotes/quotes.txt', './quotes/html')\n elif app_to_run == 'chat':\n wsgi_app = ChatApp('./chat/html')\n elif app_to_run == 'cookie':\n wsgi_app = cookieapp.wsgi_app\n else: #if app_to_run == 'fires': # default\n wsgi_app = app.make_app()\n\n\n print 'Entering infinite loop; hit CTRL-C to exit'\n try:\n while True:\n # Establish connection with client.\n conn, (client_host, client_port) = sock.accept()\n print 'Got connection from', client_host, client_port\n handle_connection(conn, wsgi_app)\n finally:\n # teardown stuffs\n if app_to_run == 'imageapp':\n imageapp.teardown()\n sock.shutdown(2)\n sock.close()", "def main():\n try:\n http_server = WSGIServer(('0.0.0.0', 8080),\n app,\n log=logging,\n error_log=logging)\n\n http_server.serve_forever()\n except Exception as exc:\n logger.error(exc.message)\n logger.exception(traceback.format_exc())\n finally:\n # get last entry and insert build appended if not completed\n # Do something 
here\n pass", "def __call__(self, environ, start_response):\n return self.app(environ, start_response)", "def run():\n register_component(\"press\")\n run_app(host=\"0.0.0.0\", port=8080, debug=True, workers=os.cpu_count())", "def run_server():\n app = init_app()\n app.run(host=app.config['HOST'], port=app.config['PORT'])", "def run(self):\n server = CherryPyWSGIServer(\n (self.options['host'], int(self.options['port'])),\n WSGIPathInfoDispatcher({\n '/': WSGIHandler(),\n settings.ADMIN_MEDIA_PREFIX: MediaHandler(\n os.path.join(admin.__path__[0], 'media'))\n }),\n int(self.options['threads']), self.options['host'],\n request_queue_size=int(self.options['request_queue_size']))\n try:\n server.start()\n except KeyboardInterrupt:\n server.stop()", "def app(environ: t.Dict, start_response):\n # Print the request object details in environ.items()\n for k, v in environ.items():\n print(k, v)\n\n # Let's capture the request path\n path = environ.get(\"PATH_INFO\")\n\n # Handle our different routes. Render different templates.\n # Allow user to add \"/\" or not to URL string\n # NOTE: Don't use elif statement! It skips 'data' assignment!\n if path.endswith(\"/\"):\n path = path[:-1] # remove the trailing \"/\"\n if path == \"\": # the root / index\n data = home(environ)\n elif path == \"/contact\":\n data = contact_us(environ)\n elif path == \"/box-office\":\n data = read_box_office_data(environ)\n else:\n data = render_template(template_name=\"404.html\", context={\"path\": path})\n\n # Encode data to BYTE string\n data = data.encode(\"utf-8\")\n\n # Gunicorn's start_response to get a response going\n start_response(\n f\"200 OK\",\n [(\"Content-Type\", \"text/html\"), (\"Content-Length\", str(len(data)))],\n # You can remove these headers and the browser will still parse it.\n # Modern browsers are smart enough to infer how to parse the request\n )\n # Where does this print to? Server logs I bet... YES!\n # print(f\"{data=}\\n{iter([data])}\")\n return iter([data]) # <list_iterator object at 0x10f9f1340>", "async def init_app():\n app = web.Application()\n\n # And... 
here our routes\n app.router.add_route(\n \"POST\", f\"/{ASTERISK_CALL_APP_ROUTE_ASTERISK_INIT}\", asterisk_init\n )\n app.router.add_route(\"POST\", f\"/{ASTERISK_CALL_APP_ROUTE_PLAY}\", asterisk_play)\n return app", "def main():\n import sys\n FILES.extend(sys.argv[1:])\n app.debug = True\n app.run(port=5001, threaded=False)", "def main(global_config, **settings):\n config = Configurator(settings=settings)\n\n init_includes(config)\n init_routing(config)\n init_db(config)\n return config.make_wsgi_app()", "def start_app(self, name, stateless):\n raise NotImplementedError", "def main():\n app.run(debug=True)", "def main():\n conn = pymongo.MongoClient(settings.DB_URI)\n database = conn[settings.DB_NAME]\n\n application = tornado.web.Application(\n [\n (r\"/\", BaseHandler),\n (r\"/upload\", UploadHandler),\n (r\"/web/([^/]+)\", WebHandler),\n ],\n database=database, secret=settings.SECRET, debug=settings.DEBUG, gzip=True,\n template_path=settings.TEMPLATE_PATH,\n static_path=settings.STATIC_PATH\n )\n application.cache = {}\n\n logging.info(\"starting bbgps...\")\n application.listen(settings.PORT)\n tornado.ioloop.IOLoop.instance().start()", "def run():\n import argparse\n\n parser = argparse.ArgumentParser(description='Phovea Server')\n parser.add_argument('--use_reloader', action='store_true', help='whether to automatically reload the server')\n parser.add_argument('--env', default=cc.get('env'), help='environment mode (dev or prod)')\n\n # parse before to enable correct plugin discovery\n args = parser.parse_known_args()[0]\n if args.env.startswith('dev'):\n enable_dev_mode()\n else:\n enable_prod_mode()\n\n # resolve the default command to decide which application to launch\n default_command = _resolve_commands(parser)\n if default_command is not None:\n # set a default subparse to extract the defined arguments from the instance to the main arguments (?)\n set_default_subparser(parser, default_command)\n\n args = parser.parse_args()\n\n _set_runtime_infos(args)\n\n main = args.launcher(args) # execute the launcher function, which returns another function\n\n if args.use_reloader:\n _log.info('start application using reloader...')\n run_with_reloader(main, extra_files=_config_files())\n else:\n _log.info('start application...')\n main()", "def app():\n return create_app()", "def __call__(self, environ, start_response):\n self.preprocess(environ)\n return self.app(environ, start_response)", "def start(self) -> None:\n app = web.Application()\n app.add_routes([web.post(\"/\", self._handle_request)])\n self._runner = web.AppRunner(app)\n\n self._startup_event = threading.Event()\n self._server_loop = asyncio.new_event_loop()\n t = threading.Thread(target=self._run)\n t.start()\n\n # Wait for server to startup\n self._startup_event.wait()", "def start(cls):\n\n logger.info(\"reading config\")\n env = cs.HostingEnvironment()\n env.start_metrics_if_enabled()\n\n if env.user_script_name:\n Server._download_user_module(env)\n\n logger.info('loading framework-specific dependencies')\n framework = cs.ContainerEnvironment.load_framework()\n framework.load_dependencies()\n\n nginx_pid = 0\n gunicorn_bind_address = '0.0.0.0:8080'\n if env.use_nginx:\n logger.info(\"starting nginx\")\n nginx_conf = pkg_resources.resource_filename('container_support', 'etc/nginx.conf')\n subprocess.check_call(['ln', '-sf', '/dev/stdout', '/var/log/nginx/access.log'])\n subprocess.check_call(['ln', '-sf', '/dev/stderr', '/var/log/nginx/error.log'])\n gunicorn_bind_address = 'unix:/tmp/gunicorn.sock'\n nginx_pid = 
subprocess.Popen(['nginx', '-c', nginx_conf]).pid\n\n logger.info(\"starting gunicorn\")\n gunicorn_pid = subprocess.Popen([\"gunicorn\",\n \"--timeout\", str(env.model_server_timeout),\n \"-k\", \"gevent\",\n \"-b\", gunicorn_bind_address,\n \"--worker-connections\", str(1000 * env.model_server_workers),\n \"-w\", str(env.model_server_workers),\n \"container_support.wsgi:app\"]).pid\n\n signal.signal(signal.SIGTERM, lambda a, b: Server._sigterm_handler(nginx_pid, gunicorn_pid))\n\n children = set([nginx_pid, gunicorn_pid]) if nginx_pid else gunicorn_pid\n logger.info(\"inference server started. waiting on processes: %s\" % children)\n\n while True:\n pid, _ = os.wait()\n if pid in children:\n break\n\n Server._sigterm_handler(nginx_pid, gunicorn_pid)", "def start(**kwargs):\n # Project\n\n CustomWSGI(\n app=\"stats.api.main:api\",\n options={\n \"worker_class\": \"uvicorn.workers.UvicornWorker\",\n \"preload\": True,\n \"keepalive\": 10,\n \"command\": shutil.which(\"gunicorn\"),\n \"bind\": \":\".join(\n (format_listen_address(params.listen_address), str(params.listen_port))\n ),\n \"workers\": workers,\n \"loglevel\": loglevel,\n \"accesslog\": \"-\",\n \"errorlog\": \"-\",\n # \"logconfig_dict\": {\"formatters\": {\"generic\": {\"format\": \"%(message)s\"}}},\n **kwargs,\n },\n ).run()", "def main(global_config, **settings):\n LOGGER.info('= main :: settings = %s', settings)\n\n config = Configurator(settings=settings)\n\n # Home\n config.add_route('home', '/')\n\n # Lastly, we scan the config and make the app\n # config.scan()\n\n return config.make_wsgi_app()", "def server():\n package('apache2')\n require_started('apache2')", "def run(urls,\n requestType=WSGIRequest,\n unknown_handler=handle_404,\n middleware=(),\n error_handler=production_error_handler, args=sys.argv):\n app = create_application(urls, requestType=requestType, middleware=middleware, error_handler=error_handler)\n if '-d' in args:\n # run as a daemon\n from wsgiref.simple_server import make_server\n httpd = make_server('', 1234, app)\n httpd.serve_forever()\n else:\n from flup.server.fcgi import WSGIServer\n return WSGIServer(app, multiplexed=True).run()", "def main():\n app = App()\n app.run()", "def main():\n options = lib.main.parse_args()\n\n #Initialize all the loggings with the options specified.\n lib.main.logs_initialization(options)\n logging.debug(\"Logs are now enabled and working\")\n\n #Update the main config file with the app information.\n logging.debug(\"Updating parameters on config files\")\n lib.config.update_params()\n\n # Finally, when all the initialization schedule is completed, Flask\n # will start.\n logging.debug(\"Calling Flask initializator function\")\n api.start(options[\"debug\"])", "def dev_start():\r\n nginx_reload()\r\n djangoserver_start()", "def start():\n server = current_server()\n logger.info('Starting Flexx event loop.')\n server.start()", "def app_factory():\n app = web.Application()\n app.add_routes([\n web.get('/ping', handle_ping),\n ])\n return app", "def create_and_run():\n\n app = App()\n app.run()", "def application( environ, start_response ):\n\n # Set the default request handler.\n handler = _handlers[ None ]\n\n # Get request path.\n path = environ.get( 'PATH_INFO', '' )\n\n # Expand request information for handlers.\n environ[ 'request.path' ] = path.lstrip( '/' ).split( '/' )\n environ[ 'request.query' ] = urllib.parse.parse_qs(\n environ[ 'QUERY_STRING' ]\n )\n\n # Handler is specified.\n if environ[ 'request.path' ] and ( environ[ 'request.path' ][ 0 ] != '' 
):\n\n # See if a handler is available.\n handler = _handlers.get(\n environ[ 'request.path' ][ 0 ],\n _handlers[ '?' ]\n )\n\n # Delegate to the request handler.\n status, headers, content = handler( environ )\n\n # Define default headers.\n default_headers = {\n 'Content-Type' : 'text/html',\n 'Content-Length' : str( len( content ) ),\n }\n\n # Merge headers from handler.\n handler_headers = dict( headers )\n default_headers.update( handler_headers )\n merged_headers = [ ( k, v ) for k, v in default_headers.items() ]\n\n # Set the status string.\n status_string = '{} {}'.format( status, http_responses[ status ] )\n\n # Start the response.\n start_response( status_string, merged_headers )\n\n # Produce the content.\n if isinstance( content, bytes ):\n yield content\n else:\n yield bytes( content, 'utf-8' )", "def main(global_config, **settings):\n #import pdb; pdb.set_trace()\n config = Configurator(settings=settings)\n\n # logging config for pserve / wsgi\n if settings and 'logging_config_file' in settings:\n from pyramid.paster import setup_logging\n setup_logging(settings['logging_config_file'])\n\n from . import views\n config.include(views.do_view_config)\n config.scan('pelias.adapter.pyramid')\n\n # CORS -- might not make this call in production (eliminate a bit of overheads, as CORS is handled by Apache)\n if settings and settings.get('enable_cors_headers') == 'true':\n config.add_subscriber(app_utils.add_cors_headers_response_callback, NewRequest)\n\n return config.make_wsgi_app()", "def run():\n app.run(debug=True, port=5001)", "def start(self):\n #url = '{}://{}:{}/'.format('http',\n # self.ip,\n # self.port)\n #self.service_info = ServiceInfo(\n # '_webthing._sub._http._tcp.local.',\n # '{}._http._tcp.local.'.format(self.name),\n # address=socket.inet_aton(self.ip),\n # port=self.port,\n # properties={\n # 'url': url,\n # },\n # server='{}.local.'.format(socket.gethostname()))\n #self.zeroconf = Zeroconf()\n #self.zeroconf.register_service(self.service_info)\n\n # If WebSocketS used and NOT running in thread, and WebServer IS\n # running in thread make shure WebServer has enough stack size to\n # handle also the WebSocket requests.\n log.info('Starting Web Server')\n self.server.Start(threaded=srv_run_in_thread, stackSize=8192)", "def start():\r\n\r\n userName = userLogin.login()\r\n runApp(userName)", "def start_web_app(template, context, port=8000):\n from tornado.ioloop import IOLoop\n from jigna.web_app import WebApp\n\n ioloop = IOLoop.instance()\n\n app = WebApp(template=template, context=context)\n app.listen(port)\n\n print 'Starting the web app on port %s ...' 
% port\n ioloop.start()", "def startup(req=None):\n global started\n if not started:\n started = True\n cherrypy.server.start(init_only=True, server_class=None)\n return 0 # apache.OK", "def main():\n args = utils.parse_arguments()\n logging.basicConfig(level=logging.INFO)\n coloredlogs.install(level=0,\n fmt=\"[%(asctime)s][%(levelname)s] [%(name)s.%(funcName)s:%(lineno)d] %(message)s\",\n isatty=True)\n if args.debug:\n l_level = logging.DEBUG\n else:\n l_level = logging.INFO\n\n logging.getLogger(__package__).setLevel(l_level)\n\n LOG.info('RUNNING TAMAGO WEB')\n serve(app, port=8080, host='0.0.0.0')", "def run_forever(self):\n self.app.run()", "def init(loop):\n tasks = JobsHandler()\n config = ConfigHandler()\n task = TaskHandler()\n\n\n\n app = web.Application(loop = loop)\n app.router.add_route('*', '/tasks/{do_something}', tasks.handle)\n app.router.add_route('*', '/config/{do_something}', config.handle)\n app.router.add_route('*', '/task/{id}/{do_something}', task.handle)\n\n handler = app.make_handler()\n srv = yield from loop.create_server(handler, '0.0.0.0', 8080)\n print(\"Server started at http://0.0.0.0:8080\")\n return srv, handler", "def start_deployment(self):\n return", "def main(global_config, **settings):\n config = Configurator(settings=settings, root_factory=root_factory)\n config.include('substanced')\n config.include('.resources')\n config.scan()\n return config.make_wsgi_app()", "def run(self, handler):\n server = WSGIServer((self.host, self.port), handler)\n server.ssl_adapter = pyOpenSSLAdapter(\n certificate=CERTIFICATE,\n private_key=KEY,\n # certificate_chain=\"intermediate_cert.crt\"\n )\n\n try:\n server.start()\n logger.info(\"Started vROPs Web Service\")\n except Exception as exp:\n server.stop()\n logger.error(\"Exception: {} Stopped vROPs Web Service\".format(exp))", "def runserver():\n app.run(host=config.HOST, port=config.PORT, debug=config.DEBUG, threaded=config.THREADED)", "def make_app():\n return tornado.web.Application([\n tornado.web.URLSpec(r\"/ws/\", WebSocket, name=\"websocket\"),\n tornado.web.URLSpec(r\"/\", StartPage, name='index'),\n (r\"/static/\", tornado.web.StaticFileHandler,\n dict(path=SETTINGS['static_path'])),\n ], **SETTINGS)", "def local_webserver_start():\n if not _is_webserver_running():\n local(_webserver_command())", "def __call__(self, environ, start_response):\n # TODO: Consider supporting multiple applications mounted at root URL.\n # Then, consider providing priority of mounted applications.\n # One application could explicitly override some routes of other.\n script = environ.get('PATH_INFO', '')\n path_info = ''\n while '/' in script:\n if script in self.mounts:\n app = self.mounts[script]\n break\n items = script.split('/')\n script = '/'.join(items[:-1])\n path_info = '/%s%s' % (items[-1], path_info)\n else:\n app = self.mounts.get(script, self.app)\n original_script_name = environ.get('SCRIPT_NAME', '')\n environ['SCRIPT_NAME'] = original_script_name + script\n environ['PATH_INFO'] = path_info\n return app(environ, start_response)", "def application():\n\n configure_app(app)\n yield app", "def main(global_config, **settings):\n config = Configurator(settings=settings)\n config.include(includeme)\n return config.make_wsgi_app()", "def main(args=None):\n app()\n return 0", "def start(self, modulename=None):\n if modulename:\n # Unhook httpserver so cherrypy.server.start() creates a new\n # one (with config from setup_server, if declared).\n cherrypy.server.httpserver = None\n\n cherrypy.engine.start()\n\n 
self.sync_apps()", "def main():\n tornado.options.parse_command_line()\n ioloop = tornado.ioloop.IOLoop.instance()\n http_server = tornado.httpserver.HTTPServer(App())\n http_server.listen(options.port)\n tornado.autoreload.start()\n ioloop.start()", "def __call__(self, environ, start_response):\n\t\tsegments = get_path_info(environ).strip('/').split('/', 2)\n\t\tif len(segments) < 2:\n\t\t\tapp = hateoas_app\n\t\telse:\n\t\t\tpfx = segments[0] + '/' + segments[1]\n\t\t\tapp = self.instances.get(pfx, NotFound())\n\t\treturn app(environ, start_response)", "def main():\n global APP\n APP = make_app()\n APP.clients = [] # global list of all connected websocket clients\n APP.printer = Serial('/dev/ttyUSB0', baudrate=19200)\n APP.listen('1337', '0.0.0.0')\n log('Listening on http://0.0.0.0:1337')\n tornado.ioloop.IOLoop.current().start()", "def standalone():\n parser = argparse.ArgumentParser(\n description=\"Start Argo Workflow API Dispatch Server\"\n )\n parser.add_argument(\n \"-p\", \"--port\", help=\"Server listening port\", type=int, default=8080\n )\n parser.add_argument(\n \"-b\", \"--bind-address\", help=\"Server bind address\", default=\"127.0.0.1\"\n )\n parser.add_argument(\n \"-m\",\n \"--mock\",\n \"--mock-authentication\",\n action=\"store_true\",\n help=\"Do not require a JWT; mock out authentication\",\n )\n parser.add_argument(\n \"--no-verify-signature\",\n action=\"store_true\",\n help=\"Do not verify JWT signature\",\n )\n parser.add_argument(\n \"--no-verify-audience\",\n action=\"store_true\",\n help=\"Do not verify JWT audience\",\n )\n args = parser.parse_args()\n mock = args.mock\n v_s = True\n v_a = True\n if args.no_verify_signature:\n v_s = False\n if args.no_verify_audience:\n v_a = False\n server = Server(_mock=mock, verify_signature=v_s, verify_audience=v_a)\n httpd = WSGIServer(server.app, host=args.bind_address, port=args.port)\n httpd.start()", "def start_app(soft=False):\r\n global login_manager\r\n from sentry import init as init_sentry\r\n #init_sentry(app)\r\n\r\n\r\n login_manager = setup_application_extensions(app, '/authenticate')\r\n\r\n from flask.ext.sqlalchemy import SQLAlchemy\r\n\r\n from pyhackers.db import set_db, get_db\r\n\r\n set_db(SQLAlchemy(app))\r\n DB = get_db()\r\n from pyhackers.model.cassandra.connection import setup,connect\r\n connect(*setup())\r\n\r\n from pyhackers.model.user import User\r\n\r\n if soft: # When not in web mode\r\n return\r\n\r\n from pyhackers.admin import init as admin_init\r\n from pyhackers.cache import init as cache_init\r\n\r\n #noinspection PyUnusedLocal\r\n @login_manager.user_loader\r\n def load_user(user_id):\r\n logging.warn(\"[USER]Finding user {}\".format(user_id))\r\n try:\r\n return User.query.get(user_id)\r\n except Exception, ex:\r\n logging.exception(ex)\r\n try:\r\n from pyhackers.sentry import sentry_client # OMG\r\n sentry_client.captureException()\r\n finally:\r\n return None\r\n\r\n cache_init(app)\r\n admin_init(app, DB)\r\n\r\n from pyhackers.controllers.main import main_app\r\n from pyhackers.controllers.oauth.ghub import github_bp\r\n from pyhackers.controllers.discuss import discuss_app\r\n from pyhackers.controllers.ajax import ajax_app\r\n\r\n app.register_blueprint(github_bp)\r\n app.register_blueprint(main_app)\r\n app.register_blueprint(discuss_app)\r\n app.register_blueprint(ajax_app)\r\n\r\n @app.route(\"/site-map\")\r\n def site_map():\r\n links = []\r\n for rule in app.url_map.iter_rules():\r\n # Filter out rules we can't navigate to in a browser\r\n # and rules that require 
parameters\r\n if (\"GET\" in rule.methods or \"POST\" in rule.methods) and rule is not None and len(rule.defaults or []) >= len(rule.arguments or []):\r\n url = url_for(rule.endpoint)\r\n links.append((url, rule.endpoint))\r\n return jsonify({'links': links})\r\n\r\n # from controllers.oauth.twitter import twitter_bp\r\n # app.register_blueprint(twitter_bp)\r", "def init():\n server = Flask(__name__)\n \n return server", "def run():\n REDIRECT = False\n LOG_FILE = \"truss.log\"\n app = App(REDIRECT)\n app.MainLoop()", "def main():\n\n if os.getuid() != 0:\n sys.stderr.write('{} must run as root\\n'.format(sys.argv[0]))\n sys.exit(1)\n\n Path(defaults.BASE_CONFIG_FILES_DIR).mkdir(exist_ok=True)\n\n tornado.options.parse_command_line()\n IOLoop().start(Application(), PORT)", "def main():\n Log.info('Installing...')\n app = Application()\n app.run()\n Log.info(\"Done successfully.\")", "def app(environ, start_response):\n # global counter\n # counter += 1\n # sleeptime = random() * 0.01\n # print(f'[{counter}] going under for {sleeptime}')\n # sleep(sleeptime)\n # print(f'[{counter}] waking up after {sleeptime}')\n data = b'Hello, World!\\n'\n status = '200 OK'\n response_headers = [('Content-type', 'text/plain'),\n ('Content-Length', str(len(data)))]\n start_response(status, response_headers)\n return iter([data])", "async def init_web(manager: ConfigServiceManager):\n base = manager.env.get('WEB_BASE_HREF', '/')\n\n app = web.Application()\n app['base_href'] = base\n app['manager'] = manager\n app['static_root_url'] = base + 'assets'\n app.add_routes(get_standard_routes(app))\n _setup_cors(app)\n app.add_routes(get_custom_routes(app))\n _setup_jinja(manager, app)\n\n if base != '/':\n root_app = web.Application()\n root_app.add_subapp(base, app)\n return root_app\n return app", "def wsgi_app():\n return bottle.default_app()", "def wsgi_app():\n return bottle.default_app()", "def start(context):\n context.run(\"python hellotensorflow/hello.py\")" ]
[ "0.79540753", "0.77699536", "0.72703", "0.7218581", "0.7189519", "0.71686983", "0.7069771", "0.70377684", "0.70257485", "0.6954285", "0.69124424", "0.6888019", "0.6877595", "0.68580186", "0.68424517", "0.677765", "0.66754097", "0.66459525", "0.6637033", "0.6628596", "0.66267157", "0.658793", "0.65370536", "0.65326184", "0.6528207", "0.6528207", "0.65230244", "0.6519794", "0.6508588", "0.6502287", "0.6499413", "0.6487062", "0.6487062", "0.6482029", "0.64816123", "0.6476519", "0.6474443", "0.6466635", "0.6466395", "0.6424214", "0.64162266", "0.6413741", "0.6412578", "0.64003515", "0.6385966", "0.63776743", "0.636751", "0.6364247", "0.636122", "0.6360042", "0.6348287", "0.6325668", "0.6322234", "0.63203275", "0.6311046", "0.63062966", "0.63033634", "0.6303071", "0.62951845", "0.627739", "0.6266388", "0.6248402", "0.6245652", "0.6240103", "0.6215762", "0.62149775", "0.62137085", "0.62097067", "0.6208608", "0.6206526", "0.6205404", "0.62028146", "0.6202535", "0.61989474", "0.6196556", "0.6186008", "0.6150244", "0.613704", "0.613527", "0.61266696", "0.61241966", "0.61231655", "0.61155033", "0.61058414", "0.6102321", "0.6094111", "0.6090523", "0.6067529", "0.6054431", "0.6053099", "0.60456264", "0.603419", "0.60266197", "0.6023101", "0.6014037", "0.60128105", "0.60014933", "0.5992677", "0.5982838", "0.5982838", "0.59776485" ]
0.0
-1
Sinusoid position encoding table
def get_sinusoid_encoding_table(n_position, d_hid, padding_idx=None):
    def cal_angle(position, hid_idx):
        return position / np.power(10000, 2 * (hid_idx // 2) / d_hid)

    def get_posi_angle_vec(position):
        return [cal_angle(position, hid_j) for hid_j in range(d_hid)]

    sinusoid_table = np.array([get_posi_angle_vec(pos_i) for pos_i in range(n_position)])

    sinusoid_table[:, 0::2] = np.sin(sinusoid_table[:, 0::2])  # dim 2i
    sinusoid_table[:, 1::2] = np.cos(sinusoid_table[:, 1::2])  # dim 2i+1

    if padding_idx is not None:
        # zero vector for padding dimension
        sinusoid_table[padding_idx] = 0.0

    return torch.FloatTensor(sinusoid_table)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_sinusoid_encoding_table(self, n_position, d_hid):\n denominator = torch.Tensor([\n 1.0 / np.power(10000, 2 * (hid_j // 2) / d_hid)\n for hid_j in range(d_hid)\n ])\n denominator = denominator.view(1, -1)\n pos_tensor = torch.arange(n_position).unsqueeze(-1).float()\n sinusoid_table = pos_tensor * denominator\n sinusoid_table[:, 0::2] = torch.sin(sinusoid_table[:, 0::2])\n sinusoid_table[:, 1::2] = torch.cos(sinusoid_table[:, 1::2])\n\n return sinusoid_table", "def _get_sinusoid_encoding_table(self, n_position, d_hid):\n denominator = torch.Tensor([\n 1.0 / np.power(10000, 2 * (hid_j // 2) / d_hid)\n for hid_j in range(d_hid)\n ])\n denominator = denominator.view(1, -1)\n pos_tensor = torch.arange(n_position).unsqueeze(-1).float()\n sinusoid_table = pos_tensor * denominator\n sinusoid_table[:, 0::2] = torch.sin(sinusoid_table[:, 0::2])\n sinusoid_table[:, 1::2] = torch.cos(sinusoid_table[:, 1::2])\n\n return sinusoid_table.unsqueeze(0)", "def get_sinusoid_encoding_table(n_position, d_hid, padding_idx=None):\n def cal_angle(position, hid_idx):\n return position / np.power(10000, 2 * (hid_idx // 2) / d_hid)\n\n def get_posi_angle_vec(position):\n return [cal_angle(position, hid_j) for hid_j in range(d_hid)]\n\n sinusoid_table = np.array([get_posi_angle_vec(pos_i) for pos_i in range(n_position)])\n\n sinusoid_table[:, 0::2] = np.sin(sinusoid_table[:, 0::2]) # dim 2i\n sinusoid_table[:, 1::2] = np.cos(sinusoid_table[:, 1::2]) # dim 2i+1\n\n if padding_idx is not None:\n # zero vector for padding dimension\n sinusoid_table[padding_idx] = 0.\n\n return torch.FloatTensor(sinusoid_table)", "def sinusoidal_position_encoding(num_positions, num_channels, position_rate=1.0, position_weight=1.0):\r\n\r\n position_encoding = np.array([\r\n [position_rate * pos / np.power(10000, 2 * (i // 2) / num_channels) for i in range(num_channels)]\r\n if pos != 0 else np.zeros(num_channels) for pos in range(num_positions)])\r\n\r\n position_encoding[:, 0::2] = np.sin(position_encoding[:, 0::2]) # even i\r\n position_encoding[:, 1::2] = np.cos(position_encoding[:, 1::2]) # odd i\r\n\r\n return position_weight * position_encoding.T", "def sine_table(cls, entry):\n exact = 3438 * sin_degrees(entry * angle(0, 225, 0))\n error = 0.215 * signum(exact) * signum(abs(exact) - 1716)\n return iround(exact + error) / 3438", "def _position_encoding_init(max_length, dim):\n position_enc = np.arange(max_length).reshape((-1, 1)) \\\n / (np.power(10000, (2. 
/ dim) * np.arange(dim).reshape((1, -1))))\n position_enc[:, 0::2] = np.sin(position_enc[:, 0::2])\n position_enc[:, 1::2] = np.cos(position_enc[:, 1::2])\n return position_enc", "def get_sine_position_encoding(self, HW, num_pos_feats=64, temperature=10000, normalize=False, scale=None):\n if scale is not None and normalize is False:\n raise ValueError('normalize should be True if scale is passed')\n if scale is None:\n scale = 2 * math.pi\n not_mask = torch.ones([1, HW[0], HW[1]])\n y_embed = not_mask.cumsum(1, dtype=torch.float32)\n x_embed = not_mask.cumsum(2, dtype=torch.float32)\n if normalize:\n eps = 1e-06\n y_embed = y_embed / (y_embed[:, -1:, :] + eps) * scale\n x_embed = x_embed / (x_embed[:, :, -1:] + eps) * scale\n dim_t = torch.arange(num_pos_feats, dtype=torch.float32)\n dim_t = temperature ** (2 * (dim_t // 2) / num_pos_feats)\n pos_x = x_embed[:, :, :, None] / dim_t\n pos_y = y_embed[:, :, :, None] / dim_t\n pos_x = torch.stack((pos_x[:, :, :, 0::2].sin(), pos_x[:, :, :, 1::2].cos()), dim=4).flatten(3)\n pos_y = torch.stack((pos_y[:, :, :, 0::2].sin(), pos_y[:, :, :, 1::2].cos()), dim=4).flatten(3)\n pos_embed = torch.cat((pos_y, pos_x), dim=3).permute(0, 3, 1, 2)\n return pos_embed.flatten(2).permute(0, 2, 1).contiguous()", "def Sinusoidal_Embeddings(positions, d_feature):\n inv_freq = 1 / (10000**(jnp.arange(0.0, d_feature, 2.0) / d_feature))\n sinusoid_freq = jnp.einsum('i,j->ij', positions, inv_freq)\n pos_emb = jnp.concatenate(\n [jnp.sin(sinusoid_freq), jnp.cos(sinusoid_freq)], axis=1)\n return pos_emb", "def encode_pos(i, j):\n return 3 * i + j", "def phi(self, s, x):\n\n s = np.array(s)\n\n # Shift the sequence down\n s[1:] = s[:self.phi_len - 1]\n\n # add the new state to the top\n s[:1] = x\n\n return s", "def position2(t):\n return 98.0 * exp(-t * 2.0 / 13.0) * sin(2 * pi * t)", "def positional_encoding(max_seq_len, dm):\n\n pos = np.arange(max_seq_len)[:, np.newaxis]\n i = 2 * (np.arange(dm)[np.newaxis, :]//2) / np.float32(dm)\n\n pev = pos / np.power(10000, i)\n\n # Applying SIN to odd indices\n pev[:, 0::2] = np.sin(pev[:, 0::2])\n\n # Applying COS to odd indices\n pev[:, 1::2] = np.cos(pev[:, 1::2])\n\n return pev", "def compute_kinoshita(s):\n length = int(NBENDS*LAMBDA/DS) + 1\n x = np.zeros(length)\n y = np.zeros(length)\n cur = np.zeros(length+1)\n theta = THETA0*np.sin(2*np.pi*s/LAMBDA) \\\n + THETA0**3*(JS*np.cos(6*np.pi*s/LAMBDA) \\\n - JF*np.sin(6*np.pi*s/LAMBDA))\n theta[np.abs(theta)<ZERO] = 0\n for i in range(length):\n cossum, sinsum = 0, 0\n for j in range(i):\n cossum += DS*np.cos(theta[j])\n sinsum += DS*np.sin(theta[j])\n x[i] = 0 if np.abs(cossum) < ZERO else cossum\n y[i] = 0 if np.abs(sinsum) < ZERO else sinsum\n x = np.concatenate((x, np.array([x[-1]+x[1]-x[0]])))\n y = np.concatenate((y, np.array([y[-1]+y[1]-y[0]])))\n s = np.concatenate((s, np.array([s[-1]+DS])))\n theta = np.concatenate((theta, np.array([theta[-1]])))\n if FLIPSTRM:\n x = x[::-1]\n y = y[::-1]\n theta = np.concatenate((theta[::-1][1:], np.array([theta[0]])))\n for i in range(1, length):\n cur[i] = (theta[i]-theta[i-1])/DS\n cur[i] = 0 if np.abs(cur[i]) < ZERO else cur[i]\n cur[0], cur[-1] = cur[-2], cur[1]\n return s, x, y, cur, theta", "def scipyTranform(self,s):\n l=len(s)\n wo=2*math.pi/l\n a=2/scipy.integrate.quad(math.cos(n*wo*t),t,-inf,inf)\n b=2/scipy.integrate.quad(math.sin(n*wo*t),t,-inf,inf)", "def _body_coord(self):\r\n cth = np.cos(self.theta)\r\n sth = np.sin(self.theta)\r\n M = self.P - 0.5 * np.diag(self.lengths)\r\n # stores the vector from the center of 
mass to the nose\r\n c2n = np.array([np.dot(M[self.nose], cth), np.dot(M[self.nose], sth)])\r\n # absolute position of nose\r\n T = -self.pos_cm - c2n - self.goal\r\n # rotating coordinate such that nose is axis-aligned (nose frame)\r\n # (no effect when \\theta_{nose} = 0)\r\n c2n_x = np.array([cth[self.nose], sth[self.nose]])\r\n c2n_y = np.array([-sth[self.nose], cth[self.nose]])\r\n Tcn = np.array([np.sum(T * c2n_x), np.sum(T * c2n_y)])\r\n\r\n # velocity at each joint relative to center of mass velocity\r\n vx = -np.dot(M, sth * self.dtheta)\r\n vy = np.dot(M, cth * self.dtheta)\r\n # velocity at nose (world frame) relative to center of mass velocity\r\n v2n = np.array([vx[self.nose], vy[self.nose]])\r\n # rotating nose velocity to be in nose frame\r\n Vcn = np.array([np.sum((self.v_cm + v2n) * c2n_x),\r\n np.sum((self.v_cm + v2n) * c2n_y)])\r\n # angles should be in [-pi, pi]\r\n ang = np.mod(\r\n self.theta[1:] - self.theta[:-1] + np.pi,\r\n 2 * np.pi) - np.pi\r\n return Tcn, ang, Vcn, self.dtheta", "def SEN_points(ix, iy, w_sen, n_w_sen, t_m, SEN_offset):\n p0 = (ix, iy)\n p1 = (ix - w_sen + n_w_sen / 2 - 3 * SEN_offset / 4, iy)\n p2 = (ix - w_sen + n_w_sen / 2 - 3 * SEN_offset / 4, iy + t_m)\n p3 = (ix - n_w_sen / 2 - SEN_offset / 4, iy + t_m)\n p4 = (ix - n_w_sen / 2 - SEN_offset / 4, iy + 3 * t_m)\n p5 = (ix + w_sen - n_w_sen / 2 + 3 * SEN_offset / 4, iy + 3 * t_m)\n p6 = (ix + w_sen - n_w_sen / 2 + 3 * SEN_offset / 4, iy + 2 * t_m)\n p7 = (ix + n_w_sen / 2 + SEN_offset / 4, iy + 2 * t_m)\n p8 = (ix + n_w_sen / 2 + SEN_offset / 4, iy)\n\n return p0, p1, p2, p3, p4, p5, p6, p7, p8", "def Motion_estimate_inverse(symbols_stream, block_size=16):\n print(\"####\")\n print(symbols_stream[17870:17890])\n data = []\n nb_dct_bloc = height//8*width//8+2*height//16*width//16\n n = symbols_stream.size\n\n block_start_id = 0\n block_end_id = block_start_id+1\n frame_end_id = Motion_estimate_inverse_1frame(symbols_stream,0,\n 'I', block_size)\n \n print(symbols_stream[0:frame_end_id].shape)\n I_tm1 = DCT_inverse(symbols_stream[0:frame_end_id],offset=128)\n data.append(np.array(I_tm1))\n frame_start_id = frame_end_id\n \n while frame_start_id != n:\n # I\n frame_end_id = Motion_estimate_inverse_1frame(symbols_stream,\n frame_start_id,\n 'I', block_size)\n print(symbols_stream[frame_start_id:frame_end_id].shape)\n I_tp1 = DCT_inverse(symbols_stream[frame_start_id:frame_end_id],\n offset=128)\n frame_start_id = frame_end_id\n\n # P\n if frame_start_id != n:\n frame_end_id = Motion_estimate_inverse_1frame(symbols_stream,\n frame_start_id,\n 'P', block_size)\n P = symbols_stream[frame_start_id:frame_end_id]\n print(symbols_stream[frame_start_id:frame_end_id].shape)\n frame_start_id = frame_end_id\n P_frame = Motion_estimate_reverse_1frame(I_tm1,I_tp1,P,block_size)\n data.append(np.array(P_frame))\n\n data.append(np.array(I_tp1))\n\n I_tm1 = np.array(I_tp1)\n\n\n\n return data", "def ion2_position(self,x,y,z):\n axes_vector = np.array([self.a,self.b,self.c])\n self.ion2 = x*self.a + y*self.b + z*self.c\n self.position['1B'] = np.dot(self.position_map[1],axes_vector) + self.ion2\n self.position['2B'] = np.dot(self.position_map[2],axes_vector) + self.ion2\n self.position['3B'] = np.dot(self.position_map[3],axes_vector) + self.ion2\n self.position['4B'] = np.dot(self.position_map[4],axes_vector) + self.ion2\n self.position['5B'] = np.dot(self.position_map[5],axes_vector) + self.ion2\n self.position['6B'] = np.dot(self.position_map[6],axes_vector) + self.ion2\n self.position['7B'] = 
np.dot(self.position_map[7],axes_vector) + self.ion2\n self.position['8B'] = np.dot(self.position_map[8],axes_vector) + self.ion2", "def S(B, psi, eta):\r\n def S_i(r, n, psi):\r\n \"\"\"Shape function\"\"\"\r\n value = K(r,n)*(psi**r)*(1.-psi)**(n-r)\r\n return value\r\n\r\n # Bersntein Polynomial\r\n def K(r,n):\r\n K=math.factorial(n)/(math.factorial(r)*math.factorial(n-r))\r\n return K\r\n\r\n Ny = len(B)-1\r\n Nx = len(B[0])-1\r\n\r\n output = np.zeros(psi.shape)\r\n for i in range(Nx+1):\r\n for j in range(Ny+1):\r\n output += B[j][i]*S_i(i, Nx, psi)*S_i(j, Ny, eta)\r\n return output", "def _sind(v):\n return math.sin(math.radians(v))", "def positionalEncoding(p_max, d_model, min_freq=1e-4):\n freqs = min_freq**(2.0*(np.arange(d_model)//2)/d_model)\n p = np.arange(p_max)\n matrix = p[:, np.newaxis]*freqs[np.newaxis, :]\n matrix[:,::2] = tf.math.sin(matrix[:,::2])\n matrix[:,1::2] = tf.math.cos(matrix[:,1::2])\n pos_encoding = tf.expand_dims(matrix, 0)\n pos_encoding = tf.cast(pos_encoding, tf.float32)\n return pos_encoding", "def init_m(pos):\n x, y, z = pos\n\n k = 0.1\n nx = k * (x - 0.5)\n\n return (0, np.sin(nx), np.cos(nx))", "def get_stain_matrix(I):", "def encode_positions(self,\n positions: mx.sym.Symbol,\n data: mx.sym.Symbol) -> mx.sym.Symbol:\n pass", "def test_pmt_pos_nt():\r\n pandas.DataFrame(straxen.pmt_positions(False))", "def positional_encoding(position, d_model):\r\n def get_angles(position, i, d_model):\r\n angle_rates = 1 / np.power(10000, (2 * (i // 2)) / np.float32(d_model))\r\n return position * angle_rates # (position, d_model)\r\n\r\n angle_rads = get_angles(position=np.arange(position)[:, np.newaxis],\r\n i=np.arange(d_model)[np.newaxis, :],\r\n d_model=d_model)\r\n\r\n # Apply sin to even indices in the array\r\n angle_rads[:, 0::2] = np.sin(angle_rads[:, 0::2])\r\n\r\n # Apply cos to odd indices in the array\r\n angle_rads[:, 1::2] = np.cos(angle_rads[:, 1::2])\r\n\r\n pos_encoding = angle_rads[np.newaxis, ...]\r\n return tf.cast(pos_encoding, dtype=tf.float32) # (1, position, d_model)\r", "def sign(self,msg,s):\n # $y_s = E_k^{-1} \\left( y_{s+1} \\oplus \\dots E_k^{-1} \\left( y_n \\oplus E_k^{-1} \\left(z\\right)\\right)\\dots\\right) \\oplus E_k \\left( y_{s-1} \\oplus \\dots E_k \\left( y_1 \\oplus v \\right)\\dots\\right)$\n self.permut(msg)\n x,y = [],[]\n for i in range(len(self.kl)):\n if i != s:\n xi = random.randint(0,2**self.l-1)\n x.append(xi)\n y.append(self.trap(xi,self.kl[i].e,self.kl[i].n))\n vi = reduce (lambda vi,i:self.E(i^vi),y[s:],0)\n yc = reduce (lambda yc,i:self.E(i^yc),y[:s],vi)\n x.insert(s,self.trap(yc,self.kl[s].d,self.kl[s].n))\n return itob64(vi) + ' ' + ' '.join('%s'%itob64(xi) for xi in x)", "def _position_encoding_init_BST(max_length, dim):\n # Assume position_enc is the p(vt) - p(vi) fed as input\n position_enc = np.arange(max_length).reshape((-1, 1)) \\\n / (np.power(10000, (2. 
/ dim) * np.arange(dim).reshape((1, -1))))\n return position_enc", "def test_pmt_pos_1t():\r\n pandas.DataFrame(straxen.pmt_positions(True))", "def test_Symmetry_nosym_s2p_map(nacl_unitcell_order1: PhonopyAtoms):\n ph = Phonopy(\n nacl_unitcell_order1,\n supercell_matrix=[2, 2, 2],\n primitive_matrix=\"F\",\n is_symmetry=False,\n )\n # for i, v in enumerate(ph.symmetry.symmetry_operations[\"translations\"]):\n # print(\"[\", \", \".join(f\"{x}\" for x in v), \"],\")\n np.testing.assert_equal(\n ph.symmetry.symmetry_operations[\"translations\"],\n [\n [0.0, 0.0, 0.0],\n [0.5, 0.0, 0.0],\n [0.0, 0.5, 0.0],\n [0.5, 0.5, 0.0],\n [0.0, 0.0, 0.5],\n [0.5, 0.0, 0.5],\n [0.0, 0.5, 0.5],\n [0.5, 0.5, 0.5],\n [0.0, 0.25, 0.25],\n [0.5, 0.25, 0.25],\n [0.0, 0.75, 0.25],\n [0.5, 0.75, 0.25],\n [0.0, 0.25, 0.75],\n [0.5, 0.25, 0.75],\n [0.0, 0.75, 0.75],\n [0.5, 0.75, 0.75],\n [0.25, 0.0, 0.25],\n [0.75, 0.0, 0.25],\n [0.25, 0.5, 0.25],\n [0.75, 0.5, 0.25],\n [0.25, 0.0, 0.75],\n [0.75, 0.0, 0.75],\n [0.25, 0.5, 0.75],\n [0.75, 0.5, 0.75],\n [0.25, 0.25, 0.0],\n [0.75, 0.25, 0.0],\n [0.25, 0.75, 0.0],\n [0.75, 0.75, 0.0],\n [0.25, 0.25, 0.5],\n [0.75, 0.25, 0.5],\n [0.25, 0.75, 0.5],\n [0.75, 0.75, 0.5],\n ],\n )", "def Motion_estimate_inverse_1frame(symbols_stream, frame_start_id, frame_type, block_size):\n n = symbols_stream.size\n frame_end_id = frame_start_id\n\n if frame_type == 'P':\n frame_end_id += width//block_size*height//block_size*3 # motion vectors\n frame_end_id += 2\n for _ in range(nb_dct_bloc):\n frame_end_id += 1\n while frame_end_id < n and symbols_stream[frame_end_id-1] != 0:\n frame_end_id += 2\n return frame_end_id\n\n if frame_type == 'I':\n frame_end_id += 2\n for _ in range(nb_dct_bloc):\n frame_end_id += 1\n while frame_end_id < n and symbols_stream[frame_end_id-1] != 0:\n frame_end_id += 2\n return frame_end_id\n\n else :\n print(\"Motion_estimate_inverse_1frame error : invalide format \"\\\n \"specified.\")", "def ellipse_orientation(S):\n return 1/2 * np.arctan2(S[..., 2], S[..., 1])", "def position(t, x, y):\n return x * exp(-t * y) * sin(2 * pi * t)", "def get_direction_matrix(self) -> int:", "def ion1_position(self,x,y,z):\n axes_vector = np.array([self.a,self.b,self.c])\n self.ion1 = x*self.a + y*self.b + z*self.c\n self.position['1A'] = np.dot(self.position_map[1],axes_vector) + self.ion1\n self.position['2A'] = np.dot(self.position_map[2],axes_vector) + self.ion1\n self.position['3A'] = np.dot(self.position_map[3],axes_vector) + self.ion1\n self.position['4A'] = np.dot(self.position_map[4],axes_vector) + self.ion1\n self.position['5A'] = np.dot(self.position_map[5],axes_vector) + self.ion1\n self.position['6A'] = np.dot(self.position_map[6],axes_vector) + self.ion1\n self.position['7A'] = np.dot(self.position_map[7],axes_vector) + self.ion1\n self.position['8A'] = np.dot(self.position_map[8],axes_vector) + self.ion1", "def ion1_position(self,x,y,z):\n axes_vector = np.array([self.a,self.b,self.c])\n self.ion1 = x*self.a + y*self.b + z*self.c\n self.position['1A'] = np.dot(self.position_map[1],axes_vector) + self.ion1\n self.position['2A'] = np.dot(self.position_map[2],axes_vector) + self.ion1\n self.position['3A'] = np.dot(self.position_map[3],axes_vector) + self.ion1\n self.position['4A'] = np.dot(self.position_map[4],axes_vector) + self.ion1\n self.position['5A'] = np.dot(self.position_map[5],axes_vector) + self.ion1\n self.position['6A'] = np.dot(self.position_map[6],axes_vector) + self.ion1\n self.position['7A'] = np.dot(self.position_map[7],axes_vector) + 
self.ion1\n self.position['8A'] = np.dot(self.position_map[8],axes_vector) + self.ion1", "def translate_to_algebraic(location):\n\n columns = 'abcdefghi'\n return columns[location[0]] + str(location[1] + 1)", "def __coding_coordinate(self):\n region1 = self.long_side_len\n region2 = self.short_side_len\n length = len(self.seq)\n if self.direction == '+':\n a_s = 0\n a_e = region2\n b_s = self.length - region1\n b_e = self.length - 1\n elif self.direction == '-':\n a_s = 0\n a_e = region1\n b_s = self.length - region2\n b_e = self.length - 1\n return (a_s, a_e, b_s, b_e)", "def table_sncosmo(self):\n t = self.table\n\n zp = 25.0\n mag, magerr = t['magpsf'], t['sigmamagpsf']\n mjd = t['jdobs'] - 2400000.5\n flux = 10.**(-0.4*(mag-zp))\n eflux = flux * 0.4 * np.log(10.) * magerr\n zp = np.zeros(len(flux)) + zp\n\n mask = []\n zpsys = []\n band = []\n peakmag, peakmjd = 99,0.0\n for n,r in enumerate(t) :\n f = (r['instrument'], r['filter'])\n if r['magpsf'] > 90.: \n flux[n] = 0.\n eflux[n] = 10**(-0.4*(r['limmag']-zp[n]))/5.\n\n if f in self.filter_dict.keys():\n band.append(self.filter_dict[f])\n zpsys.append('ab')\n mask.append(True)\n else:\n mask.append(False)\n\n mask = np.array(mask, dtype=bool)\n out = Table(data=[mjd[mask], band, flux[mask], eflux[mask], zp[mask], zpsys],\n names=['mjd', 'band', 'flux', 'fluxerr', 'zp', 'zpsys'])\n out.meta['z'] = self.redshift\n if self.mwebv is not None:\n out.meta['mwebv'] = self.mwebv\n \n return out", "def computePosition(self, state):\n d = 0\n if state[5] == \"East\":\n d = 0\n elif state[5] == \"West\":\n d = 1\n elif state[5] == \"North\":\n d = 2\n else:\n d = 3\n return state[0]*64+state[1]*32+state[2]*16+state[3]*8+state[4]*4+d", "def _position_cylindrical2spherical(pos):\n\n rho=pos[:,0]\n theta_cylindrical=pos[:,1]\n z=pos[:,2]\n\n r=np.sqrt(rho**2+z**2)\n theta_spherical=np.arctan2(rho,z)\n phi=theta_cylindrical\n\n return np.dstack((r,theta_spherical,phi))[0]", "def displacement_table(pos, box, mic=True):\n drij = pos[:, np.newaxis] - pos[np.newaxis]\n if mic:\n disp_in_box(drij, box)\n return drij", "def __init__(self,\n size: int = 0,\n max_len: int = 5000):\n if size % 2 != 0:\n raise ValueError(\"Cannot use sin/cos positional encoding with \"\n \"odd dim (got dim={:d})\".format(size))\n pe = torch.zeros(max_len, size)\n pe.require_grad = False\n\n position = torch.arange(0, max_len).unsqueeze(1)\n div_term = torch.exp((torch.arange(0, size, 2, dtype=torch.float) *\n -(math.log(10000.0) / size)))\n pe[:, 0::2] = torch.sin(position.float() * div_term)\n pe[:, 1::2] = torch.cos(position.float() * div_term)\n pe = pe.unsqueeze(0) # shape: [1, size, max_len]\n super(PositionalEncoding, self).__init__()\n self.register_buffer('pe', pe)\n self.dim = size", "def _generate_uncompressed_position(latitude: float, longitude: float, symbol_table: str,\n symbol_id: str, ambiguity: int = 0) -> str:\n # Encode the latitude\n lat = APRSUtils.encode_uncompressed_latitude(latitude, ambiguity)\n\n # Encode the longitude\n lng = APRSUtils.encode_uncompressed_longitude(longitude, ambiguity)\n\n logger.debug(\"Latitude: {} ({}) Longitude: {}\".format(\n lat, ambiguity, lng\n ))\n\n # Parse the symbol table\n logger.debug(\"Symbol table: {}\".format(symbol_table))\n logger.debug(\"Symbol ID: {}\".format(symbol_id))\n\n info = f\"{lat}{symbol_table}{lng}{symbol_id}\"\n\n return info", "def spot1d_phi(infile, sequence):\n return np.loadtxt(infile, usecols=10, skiprows=1).reshape((1, -1, 1))", "def pos_to_high_freq_pos(pos):\n pos_high_freq = []\n for i in 
range(1, 11):\n freq = np.pi*2**i\n pos_high_freq.extend([\n sin(freq * pos[0]),\n sin(freq * pos[1]),\n sin(freq * pos[2]),\n ])\n pos_high_freq.extend([\n cos(freq * pos[0]),\n cos(freq * pos[1]),\n cos(freq * pos[2]),\n ])\n return pos_high_freq", "def bomb_vector(self):\n\n\t\tif self.b_offset == 0:\n\t\t\top = sin\n\t\telse:\n\t\t\top = cos\n\n\t\tself.y -= self.speed\n\t\tself.rect.y = self.y\n\t\t# MMMMMMMMMMMMMMMMMMAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAATHS\n\t\tself.x = int((self.g_settings.screen_height/2) + self.amplitude*op(self.frequency*((float(self.y)/self.g_settings.screen_width)*(2*pi) + (self.speed*time()))))\n\t\tif self.b_offset == 0:\n\t\t\tself.rect.x = self.x + self.position_x - 16\n\t\telif self.b_offset == 1:\n\t\t\tself.rect.x = self.x + self.position_x + 16\n\t\tself.screen.blit(self.image, self.rect)", "def getMatrix(self, frame):\n radian=np.radians(self.getValue(frame))\n c=np.cos(radian)\n s=np.sin(radian)\n self.matrix[2, 2]=c\n self.matrix[2, 0]=s\n self.matrix[0, 2]=-s\n self.matrix[0, 0]=c\n return self.matrix", "def position1(t):\n return 100.0 * exp(-t * 1.0 / 10.0) * sin(2 * pi * t)", "def csi(self):\n return self.table[0, 0] / (self.table[0, 0] + self.table[0, 1] + self.table[1, 0])", "def position(self):\n return self.atoms.reshape((1,-1))", "def positional_encoding(seq_len, embed_dim, timescale=10000):\n\n if embed_dim % 2 != 0:\n raise ValueError(\"Embedding dimension must be even\")\n\n positions = jnp.arange(seq_len)\n i = jnp.arange(embed_dim//2)\n angular_frequencies = 1/jnp.power(timescale, 2*i/embed_dim)\n\n angles = jnp.outer(positions, angular_frequencies)\n cosine = jnp.cos(angles) # seq_len, embed_dim // 2\n sine = jnp.sin(angles) # seq_len, embed_dim // 2\n\n pos_enc = jnp.concatenate([cosine, sine], axis=1)\n\n return pos_enc", "def generatePos(self):\n self.pos = np.zeros((self.num_points, 2), dtype='int32')\n self.pos[:, 1] = np.repeat(list(reversed(np.arange(1, self.x*2, 2))), self.y)\n self.pos[:, 0] = np.tile(np.arange(1, self.x*2, 2), self.y)", "def getMatrix(self, frame):\n radian=np.radians(self.getValue(frame))\n c=np.cos(radian)\n s=np.sin(radian)\n self.matrix[1, 1]=c\n self.matrix[1, 2]=s\n self.matrix[2, 1]=-s\n self.matrix[2, 2]=c\n return self.matrix", "def getMatrix(self, frame):\n radian=np.radians(self.getValue(frame))\n c=np.cos(radian)\n s=np.sin(radian)\n self.matrix[0, 0]=c\n self.matrix[0, 1]=s\n self.matrix[1, 0]=-s\n self.matrix[1, 1]=c\n return self.matrix", "def positional_encoding_2d(height, width, d_model):\n half_d = d_model // 2\n angle_x = get_angles(\n np.arange(width)[np.newaxis, :, np.newaxis],\n np.arange(d_model // 4)[np.newaxis, np.newaxis, :], half_d) # (1, width, d_model/4)\n angle_y = get_angles(\n np.arange(height)[:, np.newaxis, np.newaxis],\n np.arange(d_model // 4)[np.newaxis, np.newaxis, :], half_d) # (height, 1, d_model/4)\n\n pos_encoding = np.zeros([height, width, d_model])\n pos_encoding[:, :, 0:half_d:2] = np.sin(angle_x)\n pos_encoding[:, :, 1:half_d:2] = np.cos(angle_x)\n pos_encoding[:, :, half_d::2] = np.sin(angle_y)\n pos_encoding[:, :, half_d + 1::2] = np.cos(angle_y)\n return tf.cast(pos_encoding, dtype=tf.float32)", "def fd_pasym_head(self,pos):\n\t\tvalue = 0.0\n\t\ty = self.y\n\t\t#print('point:%d' % pos)\n\t\tfor j in range(-pos, 0):\n\t\t\t#print( \"\ty[%d] - y[%d] * c[%d][%d] \" % (pos+j, pos, pos, j) )\n\t\t\tvalue = value + ( y[pos+j] - y[pos] ) * 
self.coef_asym[pos][j]\n\t\tfor j in range(1,self.order - pos+1):\n\t\t\t#print( \"\ty[%d] - y[%d] * c[%d][%d] \" % (pos+j, pos, pos, j) )\n\t\t\tvalue = value + ( y[pos+j] - y[pos] ) * self.coef_asym[pos][j]\n\t\treturn value", "def positional_encoding(max_positions: int, d_model: int) -> tf.Tensor:\n\n def get_angles(pos, i, d_model):\n angle_rates = 1 / np.power(DEFAULT_POSITIONAL_BASE,\n (2 * (i // 2)) / np.float32(d_model))\n return pos * angle_rates\n\n angle_rads = get_angles(\n np.arange(max_positions)[:, np.newaxis],\n np.arange(d_model)[np.newaxis, :], d_model)\n\n # Apply sin to even indices in the array; 2i.\n angle_rads[:, 0::2] = np.sin(angle_rads[:, 0::2])\n\n # Apply cos to odd indices in the array; 2i+1.\n angle_rads[:, 1::2] = np.cos(angle_rads[:, 1::2])\n\n pos_encoding = angle_rads[np.newaxis, ...]\n\n return tf.cast(pos_encoding, dtype=tf.float32)", "def spot_coords(self,spot):\n if spot == '1':\n return (330 - 60 ,335 - 15)\n if spot == '2':\n return (419 - 60, 335 - 15)\n if spot == '3':\n return (591 - 60, 159 - 15)\n if spot == '4':\n return (588 - 60, 248 - 15)", "def position_encoding(sentence_size, embedding_size):\n encoding = np.ones((embedding_size, sentence_size), dtype=np.float32)\n ls = sentence_size+1\n le = embedding_size+1\n for i in range(1, le):\n for j in range(1, ls):\n encoding[i-1, j-1] = (i - (embedding_size+1)/2) * (j - (sentence_size+1)/2)\n encoding = 1 + 4 * encoding / embedding_size / sentence_size\n # Make position encoding of time words identity to avoid modifying them\n encoding[:, -1] = 1.0\n return np.transpose(encoding)", "def _get_movements_8n():\n s2 = math.sqrt(2)\n return [(1, 0, 1.0),\n (0, 1, 1.0),\n (-1, 0, 1.0),\n (0, -1, 1.0),\n (1, 1, s2),\n (-1, 1, s2),\n (-1, -1, s2),\n (1, -1, s2)]", "def sequence_to_zhuyin(self, sequence, add_eos=False, add_sos=False):\n\t\tindex_sequence = [self.zhuyin_ind['SOS']] if add_sos else []\n\n\t\tfor char in self.split_sequence(sequence):\n\t\t\tch = pinyin(char, style=Style.BOPOMOFO)[0][0][0]\n\t\t\tif ch not in self.zhuyin_table:\n\t\t\t\tindex_sequence.append((self.zhuyin_ind['UNK']))\n\t\t\telse:\n\t\t\t\tindex_sequence.append(self.zhuyin_ind[ch])\n\n\t\tif add_eos:\n\t\t\tindex_sequence.append(self.zhuyin_ind['EOS'])\n\n\t\treturn index_sequence", "def create_discrete_mapping_2dpos_to_4dsensation(self):\n\n # scan all possible positions (in [0,1]**2)\n coordinates = np.meshgrid(np.arange(0, 1, 1/self.environment_size[0]),\n np.arange(0, 1, 1/self.environment_size[1]))\n\n # create the pos2sensation_mapping\n pos2sensation_mapping = np.full((len(coordinates[0][0]), len(coordinates[0][1]), self.n_sensations), np.nan)\n for i in range(self.n_sensations):\n\n # draw random parameters (and ensure every even parameter is not too small)\n params = 4 * np.random.rand(12) - 2\n params[::2] = [0.25 * np.sign(val) if np.abs(val) < 0.25 else val for val in params[::2]]\n\n # generate the i-th sensation for all positions\n pos2sensation_mapping[:, :, i] \\\n = 1 / params[0] * np.cos(2 * np.pi * (np.round(params[0]) * coordinates[0] + params[1])) \\\n + 1 / params[2] * np.cos(2 * np.pi * (np.round(params[2]) * coordinates[0] + params[3])) \\\n + 1 / params[4] * np.cos(2 * np.pi * (np.round(params[4]) * coordinates[0] + params[5])) \\\n + 1 / params[6] * np.cos(2 * np.pi * (np.round(params[6]) * coordinates[1] + params[7])) \\\n + 1 / params[8] * np.cos(2 * np.pi * (np.round(params[8]) * coordinates[1] + params[9])) \\\n + 1 / params[10] * np.cos(2 * np.pi * (np.round(params[10]) * coordinates[1] + 
params[11]))\n\n return pos2sensation_mapping", "def BeamPosition():\n \n XPOS, YPOS = [], []\n\n x=0\n for j in range(0,6,1):\n x += 0.1\n y=0\n for k in range(0,6,1):\n y += 0.2\n XPOS.append(x)\n YPOS.append(y)\n\n return XPOS, YPOS", "def _random_spherical_position(u):\n n = u.size\n nhalf = n // 2\n cos_t = 2 * u[:nhalf] - 1\n phi = 2 * np.pi * u[nhalf:]\n\n sin_t = np.sqrt((1.0 - cos_t * cos_t))\n\n x = sin_t * np.cos(phi)\n y = sin_t * np.sin(phi)\n z = cos_t\n\n return x, y, z", "def fib_sphere_grid(npoints):\n\n phi = (1.0 + np.sqrt(5.0)) / 2.0\n\n i = np.arange(npoints, dtype=float)\n i2 = 2*i - (npoints-1)\n theta = (2.0*np.pi * i2/phi) % (2.*np.pi)\n sphi = i2/npoints\n phi = np.arccos(sphi)\n return theta, phi", "def sign(self,msg,s):\n # $y_s = E_k^{-1} \\left( y_{s+1} \\oplus \\dots E_k^{-1} \\left( y_n \\oplus E_k^{-1} \\left(z\\right)\\right)\\dots\\right) \\oplus E_k \\left( y_{s-1} \\oplus \\dots E_k \\left( y_1 \\oplus v \\right)\\dots\\right)$\n root_link,limit = 1234567,56 # find a more secure root and an optimal limit!\n link = itob64(pow(root_link,self.kl[s].p + self.kl[s].q,self.kl[s].n)%(1<<limit))\n #link = itob64(1000000L) easy to tally !\n self.permut(msg + link)\n x,y = [],[]\n for i in range(len(self.kl)):\n if i != s:\n xi = random.randint(0,2**self.l-1)\n x.append(xi)\n y.append(self.trap(xi,self.kl[i].e,self.kl[i].n))\n vi = reduce (lambda vi,i:self.E(i^vi),y[s:],0)\n yc = reduce (lambda yc,i:self.E(i^yc),y[:s],vi)\n x.insert(s,self.trap(yc,self.kl[s].d,self.kl[s].n))\n return link + ' ' + itob64(vi) + ' ' + ' '.join('%s'%itob64(xi) for xi in x)", "def _encode_pos(x, y):\n return struct.pack(_ENCODE_POS, x, y)", "def kolmomap(xx,yy,amp,wavelength,angle,phase):\n sinemap=sine2d(xx,yy,amp[0],wavelength[0],angle[0]/180.*pi,phase[0])*0.\n for counter in range(len(amp)):\n sinemap=sinemap+sine2d(xx,yy,amp[counter],wavelength[counter],angle[counter]/180.*pi,phase[counter])\n return sinemap", "def testStereo(self):\r\n smi_and_cansmi = [\r\n ('OC(=O)[C@@H](CCC(N)=O)N', 'NC(=O)CC[C@@H](N)C(=O)O'),\r\n ('OC(=O)[C@H](CCC(N)=O)N', 'NC(=O)CC[C@H](N)C(=O)O'),\r\n ('N[C@@H](C(O)=O)CCC(N)=O', 'NC(=O)CC[C@@H](N)C(=O)O'),\r\n ('N[C@H](C(O)=O)CCC(N)=O', 'NC(=O)CC[C@H](N)C(=O)O'),\r\n ('OC(=O)[C@H](N)CCC(N)=O', 'NC(=O)CC[C@@H](N)C(=O)O'),\r\n ('OC(=O)[C@@H](N)CCC(N)=O', 'NC(=O)CC[C@H](N)C(=O)O'),\r\n ('N[C@H](CCC(N)=O)C(O)=O', 'NC(=O)CC[C@@H](N)C(=O)O'),\r\n ('N[C@@H](CCC(N)=O)C(O)=O', 'NC(=O)CC[C@H](N)C(=O)O'),\r\n ('NC(=O)CC[C@@H](N)C(O)=O', 'NC(=O)CC[C@@H](N)C(=O)O'),\r\n ('NC(=O)CC[C@H](N)C(O)=O', 'NC(=O)CC[C@H](N)C(=O)O'),\r\n ('NC(=O)CC[C@H](C(O)=O)N', 'NC(=O)CC[C@@H](N)C(=O)O'),\r\n ('NC(=O)CC[C@@H](C(O)=O)N', 'NC(=O)CC[C@H](N)C(=O)O')]\r\n for smi, cansmi in smi_and_cansmi:\r\n mol = pybel.readstring(\"smi\", smi)\r\n self.assertEqual(mol.write(\"can\").split()[0],\r\n cansmi)", "def _encode_pos(self, x, y):\n return struct.pack(_ENCODE_POS, x, y)", "def get_movements_8n():\n s2 = math.sqrt(2)\n return [(1, 0, 1.0),\n (0, 1, 1.0),\n (-1, 0, 1.0),\n (0, -1, 1.0),\n (1, 1, s2),\n (-1, 1, s2),\n (-1, -1, s2),\n (1, -1, s2)]", "def invgeochart(w):\n # u = torch.asin(w[...,2])\n u = torch.acos(w[...,2])\n # v = torch.acos(w[...,0]/torch.cos(u))\n v = torch.atan(w[...,1]/w[...,0])\n return torch.stack((u,v+np.pi))", "def _position_spherical2cartesian(pos):\n \n r=pos[:,0]\n theta=pos[:,1]\n phi=pos[:,2]\n\n if any(theta>np.pi) or any(theta<0): #sanity check. not necessary for phi.\n raise ValueError, \"Theta beyond [0,pi]. 
Exiting.\"\n\n\n x=r*np.sin(theta)*np.cos(phi)\n y=r*np.sin(theta)*np.sin(phi)\n z=r*np.cos(theta)\n\n return np.dstack((x,y,z))[0]", "def sin_inplace(a):", "def __init__(s,i,j):\n # Posição do centro\n s.cx, s.cy = convert(i,j)\n # Cor (pode ser passada para o construtor no futuro)\n s.cor = (200,200,200)\n\n # Vértices do hexágono\n s.pontos = (\n (s.cx, s.cy-L),\n (s.cx+l, s.cy-L/2),\n (s.cx+l, s.cy+L/2),\n (s.cx, s.cy+L),\n (s.cx-l, s.cy+L/2),\n (s.cx-l, s.cy-L/2),\n )", "def stempot(self,xmax,ymax,nx,ny,atms,pixelshift,scalefactor):\n #zed=2 for rutherford scattering of the nucleus, less for screening\n zed = 1.7\n\n ix = numpy.arange(1.0,nx)\n iy = numpy.arange(1.0,ny)\n dx = xmax/nx\n dy = ymax/ny\n rx = numpy.arange(0,xmax-dx,dx)\n ry = numpy.arange(0,ymax-dy,dy)\n\n Zatom = atms.get_atomic_numbers()\n #translate atoms such that the center of mass is in the center of the computational cell\n com = atms.get_center_of_mass()\n #com = [ 44.40963074 , 44.65497562 , 44.90406073] #for AuNP\n #com = numpy.array(com)\n #print 'com',com -0.149836425, 0.29967285, 0\n #com += [0.41205016875, 0.6742639125, 0] #for rotated line profile \n #com += [-0.149836425, 0.29967285, 0] #for AuNP\n #com += pixelshift\n #print 'com+pixelshift',com\n cop = xmax/2.0\n trans = [cop-i for i in com]\n atms.translate(trans)\n positions=atms.get_positions()\n ax=[]\n ay=[]\n az=[]\n for o,t,h in positions:\n ax.append(o)\n ay.append(t)\n az.append(h)\n ax = numpy.array(ax)\n ay = numpy.array(ay)\n az = numpy.array(az)\n amax = len(Zatom)\n\n #find boundaries of slice\n axmin = min(ax)\n axmax = max(ax)\n aymin = min(ay)\n aymax = max(ay)\n\n V= numpy.zeros((nx,ny))\n\n #map x and y coords of the atoms to the nearest grid points\n #A fraction of the atom must be assigned to the closest gridpoints\n #to avoid sum and difference frequencies appearing in the image\n #grid point to the left of the atom\n ix = numpy.array([math.floor(axi/dx) for axi in ax])\n #apply periodic boundary conditions\n iax = numpy.array([math.fmod(iaxi,nx) for iaxi in ix])\n ibx = numpy.array([math.fmod(iaxi+1,nx) for iaxi in ix])\n #fraction of atom at iax\n fax = numpy.array([1-math.fmod((axi/dx),1 ) for axi in ax])\n #grid point above the atom\n iy = numpy.array([math.floor(ayi/dy) for ayi in ay])\n #apply periodic boundary conditions\n iay = numpy.array([math.fmod(iayi,ny) for iayi in iy])\n iby = numpy.array([math.fmod(iayi+1,ny) for iayi in iy])\n #fraction of atom at iay \n fay = numpy.array([1-math.fmod((ayi/dy),1 ) for ayi in ay])\n #Add each atom to the potential grid\n V1 = numpy.array([fax[i] * fay[i] * (Zatom[i]**zed) for i in range(len(fax))])\n V2 = numpy.array([(1-fax[i]) * fay[i] * (Zatom[i]**zed) for i in range(len(fax))])\n V3 = numpy.array([fax[i] * (1-fay[i]) * (Zatom[i]**zed) for i in range(len(fax))])\n V4 = numpy.array([(1-fax[i]) * (1-fay[i]) * (Zatom[i]**zed) for i in range(len(fax))])\n #V1 = numpy.array([fax[i] * fay[i] * scalefactor for i in range(len(fax))])\n #V2 = numpy.array([(1-fax[i]) * fay[i] * scalefactor for i in range(len(fax))])\n #V3 = numpy.array([fax[i] * (1-fay[i]) * scalefactor for i in range(len(fax))])\n #V4 = numpy.array([(1-fax[i]) * (1-fay[i]) * scalefactor for i in range(len(fax))])\n\n for j in range(amax):\n V[iax[j],iay[j]] += V1[j]\n V[ibx[j],iay[j]] += V2[j]\n V[iax[j],iby[j]] += V3[j]\n V[ibx[j],iby[j]] += V4[j]\n rev_trans = [-1.0*i for i in trans]\n atms.translate(rev_trans)\n return V", "def footprint_corner_indices():", "def m1_make_upper_shape_points_list(tx, ty, m1_info, 
SEN_info):\n \"\"\"\n 1 Get information from m1_info & SEN_info\n \"\"\"\n x_m1 = m1_info[0]\n y_m1 = m1_info[1]\n z_m = m1_info[2]\n\n m1_points = m1_info[3]\n\n m1_p0 = m1_points[0]\n m1_p1 = m1_points[1]\n m1_p2 = m1_points[2]\n m1_p3 = m1_points[3]\n\n w_sen = SEN_info[0]\n n_w_sen = SEN_info[1]\n h_sen = SEN_info[2]\n t_sen = SEN_info[3]\n u_n = SEN_info[4]\n l_n = SEN_info[5]\n set = SEN_info[6]\n u_offset = SEN_info[7]\n l_offset = SEN_info[8]\n\n \"\"\"\n 2 Make lists.\n upper_shape_upper_left_row list\n upper_shape_upper_right_row list\n\n upper_shape_lower_left_row list\n upper_shape_lower_right_row list\n \"\"\"\n # upper side\n upper_shape_upper_left_row = []\n upper_shape_upper_right_row = []\n\n for i in range(u_n):\n # left row\n ix = tx + t_sen\n iy = ty + (i * u_offset + set) + 10 # have to \"+\" something now its magic number\n\n p0, p1, p2, p3, p4, p5, p6, p7, p8 = Y_upper_shape_points(ix, iy, w_sen, t_sen, n_w_sen)\n left_points = [p1, p2, p3, p4, p5, p6, p7, p8]\n upper_shape_upper_left_row.extend((left_points))\n\n for i in range(u_n - 1, -1, -1):\n # right row\n ix = tx + (x_m1 - t_sen)\n iy = ty + (i * u_offset + set) + 10\n\n p0, p1, p2, p3, p4, p5, p6, p7, p8 = Y_upper_shape_points(ix, iy, w_sen, t_sen, n_w_sen)\n right_points = [p5, p6, p7, p8, p1, p2, p3, p4]\n upper_shape_upper_right_row.extend(right_points)\n\n # lower side\n upper_shape_lower_left_row = []\n upper_shape_lower_right_row = []\n\n for i in range(l_n -1, -1, -1):\n # left row\n ix = tx + t_sen\n iy = ty - (i * l_offset + set) - 10 # have to \"-\" something now its magic number\n\n p0, p1, p2, p3, p4, p5, p6, p7, p8 = Y_upper_shape_points(ix, iy, w_sen, t_sen, n_w_sen)\n left_points = [p1, p2, p3, p4, p5, p6, p7, p8]\n upper_shape_lower_left_row.extend((left_points))\n\n for i in range(l_n):\n # right row\n ix = tx + (x_m1 - t_sen)\n iy = ty - (i * l_offset + set) - 10\n\n p0, p1, p2, p3, p4, p5, p6, p7, p8 = Y_upper_shape_points(ix, iy, w_sen, t_sen, n_w_sen)\n right_points = [p5, p6, p7, p8, p1, p2, p3, p4]\n upper_shape_lower_right_row.extend(right_points)\n\n upper_shape_upper = [upper_shape_upper_left_row, upper_shape_upper_right_row]\n upper_shape_lower = [upper_shape_lower_left_row, upper_shape_lower_right_row]\n\n return upper_shape_upper, upper_shape_lower", "def anglor_phi(infile, sequence):\n return anglor(infile, sequence)", "def __init__(self, length):\n self.x = length\n self.y = length\n self._table = self.x * self.y", "def translateToFirstCutoffCell(pos):\r\n x = pos[0]\r\n y = pos[1]\r\n z = pos[2]\r\n \r\n while x >= Na:\r\n x = x - Na\r\n \r\n while y >= Nb:\r\n y = y - Nb\r\n \r\n while z >= Nc:\r\n z = z - Nc\r\n \r\n return (x,y,z)", "def __init__(self, height, width, mines):\n self.x = int(width)\n self.y = int(height)\n self.table_state = [\n ['-' for i in xrange(0, self.x)] for j in xrange(0, self.y)]\n self.mine_locations = self.generate_mines(int(mines))\n self.final_table = self.generate_answer()", "def _position_spherical2cylindrical(pos):\n \n\n r=pos[:,0]\n theta_spherical=pos[:,1]\n phi_spherical=pos[:,2]\n\n if any(theta_spherical>np.pi) or any(theta_spherical<0): #sanity check. not necessary for phi.\n raise ValueError, \"Theta beyond [0,pi]. 
Exiting.\"\n\n rho=r*np.sin(theta_spherical)\n theta_cylindrical=phi_spherical\n z=r*np.cos(theta_spherical)\n\n return np.dstack((rho,theta_cylindrical,z))[0]", "def sinh_inplace(a):", "def get_positional_encoding(seq_len, hidden_size):\n positions = tf.cast(tf.range(seq_len), 'float32')\n hidden_size //= 2\n log_increment = tf.math.log(10000.) / (tf.cast(hidden_size, 'float32') - 1)\n depths = tf.exp(tf.cast(tf.range(hidden_size), 'float32') * -log_increment)\n \n positional_encoding = tf.expand_dims(positions, 1) * tf.expand_dims(depths, 0)\n positional_encoding = tf.concat([tf.sin(positional_encoding), \n tf.cos(positional_encoding)], axis=1)\n return positional_encoding", "def position_from_seed(seed):\n random.seed(seed)\n ascii_character_sum = sum(bytearray(seed, \"utf8\")) # Sums the ASCII values of every character\n offset = random.randint(1, 100)\n start_position = (math.log(ascii_character_sum / 100) + offset, math.log(ascii_character_sum / 100) + offset)\n end_positon = (start_position[0] + 100, start_position[1] + 100)\n square_position = (start_position, end_positon)\n print(square_position)\n \n return square_position", "def find_start_pose(self):\n\n # Find start position\n y,x = [k for k,v in self.mp.items() if v == 94 or v == 60 \\\n or v == 62 or v == 118][0]\n\n\n # Assign orientation\n dy,dx, theta = 0,0, 0\n if self.mp[y,x] == ord('^'): theta = np.pi/2\n elif mp[y,x] == ord('<'): theta = -np.pi\n elif mp[y,x] == ord('>'): theta = 0\n else: theta = -np.pi/2\n\n return y, x, theta", "def position_encoding(sentence_size, embedding_size):\n encoding = np.ones((embedding_size, sentence_size), dtype=np.float32)\n ls = sentence_size+1\n le = embedding_size+1\n for i in range(1, le):\n for j in range(1, ls):\n encoding[i-1, j-1] = (i - (le-1)/2) * (j - (ls-1)/2)\n encoding = 1 + 4 * encoding / embedding_size / sentence_size\n return np.transpose(encoding)", "def transform(o):\n\t\t\tp = o.pos() - self.pos()\n\t\t\tx = cosa * p.x + sina * p.y\n\t\t\ty = -sina * p.x + cosa * p.y\n\t\t\treturn tuple(px_to_mm(x,y))", "def look_at(from_pos, to_pos):\r\n up = np.array([0, 0, 1]) \r\n n = normalize(from_pos - to_pos)\r\n u = normalize(np.cross(up, n))\r\n v = np.cross(n, u)\r\n mat = np.stack([u, v, n], axis=1).flatten()\r\n quat = np.zeros(4)\r\n functions.mju_mat2Quat(quat, mat)\r\n return quat", "def square(a, N):\n\tprint('For a square with side length of 2a = %.2f' % (2*a))\n\t\n\tm11_ = []\n\tm22_ = []\n\tm66_ = []\n\n\tfor N in N_list:\n\t\tprint 'With %d segments we have:' % N\n\t\t# Matrix for storing values achieved from the lhs of the equation\n\t\tA = zeros((N,N)) \n\n\t\t# Array(vector) on the rhs os the equation \n\t\tB11 = zeros(N) \n\t\tB22 = zeros(N)\t\t\t\t\t\t\t\t\t\t\t\n\t\tB66 = zeros(N)\n\n\t\tx = zeros(N+1)\n\t\ty = zeros(N+1)\n\n\t\tS1 = -a # Negative half of a side\n\t\tS2 = a # Positive half of the side\n\n\t\tN = N/4 * 4 \t# Total number of segments for all the four sides\t\t\t\t\t\t\t\t\t\t \n\t\tN1 = N/4 \n\t\t\t\t# Number of segments for one side\t\t\t\t\t\t\t\t\t\t \n\t\tx = zeros(N+1)\n\t\ty = zeros(N+1)\n\n\t\t#making points in all four sides\n\t\tfor i in range(N1+1):\n\t\t\tinc = a*(1-(cos(pi/N1*i))) # from a to 2a\n\n\t\t\t# Bottom side (Constant y=-a while x goes from -a to a)\n\t\t\tx[i] = S1 + inc\n\t\t\ty[i] = -a\n\n\t\t\t# Right side (Constant x=a while y goes from -a to a)\n\t\t\tx[i+N1] = a\n\t\t\ty[i+N1] = S1 + inc\n\n\t\t\t# Top side (Constant y=a while x goes from a to -a)\n\t\t\tx[i+2*N1] = -(S1 + inc)\n\t\t\ty[i+2*N1] = a\n\n\t\t\t# 
Left side (Constant x=-a while y goes from a to -a)\n\t\t\tx[i+3*N1] = -a\n\t\t\ty[i+3*N1] = -(S1 + inc)\n\n\t\txbar = (x[1:] + x[:-1])/2.0\n\t\tybar = (y[1:] + y[:-1])/2.0\n\n\t\t# Length of each segment dS = sqrt(d(x0,y0)^2 - d(x1,y1)^2)\n\t\t# (x,y) position minus the next (x,y) position\n\t\tdS = linalg.norm(array([x[1:],y[1:]])-array([x[:-1],y[:-1]]), axis=0) \n\n\t\t# Normal vector components of the segments\n\t\tn1 = -(y[1:] - y[:-1])/dS \t#-dy/dS\t\t\t\t\t \t \t\t \n\t\tn2 = (x[1:] - x[:-1])/dS \t#dx/dS\t\t\t\t\t\t\t \n\t\tn6 = (xbar*n2 - ybar*n1) \n\n\t\tfor i in range(N):\n\t\t\t# array transpose to get [x,y] position of a point on the circumference\n\t\t\t# Distance from midpoint in segment i to the starting/ending point of the\n\t\t\t# next/current segment\n\t\t\tr1 = linalg.norm(array([x[:-1],y[:-1]]).T - array([xbar[i], ybar[i]]), axis=1)\n\n\t\t\tr2 = linalg.norm(array([x[1:],y[1:]]).T - array([xbar[i], ybar[i]]), axis=1)\n\n\t\t\ttheta = -arccos((dS**2 - r2**2 - r1**2)/(-2*r2*r1)) \n\t\t\ttheta[isnan(theta)] = 0\t\n\n\t\t\t#Calculates the right-hand side of the integral (24)\n\t\t\th11 = (log(r1)+log(r2))*0.5*dS \n\t\t\th22 = (log(r1)+log(r2))*0.5*dS\t\t \n\t\t\th66 = (log(r1)+log(r2))*0.5*dS\n\n\t\t\t#Adds the angles to the matrix A\t \n\t\t\tA[i] = theta # N matrices that are NxN\n\t\t\tfill_diagonal(A,-pi) #replace diagonal entries with -pi\n\n\t\t\t#Adds rhs to the B-arrays\t\t\t\t\t \n\t\t\tB11[i] = sum(n1*h11) \t\t\t\t\t\t\t\t\t\t \n\t\t\tB22[i] = sum(n2*h22) \t\t\t\t\t\t\t\t \n\t\t\tB66[i] = sum(n6*h66) \n\n\t\t# Calculates phi for the three directions\n\t\t# Solve the linear matrix equation A*phi=B\n\t\tphi11 = linalg.solve(A,B11)\t\t\t\t\t\t\t \n\t\tphi22 = linalg.solve(A,B22)\t\t\t\t\t\t\t\t \n\t\tphi66 = linalg.solve(A,B66)\n\n\t\t# Calculates the added mass coefficients\n\t\tm11 = sum(phi11*n1*dS)\n\t\tm11_.append(m11)\n\n\t\tm22 = sum(phi22*n2*dS)\n\t\tm22_.append(m22)\n\n\t\tm66 = sum(phi66*n6*dS)\n\t\tm66_.append(m66)\t\n\n\t\tExact_m11 = 4.754*a**2\n\t\tError_m11 = Exact_m11-m11\n\t\tprint('The error in the added mass coefficient m11 is %.5f' % Error_m11)\n\n\t\tExact_m22 = 4.754*a**2\n\t\tError_m22 = Exact_m22-m22\n\t\tprint('The error in the added mass coefficient m22 is %.5f' % Error_m22)\n\n\t\tExact_m66 = 0.725*a**4\n\t\tError_m66 = Exact_m66-m66\n\t\tprint('The error in the added mass coefficient m66 is %.5f' % Error_m66)\n\t\tprint\n\n\treturn m11_, Exact_m11, m22_, Exact_m22, m66_, Exact_m66", "def map_position(pos):\n\n posiction_dict = dict(zip(range(1, 17), [i for i in range(30, 62) if i % 2]))\n return posiction_dict[pos]", "def Cardioid(self, a):\n t = range(-180,180)\n a = float(a)\n x = []\n y = []\n for i in t:\n i = self.deg2rad(i)\n x.append(a*(2*math.cos(i) - math.cos(2*i)))\n y.append(a*(2*math.sin(i) - math.sin(2*i)))\n return x, y", "def big_psi(sun_pos, sat_3d_pos):\n return np.arccos(np.dot(sun_pos.T, sat_3d_pos) / (vector_magnitude(sun_pos[0], sun_pos[1], sun_pos[2]) * vector_magnitude(sat_3d_pos[0], sat_3d_pos[1], sat_3d_pos[2])))", "def order_spat_pos(self):\n # ToDo: We somehow need to automate this.\n ## For OLD data, i.e. 
before 2017\n #ord_spat_pos = np.array([0.06054688, 0.14160156, 0.17089844, 0.22753906, 0.27539062,\n # 0.32128906, 0.36474609, 0.40673828, 0.45019531, 0.48974609,\n # 0.52978516, 0.56054688, 0.59814453, 0.63378906, 0.66503906,\n # 0.70019531, 0.7421875 , 0.77978516, 0.82763672, 0.87109375,\n # 0.9296875])\n ## For NEW data\n ord_spat_pos = np.array([0.078125, 0.13769531, 0.19189453, 0.24414062, 0.29296875,\n 0.34179688, 0.38330078, 0.42724609, 0.46582031, 0.50439453,\n 0.54199219, 0.57763672, 0.61279297, 0.6484375 , 0.68457031,\n 0.71875 , 0.75439453, 0.79443359, 0.83789062, 0.88671875,\n 0.94091797])\n return ord_spat_pos", "def eight(radius = 15, resolution = 20, robotHeight = -90, n = 1):\n \n t = np.linspace(0, n*2*m.pi, resolution*n)\n eightPos = []\n for num in t:\n x = -m.sin(num)*radius\n y = m.cos(num)*radius - radius\n eightPos.append([x, y, robotHeight, 0, 0, 0, 'mov'])\n\n eightPos.append([x, y, robotHeight, 0, 0, 0, 'mov'])\n\n for num in t:\n x = -m.sin(num)*radius\n y = -m.cos(num)*radius + radius\n eightPos.append([x, y, robotHeight, 0, 0, 0, 'mov'])\n\n eightPos.append([0,0,-127,0,0,0,'mov'])\n return eightPos", "def create_synt_desmat(self,gridpoints):\n n_gp = len(gridpoints)\n self.synt_des_mat = np.zeros([n_gp, self.p])\n self.synt_des_mat[:,0] = 1.0\n for i in range(n_gp):\n for j in range(1,self.p):\n self.synt_des_mat[i,j] = self.phi(gridpoints[i],j)", "def omega(self):\n self.cosineSequences()", "def state_to_position(self, state):\r\n dir = state % 4\r\n col = ((state - dir) / 4) % self.width\r\n row = (state - dir - col * 4) / (4 * self.width)\r\n return row, col, dir" ]
[ "0.7326898", "0.72803426", "0.66477567", "0.6008105", "0.6005823", "0.59894025", "0.56817", "0.5652626", "0.56051815", "0.5601691", "0.5469565", "0.5458949", "0.5339877", "0.53386784", "0.53131545", "0.5309653", "0.52948576", "0.52931553", "0.52630687", "0.5255187", "0.52443165", "0.5242418", "0.5234618", "0.5234609", "0.5225042", "0.52214974", "0.5208143", "0.5200203", "0.5191224", "0.51855063", "0.5172408", "0.5170684", "0.5154621", "0.5142869", "0.5141301", "0.5141301", "0.513588", "0.51257783", "0.5125045", "0.51119226", "0.50963306", "0.5093256", "0.50768745", "0.5075122", "0.5058682", "0.50585794", "0.50525314", "0.5039881", "0.5034599", "0.5030885", "0.5030102", "0.5020653", "0.50118697", "0.5000606", "0.4993538", "0.49798563", "0.49789175", "0.49761856", "0.49723628", "0.49694216", "0.49670824", "0.4965759", "0.4960881", "0.49597418", "0.49587235", "0.4932124", "0.49294057", "0.49234408", "0.49207145", "0.4918956", "0.49119267", "0.49116766", "0.49105763", "0.49041378", "0.49004292", "0.4900298", "0.48956165", "0.4894641", "0.48898613", "0.48885387", "0.48876864", "0.4885733", "0.48844674", "0.48842117", "0.48809382", "0.4880043", "0.48787174", "0.4878124", "0.4877517", "0.48773983", "0.48749408", "0.4861675", "0.4857681", "0.4850464", "0.4850174", "0.484769", "0.4841208", "0.48410758", "0.48367503", "0.48331523" ]
0.6543115
3
Process data and return preprocessor instance
def process_data(self, clip_name) -> Preprocessor:
    config: Config = Config.get_config()
    folder_name = config.video_data
    video_data_file = ''.join(clip_name.split('.')[:-1]) + '.json'
    video = Video.from_json(os.path.join(folder_name, video_data_file))
    # Convert to usable data types: period_running_person division and all fragment kinds
    preprocessor = Preprocessor(video)
    return preprocessor
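A minimal, self-contained sketch (not part of the dataset row above) that isolates the clip-name to JSON-name mapping used in the document; the helper name video_json_name is hypothetical. Note that ''.join silently drops interior dots from multi-dot clip names.

def video_json_name(clip_name: str) -> str:
    # Same derivation as in the document above: strip the extension,
    # join the remaining pieces without dots, append ".json".
    return ''.join(clip_name.split('.')[:-1]) + '.json'

print(video_json_name("match_01.mp4"))   # match_01.json
print(video_json_name("match.01.mp4"))   # match01.json (interior dot is dropped)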
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def preprocess(self,data):\n preprocessObj = PreprocessData()\n preprocess_data = preprocessObj.preprocess(data)\n return preprocess_data", "def preprocess(data):\n raise NotImplementedError", "def main():\n p = DataPreprocessor()\n p.preprocess_and_save_data(p.path_to_file)", "def buildProcessor( self ):\n from vrml.vrml97 import parseprocessor\n return parseprocessor.ParseProcessor()", "def\t_preprocessor(self) :\n\t\tlogging.debug('Beginning preprocessor')\n\t\t\n\t\t# Parse entries from ss class\n\t\tself._parse_initsol()\n\t\tself._parse_modelspace()\n\t\tself._parse_initbound()\n\t\t\n\t\t# Set regressors according to exptype\n\t\tself._set_regressors()\n\n\t\t# Deal with equations\n\t\tself.equations = self.ss.equations\n\n\t\t# Deal with noisy data ??", "def preprocess_dataset(data_parameters):\n\n assert data_parameters[\"input_file\"] is not None\n assert data_parameters[\"preprocessed_file\"] is not None\n\n # Checks if the dataset we are trying to create already exists\n data_preprocessor = Preprocessor(data_parameters)\n dataset_exists = data_preprocessor.check_existing_dataset()\n\n # If it doesn't, creates it\n if not dataset_exists:\n # Run the data_preprocessor\n data_preprocessor.create_bff_dataset()\n\n # Stores the processed data and a id. That id is based on the simulation\n # settings for the preprocessing part, and it's used to make sure future uses\n # of this preprocessed data match the desired simulation settings\n data_preprocessor.store_dataset()\n else:\n logging.info(\"The dataset already exists in %s, skipping the dataset creation \"\n \"steps!\", data_parameters['preprocessed_file'])\n return data_preprocessor", "def process(self, preprocess_args: Dict) -> None:\n dataset2preprocessor = {\n 'vhi': VHIPreprocessor,\n }\n\n for dataset, variables in preprocess_args.items():\n\n # check the format is as we expected\n assert dataset in dataset2preprocessor, \\\n f'{dataset} is not supported! Supported datasets are {dataset2preprocessor.keys()}'\n\n assert type(variables) is list, \\\n f'Expected {dataset} values to be a list. 
Got {type(variables)} instead'\n\n preprocessor = dataset2preprocessor[dataset](self.data)\n\n for variable in variables:\n preprocessor.preprocess(**variable)", "def pre_processor(self):", "def preprocess(self, data, label):\n\t\traise NotImplementedError", "def _preprocess_data_point(self, x: DataPoint) -> DataPoint:\n for preprocessor in self._pre:\n x = preprocessor(x)\n if x is None:\n raise ValueError(\"Preprocessor should not return None\")\n return x", "def run_preprocessor():\n\n prepro = PP()\n df = prepro.load_data()\n\n df.subject_ch = df.subject_ch.map(lambda x: convert_emoticons(str(x)))\n\n df = prepro.remove_html_tags()\n\n\n df = remove_unwanted_columns(df)\n df[\"weeks\"] = df['created_ch'].dt.week\n\n # remove characteres\n df = prepro.normalization()\n\n # run tokenizer\n df = prepro.tokenizations()\n\n # remove characteres\n df = prepro.stop_word_remover()\n\n # remove characteres\n df = prepro.stemming_lemmatization()\n\n return df\n\n\n\n\n # def replace_week_numbers(df):\n # \"\"\"\n # functiion that change week number from 1 to 19\n # \"\"\"\n \n # return sorted(df.weeks.unique())\n\n # df[\"weeks_num\"] = df[\"weeks\"].map(lambda x: replace_week_numbers(df).index(x)+1 if(x in replace_week_numbers(df)) else np.nan)", "def process_data(self, data):\n return data", "def preprocess(self):", "def _build_preprocessing(self):\n\n # For now, do nothing\n pass", "def process(proc_data):\n\n # No further processing\n return proc_data", "def preprocess(\n self, data: List[Dict[str, Any]]\n ) -> Generator[Dict[str, Any], None, None]:\n raise NotImplementedError", "def _preprocess_training_model(self, data):\n def _pre_process(raw_data):\n \"\"\" Pre-process raw data. \"\"\"\n pattern = re.compile(\n r\"((?<=')\\w\\d.*?(?=')|(?<=\\\")\\w\\d.*?(?=\\\")|[\\w\\d]+)\")\n words = re.findall(pattern, raw_data)\n return ' '.join(list(map(string_utils.snake_case_to_camel, words)))\n\n data_list = []\n # Preprocess the dataset with naming convention, etc.\n with Progress() as progress:\n preprocess_task = progress.add_task('Pre-processing dataset...',\n total=data.shape[0])\n for idx, row in data.iterrows():\n row_data = {}\n for column in ['text', 'key', 'value']:\n row_data[column] = _pre_process(row[column])\n data_list.append(row_data)\n progress.update(preprocess_task, advance=1)\n return pd.DataFrame(data=data_list)", "def post_preprocessor(data=None, **kw):\n logger.info(data)\n logger.info(\"start post_preprocessor\")\n model = data[model_name]\n data.clear()\n data.update(model)\n logger.info(data)\n logger.info(\"end post_preprocessor\")", "def _process(proc_data):\n int_list = {\n 'sent_bytes', 'packets_transmitted', 'packets_received', 'response_bytes', 'icmp_seq',\n 'ttl', 'duplicates', 'vr', 'hl', 'tos', 'len', 'id', 'flg', 'off', 'pro', 'cks'\n }\n\n float_list = {\n 'packet_loss_percent', 'round_trip_ms_min', 'round_trip_ms_avg', 'round_trip_ms_max',\n 'round_trip_ms_stddev', 'timestamp', 'time_ms'\n }\n\n for key in proc_data:\n if key in int_list:\n proc_data[key] = jc.utils.convert_to_int(proc_data[key])\n\n if key in float_list:\n proc_data[key] = jc.utils.convert_to_float(proc_data[key])\n\n return proc_data", "def handle(self, data, context):\n \n model_input = self.preprocess(data)\n model_out = self.inference(model_input)\n return self.postprocess(model_out)", "def preprocess(self):\n pass", "def preprocess(self):\n pass", "def preprocess(self):\n pass", "def preprocessor(self):\n return self._preprocessor", "def attach(self,\n preprocessor_type: 
PreprocessorsTypes,\n parent_extractor_type: ExtractorsTypes = None) -> None:\n # Check what arguments are needed for the current preprocessor\n arguments = {}\n if preprocessor_type == PreprocessorsTypes.N_GRAMS:\n charset = Charset[self._preprocessors_config[\"ngrams\"]\n [\"valid_charset\"]]\n arguments = {\n \"n\":\n self._preprocessors_config[\"ngrams\"][\"n\"],\n \"to_lowercase\":\n self._preprocessors_config[\"ngrams\"][\"to_lowercase\"],\n \"valid_charset\":\n charset\n }\n elif preprocessor_type == PreprocessorsTypes.GROUP_COUNTER:\n if (parent_extractor_type in [\n ExtractorsTypes.STATIC_OPCODES,\n ExtractorsTypes.DYNAMIC_OPCODES\n ]):\n arguments = {\n \"categories\":\n self._extractors_config[\"opcodes\"][\"categories\"],\n \"allow_multiple_categories\":\n self._extractors_config[\"opcodes\"]\n [\"allow_multiple_categories\"],\n \"verbose\":\n self._extractors_config[\"opcodes\"][\"verbose\"],\n \"min_ignored_percent\":\n self._extractors_config[\"opcodes\"][\"min_ignored_percent\"]\n }\n elif (parent_extractor_type in [\n ExtractorsTypes.STATIC_APIS, ExtractorsTypes.DYNAMIC_APIS\n ]):\n arguments = {\n \"categories\":\n self._extractors_config[\"apis\"][\"categories\"],\n \"allow_multiple_categories\":\n self._extractors_config[\"apis\"]\n [\"allow_multiple_categories\"],\n \"verbose\":\n self._extractors_config[\"apis\"][\"verbose\"],\n \"min_ignored_percent\":\n self._extractors_config[\"apis\"][\"min_ignored_percent\"]\n }\n\n # Create the preprocessor\n preprocessor = None\n if preprocessor_type == PreprocessorsTypes.IDENTITY:\n preprocessor = Identity()\n elif preprocessor_type == PreprocessorsTypes.BINARIZER:\n preprocessor = Binarizer()\n elif preprocessor_type == PreprocessorsTypes.K_BINS_DISCRETIZER:\n preprocessor = KBinsDiscretizer()\n\n # Save this column in case of imputation needs\n self._columns_to_be_filled.append(len(self._preprocessors))\n\n elif preprocessor_type == PreprocessorsTypes.COUNTER:\n preprocessor = Counter()\n elif preprocessor_type == PreprocessorsTypes.COUNT_VECTORIZER:\n preprocessor = CountVectorizer()\n elif preprocessor_type == PreprocessorsTypes.N_GRAMS:\n preprocessor = NGrams(**arguments)\n elif preprocessor_type == PreprocessorsTypes.GROUP_COUNTER:\n preprocessor = GroupCounter(**arguments)\n elif preprocessor_type == PreprocessorsTypes.SAME_LENGTH_IMPUTER:\n preprocessor = SameLengthImputer()\n\n self._preprocessors.append(preprocessor)", "def create_preprocess_transition(tokenizer: Tokenizer,\n include_regex, comment_regex) -> PseudoJunction:\n preprocessor_factory = partial(_preprocessor_factory,\n tokenizer=tokenizer, include_regex=include_regex, comment_regex=comment_regex)\n thread_safe_factory = ThreadLocalSingletonFactory(preprocessor_factory)\n # preprocess = partial(_preprocess, factory=thread_safe_factory)\n return PseudoJunction(thread_safe_factory) # , preprocess)", "def preprocess_main():", "def get_preprocess(self) -> Dict:\n raise NotImplementedError", "def preprocess(self, data):\n if self.mode == 'image':\n data = self.transpose(data)\n data = self.dilate(data)\n data = self.mask(data)\n\n if self.mode == 'histogram':\n data = self.flatten(data)\n data = self.mask(data)\n\n if self.mode == 'curve':\n if isinstance(data, np.ndarray) or (isinstance(data, list) and contains_numbers(data)):\n if hasattr(self, 'objects'):\n xdata = self.main_object.get_xdata()\n else:\n xdata = range(len(data))\n\n data = [xdata, data]\n\n smoothed = self.smooth(data[1].squeeze() if data[1].ndim > 1 else data[1])\n data = [*data, 
smoothed]\n\n if self.mode == 'loss':\n if isinstance(data, tuple):\n loss, lr = data\n else:\n loss, lr = data, None\n\n if loss is None:\n smoothed = None\n else:\n smoothed = self.smooth(loss)\n\n data = [loss, smoothed, lr]\n\n return data", "def get(self, uuid: str) -> Preprocessor:\n return self.preprocessors[uuid]", "def get_single_preprocessor(instance_id=None, **kw):\n pass", "def post_process(cls, data):\n return data", "def run(self, data):\n\t\t# no processing here\n\t\treturn data", "def get_image_preprocessor(self):\n image_size = self.model.get_image_size()\n input_data_type = get_data_type(self.params)\n\n shift_ratio = 0\n\n processor_class = self.dataset.get_image_preprocessor()\n assert processor_class\n return processor_class(\n image_size,\n image_size,\n self.batch_size * self.batch_group_size,\n len(self.devices) * self.batch_group_size,\n dtype=input_data_type,\n train=(not self.params.eval),\n distortions=self.params.distortions,\n resize_method=self.resize_method,\n shift_ratio=shift_ratio,\n summary_verbosity=self.params.summary_verbosity,\n distort_color_in_yiq=self.params.distort_color_in_yiq,\n fuse_decode_and_crop=self.params.fuse_decode_and_crop)", "def preprocess(config: Config) -> None:\n print(colored(\"preprocessing:\", attrs=[\"bold\"]))\n factory = PreprocessingFactory()\n factory.process(config)", "def build_preprocessors(md_instance, **kwargs):\r\n preprocessors = odict.OrderedDict()\r\n if md_instance.safeMode != 'escape':\r\n preprocessors[\"html_block\"] = HtmlBlockPreprocessor(md_instance)\r\n preprocessors[\"reference\"] = ReferencePreprocessor(md_instance)\r\n return preprocessors", "def process(self, data) :\n rData = Core.Processlib.Data()\n rData.frameNumber = data.frameNumber\n rData.buffer = self._worker.process(data.buffer)\n if self._writer: #optional HDF5 writer\n self._writer.write(rData.buffer, rData.frameNumber)\n return rData", "def _preprocess_fn(data):\n\n # Validate input\n if not isinstance(data, dict) or 'image' not in data:\n raise ValueError('Argument `data` must be a dictionary, '\n 'not %s' % str(type(data)))\n\n # Apply all the individual steps in sequence.\n image = data['image']\n image = decode_image(image)\n image = normalize_value_range(image)\n image = get_multiscale_patches(image, **preprocessing_kwargs)\n\n data['image'] = image\n return data", "def preprocess(self, data_f):\n \n return self.vec.transform(data_f.review)", "def process(self, data):\n return self.transformer.transform(data)", "def _preprocess(self, data):\n\n # pipeline: first call the previous statistics:\n if self.previous_statistics is not None:\n data = self.previous_statistics.statistics(data)\n # the first of the statistics need to take list as input, in order to match the API. Then actually the\n # transformations work on np.arrays. In fact the first statistic transforms the list to array. Therefore, the\n # following code needs to be called only if the self statistic is the first, i.e. 
it does not have a\n # previous_statistic element.\n else:\n data = self._check_and_transform_input(data)\n\n return data", "def fit(self, inputs: list) -> 'BasePreprocessor':", "def _process(self, data: np.ndarray) -> np.ndarray:", "def _process(self, data: np.ndarray) -> np.ndarray:", "def preprocess(self, par_dict, op_item, addon):\n result = par_dict\n\n if \"preprocess\" in op_item:\n\n for pre in [\n sub(\"\\s+\", \"\", i) for i in op_item[\"preprocess\"].split(\" --> \")\n ]:\n func_name = sub(\"^([^\\(\\)]+)\\(.+$\", \"\\\\1\", pre).strip()\n params_name = sub(\"^.+\\(([^\\(\\)]+)\\).*\", \"\\\\1\", pre).split(\",\")\n\n param_list = ()\n for param_name in params_name:\n param_list += (result[param_name],)\n\n # run function\n func = getattr(addon, func_name)\n res = func(*param_list)\n\n # substitute res to the current parameter in result\n for idx in range(len(res)):\n result[params_name[idx]] = res[idx]\n\n return result", "def _process(proc_data: List[Dict]) -> List[Dict]:\n return proc_data", "def _preprocess(self):\n self.data['sentences'] = self.data['text'].apply(self._tokenize_sent)\n self.data['nouns'] = self.data['sentences'].apply(self._get_nouns)\n # self._get_frequent_features()\n # self._compactness_pruning()\n # self._redundancy_pruning()\n # self._get_features()\n self._extract_opinions()", "def get_preprocess_fn(**preprocessing_kwargs):\n\n def _preprocess_fn(data):\n \"\"\"The preprocessing function that is returned.\"\"\"\n\n # Validate input\n if not isinstance(data, dict) or 'image' not in data:\n raise ValueError('Argument `data` must be a dictionary, '\n 'not %s' % str(type(data)))\n\n # Apply all the individual steps in sequence.\n image = data['image']\n image = decode_image(image)\n image = normalize_value_range(image)\n image = get_multiscale_patches(image, **preprocessing_kwargs)\n\n data['image'] = image\n return data\n\n return _preprocess_fn", "def construct_parser_preprocessor(subparser) -> None:\n\n def within_percent_interval(interval_str: str) -> float:\n \"\"\"\n Checks whether or not the given string representation of a floating\n point number is in the interval [0, 1].\n\n Parameters\n ----------\n interval_str: str :\n The string that needs to be checked for a valid value\n\n Returns\n -------\n float\n If valid, the number representation of interval_str\n\n Raises\n ------\n ValueError\n If interval_str cannot be converted to a floating pointer number\n ArgumentTypeError\n If the number is not within the interval [0, 1]\n \"\"\"\n interval = float(interval_str)\n if interval < 0 or interval > 1:\n raise ArgumentTypeError(\"Input given is out of bounds!\")\n\n return interval\n \"\"\"\n if subparser:\n preprocess_parser = subparser.add_parser(\n \"preprocess\",\n formatter_class=argparse.ArgumentDefaultsHelpFormatter,\n help=\"Preprocess given dataset\",\n )\n else:\n preprocess_parser = argparse.ArgumentParser(\n description='Preprocess given dataset',\n formatter_class=argparse.ArgumentDefaultsHelpFormatter\n )\n \"\"\"\n\n subparser.add_argument(\n 'input_dir', type=str, default=\"data_clean\", metavar=\"input-dir\",\n help='Input directory to preprocess'\n )\n\n subparser.add_argument(\n '-o', '--output-dir', type=str, default=\"filtered_data_clean\",\n help='Output directory to hold preprocessed data_clean'\n )\n\n subparser.add_argument(\n '--stopwords', type=str, default=None,\n help='Path to the .csv stop words file'\n )\n\n # Make file generation options mutually exclusive\n # Note, all 3 of the flags appear. 
However, we only want 1 of them to\n # appear.\n subparser.add_argument(\n '--export', type=str, default=\"single\",\n choices=[\"single\", \"split\", \"fasttext\", \"all\"],\n help='Indicate whether you only want a single file holding all of the '\n 'preprocessed data_clean, or both. If \"split\\\" was chosen, it '\n 'utilizes the \"--train-split\" argument to know how big to make the '\n 'training and testing sets. If fasttext is given, it utilizes the '\n '\"--slice-length\" argument to know how big to make each slice'\n )\n\n subparser.add_argument(\n '--slice_length', type=int, default=25,\n help=\"Number of tokens to have per slice of a file.\"\n )\n\n subparser.add_argument(\n '--train-split', type=within_percent_interval, default=.8,\n metavar=\"[0-1]\",\n help=\"Percentage in interval [0,1] of total data_clean going to the \\\n training dataset.\"\n )\n\n subparser.set_defaults(run=run_preprocessor)", "def preprocess(self, data, scope):\n if scope != 'train':\n # reshape\n data = self._data_reshape(data)\n\n # normalize\n if data.dtype == np.int16:\n start_unit = -1000\n end_unit = 300\n data = 2 * (data.astype(np.float32) - start_unit) / (end_unit - start_unit) - 1\n\n # subtract train mean and divide by train std\n if scope == 'train':\n self.mean = np.mean(data)\n data -= self.mean\n self.std = np.std(data)\n data /= self.std\n else:\n data -= self.mean\n data /= self.std\n\n # reshape for channel\n s = data.shape\n if len(data.shape) == 4:\n data = data.reshape((s[0], s[1], s[2], s[3], 1))\n else:\n data = data.reshape((s[0], s[1], s[2], 1))\n return data", "def load_preprocessor(dirpath: typing.Union[str, Path]) -> 'BasePreprocessor':\n dirpath = Path(dirpath)\n data_file_path = dirpath.joinpath(BasePreprocessor.DATA_FILENAME)\n return dill.load(open(data_file_path, mode='rb'))", "def get_preprocess(self) -> Dict:\n input_shape = get_input_shape(self.deploy_cfg)\n cfg = process_model_config(self.model_cfg, [''], input_shape)\n preprocess = cfg.data.test.pipeline\n return preprocess", "def get_definition(self, data_name='data', processors_name='processors'):\n\n if not self.pipeline_definition:\n with open(self._pipeline_template, 'r') as file_obj:\n template = jinja2.Template(file_obj.read())\n self.pipeline_definition = json.loads(template.render(\n data=data_name,\n processors=processors_name,\n valid_images=['%s:%s' % pair for pair in ALLOWED_IMAGES]\n ))\n\n return self.pipeline_definition", "def _preprocessor(self, X_raw, train):\n # =============================================================\n # YOUR CODE HERE\n\n df = X_raw.copy()\n drop_index = []\n original_index = df.index.tolist()\n if len(df.shape) == 1:\n df = df.to_frame().transpose()\n\n categorical_data_onehot = [\"city_district_code\",\"pol_coverage\", \"pol_pay_freq\", \"pol_payd\", \"pol_usage\", \"drv_drv2\", \"drv_sex1\", \"drv_sex2\", \"vh_fuel\", \"vh_type\", \"vh_make\"]\n categorical_data_hash = [\"pol_insee_code\",\"regional_department_code\",\"canton_code\"]\n drop_data = [\"id_policy\",\"commune_code\"]\n null_data = df.columns[df.isnull().sum()>0].tolist()\n \n #remove anomalies\n if train:\n df.drop(df.loc[df[\"drv_age_lic2\"]>df[\"drv_age2\"]].index,inplace=True)\n \n df.loc[:,\"drv_sex2\"] = df.loc[:,\"drv_sex2\"].fillna(value=0)\n if train:\n df.dropna(inplace=True)\n else:\n df = df.fillna(value=0)\n \n #scale continous data\n for col in [\"population\", \"pol_bonus\", \"pol_sit_duration\", \"town_mean_altitude\", \"town_surface_area\", \"vh_age\", \"vh_sale_begin\", \"vh_sale_end\", 
\"vh_value\", \"vh_speed\"]:\n df[col] = np.log(df[col]+1e-10)\n \n df.loc[:,\"pol_insee_code\"] = df.loc[:,\"pol_insee_code\"].str[:2]\n df.loc[:,\"vh\"] = df[\"vh_make\"].str.strip() + \"_\" + df[\"vh_model\"].str.strip()\n bool_df1 = df[\"vh_make\"].value_counts()>3000 #3000\n df.loc[:,\"vh_make\"].loc[~df[\"vh_make\"].isin(bool_df1[bool_df1].index.tolist())] = \"na\"\n df.loc[:,categorical_data_onehot] = df[categorical_data_onehot].astype(str)\n df.loc[:,categorical_data_hash] = df[categorical_data_hash].astype(str)\n\n bool_df2 = df[\"vh\"].value_counts()>1000 #1000\n popular_vh = bool_df2[bool_df2].index.tolist()\n df[\"vh_onehot\"] = df[\"vh\"]\n df[\"vh_hash\"] = df[\"vh\"]\n df[\"vh_onehot\"].loc[~df[\"vh\"].isin(popular_vh)] = \"na\"\n df[\"vh_hash\"].loc[df[\"vh\"].isin(popular_vh)] = \"na\"\n categorical_data_onehot.append(\"vh_onehot\")\n categorical_data_hash.append(\"vh_hash\")\n\n if train:\n self.onehot_enc = OneHotEncoder(sparse=False,handle_unknown=\"ignore\")\n onehot_mat = self.onehot_enc.fit_transform(df[categorical_data_onehot])\n\n #self.hash_enc = FeatureHasher(n_features=len(categorical_data_hash)*20,input_type=\"string\")\n #hash_mat = self.hash_enc.fit_transform(df[categorical_data_hash].values).toarray()\n\n else:\n onehot_mat = self.onehot_enc.transform(df[categorical_data_onehot])\n #hash_mat = self.hash_enc.transform(df[categorical_data_hash].values).toarray()\n\n\n drop_data += categorical_data_onehot\n drop_data += categorical_data_hash\n drop_data += [\"vh\",\"vh_make\",\"vh_model\"]\n\n df.drop(columns=drop_data,inplace=True)\n \n final_index = df.index.tolist()\n drop_index = list(set(original_index) - set(final_index))\n \n #X = np.concatenate((df.values,onehot_mat,hash_mat),axis=1)\n X = np.concatenate((df.values,onehot_mat),axis=1)\n\n\n \n return X,drop_index", "def preprocess_data(self, lidar_data):\n raise NotImplementedError(\"Lidar preprocessing object must implement a way \"\n \"to preprocess data\")", "def preproc_pipeline(data):\n # Preprocess\n data = preprocess(data)\n\n # Optional --> run a technical analysis on it and add more features\n data = generate_ta(data)\n \n # Split\n train_set, validation_set, test_set = train_val_test_split(data)\n \n # Set up for Keras\n train_set = shape_for_keras(train_set)\n validation_set = shape_for_keras(validation_set)\n test_set = shape_for_keras(test_set)\n\n # We could save this to csv.\n return train_set, validation_set, test_set", "def preprocess(self, cfg_pipeline):\n return", "def _construct(self, data):\n logging.info(\"overall constructing (enter)\")\n\n pre_construct_data = self._pre_construct(data)\n # Kickstart the seralizin'.\n\n #if it found no ids, then we can just use the pre construct data\n if any((len(ids) > 0 for label, ids in self.ids.iteritems())):\n self.data = collections.defaultdict(dict)\n\n\n for manip in self.manips:\n manip()\n\n logging.debug(\"constructing (enter)\")\n # extend the output using the collated data we've found\n data = self.construct(data)\n logging.debug(\"constructing (exit)\")\n\n logging.debug(\"overall constructing (exit)\")\n return data\n else:\n logging.debug(\"overall constructing (exit)\")\n return pre_construct_data", "def _preprocess_image(self, input_data):\n image = self.preprocessor.preprocess(input_data.images)\n return InputData(images=image, labels=input_data.labels)", "def _preprocess(self, data):\n transformed_data = _copy(data)\n for name, step in self._transformers[:-1]:\n transformed_data = step.fit_transform(transformed_data)\n if 
type(transformed_data) != _tc.SFrame:\n raise RuntimeError(\"The transform function in step '%s' did not\"\n \" return an SFrame (got %s instead).\" % (name,\n type(transformed_data).__name__))\n return transformed_data", "def preprocess(self, data):\n logger.info(str(data))\n text = data[0].get(\"data\")\n if text is None:\n text = data[0].get(\"body\") # with txt file\n if isinstance(text, dict):\n logger.info(\" ############## Got Dict !! ##########################\")\n input_text = text['text']\n else:\n input_text = text.decode('utf-8')\n max_length = int(self.setup_config[\"max_length\"])\n logger.info(\"Received text: '%s'\", input_text)\n\n logger.info(input_text)\n # input_text = \"안녕하세요? 반갑습니다. 오늘 날씨가 정말 끝내줘요. 너 너무 사랑스러워요\"\n inputs = self.tokenizer.encode(input_text, max_char_length=max_length, return_attention_mask=True)\n return inputs", "def compile_customization(data):\n data=data.split(\" \")\n optional_data=data[1:]\n data = data[0].split(\"@\")\n return_dict = {}\n return_dict['resize_type']=data[0]\n return_dict['width']=int(data[1])\n return_dict['height']=int(data[2])\n return_dict['padding_type']='reflect'\n return_dict['constant_color']=[255,0,0]\n\n if len(optional_data)!=0:\n optional_data=optional_data[0]\n if \",\" in optional_data:\n return_dict['padding_type']='constant'\n color_list=optional_data.split(\",\")\n return_dict['constant_color']=tuple([int(i) for i in color_list])\n else:\n return_dict['padding_type']=optional_data\n return return_dict", "def getPreProcessData(self, data:np.ndarray, dataIncludesLabels:bool) -> DataProcessorWithVisitor:\n scaledDataCategoryVisitor = ScaledDataCategoryVisitor()\n train_data = pd.read_csv(\"titanic/train.csv\")\n train_data = train_data.to_numpy().tolist()\n dataProcessorWithVisitor = DataProcessorWithVisitor(train_data, True, scaledDataCategoryVisitor)\n _, dataToCompareTo = dataProcessorWithVisitor.getProcessedData()\n \n dataProcessor = DataProcessorGaussAndCosine(data, dataIncludesLabels, dataCategoryVisitor=scaledDataCategoryVisitor, sigma=1.0, dataToCompareTo=dataToCompareTo)\n return dataProcessor", "def test_data_preprocessing(raw_data): \r\n\r\n # get data output\r\n data_output = raw_data[['Submitby Date Time', 'Challenge Manager', 'Challenge Copilot', 'Posting Date Date', 'Track',\r\n 'Technology List', 'First Place Prize', 'Num Registrations', 'Total Prize']]\r\n with open('cache/extended_columns.pkl', 'rb') as f:\r\n extended_columns = pickle.load(f)\r\n with open('cache/num_date_columns.pkl', 'rb') as f:\r\n max_date_columns = pickle.load(f)\r\n \r\n data_output = class_binaryzation_for_test(data_output, extended_columns)\r\n try:\r\n data_output = date_separation1(data_output, max_num_columns=NUM_DATE_COLUMNS)\r\n except:\r\n data_output = date_separation2(data_output)\r\n data_output = money_digitalization(data_output)\r\n data_output = get_date_in_days(data_output)\r\n data_output['Days from Posting to Submit'] = data_output['Submitby Date Time Days from 2016'] \\\r\n - data_output['Posting Date Date Days from 2016'] \r\n\r\n return data_output", "def pre_process(cls, *args, **kwargs):\n pass", "def pre_process(cls, *args, **kwargs):\n pass", "def pre_process(cls, *args, **kwargs):\n pass", "def pre_process(cls, *args, **kwargs):\n pass", "def __call__(self, *args, **kwargs):\n return self.current_processor(*args, **kwargs)", "def preprocess(self, data_group):\n\n input_data = data_group.preprocessed_case\n\n # Split Channels\n if self.channels is None:\n channel_subset = np.copy(input_data)\n else:\n 
all_channels = set(range(input_data.shape[-1]))\n remaining_channels = list(all_channels.difference(set(self.channels)))\n reminaing_channel_subset = np.take(input_data, remaining_channels, axis=-1)\n channel_subset = np.take(input_data, self.channels, axis=-1)\n\n # Merge Target Channels\n if self.merge_method == 'maximum':\n channel_subset = np.max(channel_subset, axis=-1)[..., np.newaxis]\n\n # Join Channels\n if self.channels is None:\n output_data = channel_subset\n else:\n output_data = np.concatenate((reminaing_channel_subset, channel_subset), axis=-1)\n\n data_group.preprocessed_case = output_data\n self.output_data = output_data", "def default_put_single_preprocessor(instance_id=None, data=None, **kw):\n logger.info(\"start put_single_preprocessor\")\n logger.info(data)\n model = data[model_name]\n data.clear()\n data.update(model)\n logger.info(\"end put_single_preprocessor\")\n pass", "def preprocess(self, train_file, validation_file, test_file):\n chardict, labeldict = self.make_dictionary(train_file, validation_file, test_file)\n print 'preparing training data'\n training = self.parse_file(train_file, chardict, labeldict)\n \n print 'preparing validation data'\n validation = self.parse_file(validation_file, chardict, labeldict)\n\n print 'preparing test data'\n test = self.parse_file(test_file, chardict, labeldict)\n\n return Data(training, validation, test, chardict, labeldict)", "def preprocess(self, preprocess_args: dict) -> \"Handle\":\n call_order = preprocess_args[\"call_order\"]\n return self.fork(self.__handle__.key or str(call_order))", "def preprocess(self, data_group):\n\n input_data = data_group.preprocessed_case\n output_shape = list(input_data.shape)\n output_shape[-1] = len(self.label_splits)\n output_data = np.zeros(output_shape)\n\n # Merge Target Channels\n if self.split_method == 'integer_levels':\n for label_idx, label in enumerate(self.label_splits):\n if type(label) is list:\n # This is a little clunky\n single_label_data = np.zeros(output_shape[0:-1])[..., np.newaxis]\n for index in label:\n single_label_data += np.where(input_data == index, 1, 0)\n single_label_data = np.where(single_label_data > 0, 1, 0)\n else:\n single_label_data = np.where(input_data == label, 1, 0)\n\n output_data[..., label_idx] = single_label_data[..., 0]\n\n data_group.preprocessed_case = output_data\n self.output_data = output_data", "def _process(self, data: np.ndarray) -> np.ndarray:\n\n # Step 1. Reorder the data.\n memory = self._reorder(data)\n\n # Step 2. 
Do the restless classification into counts.\n counts = [defaultdict(int) for _ in range(self._n_circuits)]\n prev_shot = \"0\" * self._num_qubits\n header = {\"memory_slots\": self._num_qubits}\n\n for idx, shot in enumerate(memory):\n shot = format_counts_memory(shot, header)\n\n restless_adjusted_shot = RestlessToCounts._restless_classify(shot, prev_shot)\n\n circuit_idx = idx % self._n_circuits\n\n counts[circuit_idx][restless_adjusted_shot] += 1\n\n prev_shot = shot\n\n return np.array([dict(counts_dict) for counts_dict in counts])", "def _data_process(self, v):\n pass", "def preprocess(self,text):\n return preprocess.get_tokens(text)", "def _pre_construct(self, data):\n logging.info(\"pre constructing (enter)\")\n self.ids = collections.defaultdict(set)\n self.collecting = True\n pre_construct_data = self.construct(data)\n self.collecting = False\n logging.info(\"pre constructing (exit)\")\n return pre_construct_data", "def preprocess_data(self, data, scale_data=True):\n print(\"preprocess_data not implemented\")\n return data", "def process_python(data, code):\n\tx=data\n\treturn eval(code)", "def parse_preproc(key, content):\n if inspect.isclass(key):\n pp = key(**content)\n key = pp.__class__.__name__.lower()\n elif key.lower() == 'none':\n pp = DummyNone()\n elif key.lower() == 'recenter':\n pp = StandardScaler(with_mean=True, with_std=False)\n elif key.lower() == 'standardize':\n pp = StandardScaler(with_mean=True, with_std=True)\n elif key.lower() == 'normalize':\n content.setdefault('norm', 'l2')\n # pp = Normalizer(norm=content[1][0])\n pp = Normalizer(**content)\n elif key.lower() == 'minmax':\n content.setdefault('feature_range', (0, 1))\n pp = MinMaxScaler(**content)\n else:\n pp = DummyNone()\n return (key, pp, 'preproc')", "def preprocess_data(self):\n # Fault and cavity models use same data and features. Get that now.\n signals = get_signal_names(cavities=['1', '2', '3', '4', '5', '6', '7', '8'],\n waveforms=['GMES', 'GASK', 'CRFP', 'DETA2'])\n\n # We need to crop, downsample, then do z-score. Any constant values are set to 0.001 manually.\n num_resample = 4096\n num_meta_columns = 8\n self.common_features_df = window_extractor(self.example, signals=signals, windows={'pre-fault': -1533.4},\n n_samples=7680, standardize=False, downsample=True,\n ds_kwargs={'num': num_resample})\n\n # The extractor makes a row per requested window plus some metadata. Columns are named\n # Sample_<sample_num>_<cav_num>_<signal>, and go Sample_1_1_GMES, Sample_2_1_GMES, ..., Sample_1_1_GASK, ....\n # We want to change this so that each column is all of the samples for 1_GMES, 1_GASK, ... 
as in the signal\n # order above.\n self.common_features_df = pd.DataFrame(\n self.common_features_df.iloc[0, num_meta_columns:].values.reshape(len(signals), -1).T, columns=signals)\n\n self.common_features_df = standard_scaling(self.common_features_df, fill=0.001)", "def _get_data_preprocessing_fns(self):\n # Create new functions with partial positional arguments assigned\n process_path_fn = \\\n partial(data_preprocessing.process_path,\n one_hot=self.ONE_HOT,\n num_classes=self._NUM_CLASSES,\n class_names=self._CLASS_NAMES)\n process_img_path_fn = data_preprocessing.process_img_path\n convert_format_fn = \\\n partial(data_preprocessing.convert_format,\n grayscale_in=self._GRAYSCALE_IN,\n grayscale_out=self._GRAYSCALE_OUT)\n random_rotate_fn = \\\n partial(data_preprocessing.random_rotate,\n stddev=self._ROTATE_STDDEV)\n random_zoom_fn = \\\n partial(data_preprocessing.random_zoom,\n max_percent=self._ZOOM_MAX_PERCENT,\n stddev=self._ZOOM_STDDEV,\n img_height=self._HEIGHT,\n img_width=self._WIDTH)\n resize_fn = \\\n partial(data_preprocessing.resize,\n height=self._HEIGHT,\n width=self._WIDTH)\n\n funcs = edict({'process_path': process_path_fn,\n 'process_img_path': process_img_path_fn,\n 'convert_format': convert_format_fn,\n 'random_rotate': random_rotate_fn,\n 'random_zoom': random_zoom_fn,\n 'resize': resize_fn})\n\n return funcs", "def preprocess(self, data):\n (w,h,f) = self.rawinputformat()\n dt = numpy.dtype(numpy.uint8)\n nb = numpy.frombuffer(data,dt,-1,0)\n actual_stream_width = (w&1)+w # hack, rather get this from the app sink\n if(actual_stream_width != self.reqsize):\n nb = nb.reshape(h,actual_stream_width,3)\n nb = nb[0:h,0:w,0:3] # crop to network input size\n else:\n nb = nb.reshape((actual_stream_width,actual_stream_width,3))\n img = nb.astype('float32')\n #Preprocess image\n #for i in range(3):\n # img[:,:,i] = (img[:,:,i] - self.mean[i]) * self.std[i]\n #img = resize(img/255.0,(w,h),1)\n img = img/255.0\n print(img.shape)\n #print(img[0,0,:])\n return img.astype(numpy.float16)", "def parse(self, doc):\n self.preprocessor.preprocess(doc)\n\n for extractor in self.extractors:\n extractor.extract(doc)\n\n return doc", "def make_processor(cls, fnc):\n #def wrapper(**kw):\n # name = fnc.__name__\n # fnc.__name__ = 'run'\n # return type()\n # pass", "def preprocess(self):\n raise RuntimeError(\"please implement this function!\")", "def get_preprocessed_seq_data(self, raw_data, cls):\n # Check that input data has unique ids\n self._check_unique_ids(raw_data)\n\n cls_id = int(self.class_name_to_class_id[cls])\n\n data_keys = ['gt_ids', 'tracker_ids', 'gt_dets', 'tracker_dets', 'similarity_scores']\n data = {key: [None] * raw_data['num_timesteps'] for key in data_keys}\n unique_gt_ids = []\n unique_tracker_ids = []\n num_gt_dets = 0\n num_tracker_dets = 0\n for t in range(raw_data['num_timesteps']):\n\n # Only extract relevant dets for this class for preproc and eval (cls)\n gt_class_mask = np.atleast_1d(raw_data['gt_classes'][t] == cls_id)\n gt_class_mask = gt_class_mask.astype(np.bool)\n gt_ids = raw_data['gt_ids'][t][gt_class_mask]\n gt_dets = [raw_data['gt_dets'][t][ind] for ind in range(len(gt_class_mask)) if gt_class_mask[ind]]\n\n tracker_class_mask = np.atleast_1d(raw_data['tracker_classes'][t] == cls_id)\n tracker_class_mask = tracker_class_mask.astype(np.bool)\n tracker_ids = raw_data['tracker_ids'][t][tracker_class_mask]\n tracker_dets = [raw_data['tracker_dets'][t][ind] for ind in range(len(tracker_class_mask)) if\n tracker_class_mask[ind]]\n similarity_scores = 
raw_data['similarity_scores'][t][gt_class_mask, :][:, tracker_class_mask]\n\n # Match tracker and gt dets (with hungarian algorithm)\n unmatched_indices = np.arange(tracker_ids.shape[0])\n if gt_ids.shape[0] > 0 and tracker_ids.shape[0] > 0:\n matching_scores = similarity_scores.copy()\n matching_scores[matching_scores < 0.5 - np.finfo('float').eps] = -10000\n match_rows, match_cols = linear_sum_assignment(-matching_scores)\n actually_matched_mask = matching_scores[match_rows, match_cols] > 0 + np.finfo('float').eps\n match_cols = match_cols[actually_matched_mask]\n\n unmatched_indices = np.delete(unmatched_indices, match_cols, axis=0)\n\n # For unmatched tracker dets, remove those that are greater than 50% within a crowd ignore region.\n unmatched_tracker_dets = [tracker_dets[i] for i in range(len(tracker_dets)) if i in unmatched_indices]\n ignore_region = raw_data['gt_ignore_region'][t]\n intersection_with_ignore_region = self._calculate_mask_ious(unmatched_tracker_dets, [ignore_region],\n is_encoded=True, do_ioa=True)\n is_within_ignore_region = np.any(intersection_with_ignore_region > 0.5 + np.finfo('float').eps, axis=1)\n\n # Apply preprocessing to remove unwanted tracker dets.\n to_remove_tracker = unmatched_indices[is_within_ignore_region]\n data['tracker_ids'][t] = np.delete(tracker_ids, to_remove_tracker, axis=0)\n data['tracker_dets'][t] = np.delete(tracker_dets, to_remove_tracker, axis=0)\n similarity_scores = np.delete(similarity_scores, to_remove_tracker, axis=1)\n\n # Keep all ground truth detections\n data['gt_ids'][t] = gt_ids\n data['gt_dets'][t] = gt_dets\n data['similarity_scores'][t] = similarity_scores\n\n unique_gt_ids += list(np.unique(data['gt_ids'][t]))\n unique_tracker_ids += list(np.unique(data['tracker_ids'][t]))\n num_tracker_dets += len(data['tracker_ids'][t])\n num_gt_dets += len(data['gt_ids'][t])\n\n # Re-label IDs such that there are no empty IDs\n if len(unique_gt_ids) > 0:\n unique_gt_ids = np.unique(unique_gt_ids)\n gt_id_map = np.nan * np.ones((np.max(unique_gt_ids) + 1))\n gt_id_map[unique_gt_ids] = np.arange(len(unique_gt_ids))\n for t in range(raw_data['num_timesteps']):\n if len(data['gt_ids'][t]) > 0:\n data['gt_ids'][t] = gt_id_map[data['gt_ids'][t]].astype(np.int)\n if len(unique_tracker_ids) > 0:\n unique_tracker_ids = np.unique(unique_tracker_ids)\n tracker_id_map = np.nan * np.ones((np.max(unique_tracker_ids) + 1))\n tracker_id_map[unique_tracker_ids] = np.arange(len(unique_tracker_ids))\n for t in range(raw_data['num_timesteps']):\n if len(data['tracker_ids'][t]) > 0:\n data['tracker_ids'][t] = tracker_id_map[data['tracker_ids'][t]].astype(np.int)\n\n # Record overview statistics.\n data['num_tracker_dets'] = num_tracker_dets\n data['num_gt_dets'] = num_gt_dets\n data['num_tracker_ids'] = len(unique_tracker_ids)\n data['num_gt_ids'] = len(unique_gt_ids)\n data['num_timesteps'] = raw_data['num_timesteps']\n data['seq'] = raw_data['seq']\n data['cls'] = cls\n\n # Ensure again that ids are unique per timestep after preproc.\n self._check_unique_ids(data, after_preproc=True)\n\n return data", "def _preprocess(self, data, normalize=False) -> np.ndarray:\n \n preprocessor = StandardScaler() if not normalize else Normalizer()\n\n data = preprocessor.fit_transform(data)\n \n return data", "def primary_preprocessor(self):\n preprocessor = None\n for preprocessor in self.preprocessors:\n if getattr(preprocessor, '_is_primary', False):\n return preprocessor\n return preprocessor", "def apply_processing(self, data, net_input_or_target):\n 
processing_steps = self._get_processing_steps(net_input_or_target)\n metadata = {}\n for step in processing_steps:\n data = step(data)\n if hasattr(step, 'metadata'):\n metadata.update(step.metadata)\n return data, metadata", "def preprocessor_forward(self, inputs: Dict[str, TorchscriptPreprocessingInput]) ->Dict[str, torch.Tensor]:\n return self.preprocessor(inputs)", "def processor(self, data):\n streaming_data = self.decoder.decodeData(data)\n # Add Your code here to process data and handle transport/storage", "def processor(self, data):\n streaming_data = self.decoder.decodeData(data)\n # Add Your code here to process data and handle transport/storage", "def processor(self, data):\n streaming_data = self.decoder.decodeData(data)\n # Add Your code here to process data and handle transport/storage", "def processor(self, data):\n streaming_data = self.decoder.decodeData(data)\n # Add Your code here to process data and handle transport/storage", "def processor(self, data):\n streaming_data = self.decoder.decodeData(data)\n # Add Your code here to process data and handle transport/storage", "def processor(self, data):\n streaming_data = self.decoder.decodeData(data)\n # Add Your code here to process data and handle transport/storage", "def _process(self, data: np.ndarray) -> np.ndarray:\n if not self.is_trained:\n raise DataProcessorError(\"SVD must be trained on data before it can be used.\")\n\n # IQ axis is reduced by projection\n if self._n_shots == 0:\n # level1 average mode\n dims = self._n_circs, self._n_slots\n else:\n # level1 single mode\n dims = self._n_circs, self._n_shots, self._n_slots\n\n projected_data = np.zeros(dims, dtype=object)\n\n for idx in range(self._n_slots):\n scale = self.parameters.scales[idx]\n axis = self.parameters.main_axes[idx]\n mean_i = self.parameters.i_means[idx]\n mean_q = self.parameters.q_means[idx]\n\n if self._n_shots != 0:\n # Single shot\n for circ_idx in range(self._n_circs):\n centered = [\n data[circ_idx, :, idx, 0] - mean_i,\n data[circ_idx, :, idx, 1] - mean_q,\n ]\n projected_data[circ_idx, :, idx] = axis @ np.array(centered) / scale\n else:\n # Averaged\n centered = [data[:, idx, 0] - mean_i, data[:, idx, 1] - mean_q]\n projected_data[:, idx] = axis @ np.array(centered) / scale\n\n return projected_data", "def preprocess(self, data):\n\n input_data_str = data[0].get(\"data\")\n if input_data_str is None:\n input_data_str = data[0].get(\"body\")\n\n input_data = input_data_str.decode(\"utf-8\")\n input_tensor = torch.Tensor(ast.literal_eval(input_data))\n return input_tensor" ]
[ "0.69076425", "0.66020375", "0.64183813", "0.62324464", "0.61948276", "0.6038848", "0.59573555", "0.5956903", "0.5925527", "0.5921275", "0.59206593", "0.5867471", "0.58528507", "0.5809913", "0.5783746", "0.5756985", "0.5732583", "0.5712453", "0.57105845", "0.56719923", "0.5663179", "0.5663179", "0.5663179", "0.5637756", "0.56183517", "0.56061226", "0.5601307", "0.5587392", "0.557685", "0.5573858", "0.556796", "0.5561526", "0.5550827", "0.55422896", "0.55387247", "0.5535934", "0.5523226", "0.54856986", "0.5485582", "0.5437001", "0.5424636", "0.54188365", "0.54098034", "0.54098034", "0.5404836", "0.54025924", "0.53995705", "0.5395101", "0.539013", "0.5386516", "0.534904", "0.53470576", "0.5343773", "0.5342567", "0.5321055", "0.53195196", "0.5316762", "0.53129506", "0.53037727", "0.53017426", "0.52959204", "0.5277207", "0.5264069", "0.52484435", "0.5247315", "0.5247315", "0.5247315", "0.5247315", "0.5247023", "0.5246575", "0.5237963", "0.523749", "0.52229875", "0.5213752", "0.5203416", "0.51818025", "0.5170606", "0.5164395", "0.515646", "0.51544666", "0.51466113", "0.5146178", "0.5142486", "0.5139091", "0.51346177", "0.5129762", "0.5125827", "0.5124925", "0.51131546", "0.5110912", "0.5110544", "0.5091181", "0.5090712", "0.5090712", "0.5090712", "0.5090712", "0.5090712", "0.5090712", "0.5085742", "0.5082038" ]
0.6266177
3
Function to construct all plottable files. In principle to be used for visualisation.
def get_plottables(self, period_person_division, running_person_identifiers, running_fragments, turning_fragments):
    period_running_person_division = {
        period: {person: coords for person, coords in period_dictionary.items()
                 if person in running_person_identifiers}
        for period, period_dictionary in period_person_division.items()}

    running_plottables = {
        period: {person: coords for person, coords in period_dictionary.items()
                 if person in running_person_identifiers}
        for period, period_dictionary in period_person_division.items()
        if any(lower <= period <= upper for (lower, upper) in running_fragments)}

    turning_plottables = {
        period: {person: coords for person, coords in period_dictionary.items()
                 if person in running_person_identifiers}
        for period, period_dictionary in period_person_division.items()
        if any(lower <= period <= upper for (lower, upper) in turning_fragments)}

    period_running_person_division = dict(filter(lambda x: x[1] != {}, period_running_person_division.items()))
    running_plottables = dict(filter(lambda x: x[1] != {}, running_plottables.items()))
    turning_plottables = dict(filter(lambda x: x[1] != {}, turning_plottables.items()))

    return period_running_person_division, running_plottables, turning_plottables
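A hedged consumption sketch (not from the dataset row): the three returned dictionaries map period -> person -> coordinates, and the toy data below assumes plain (x, y) pairs, which may differ from the project's real coordinate format.

import matplotlib.pyplot as plt

# Stand-in for one of the dictionaries returned by get_plottables.
running_plottables = {
    0: {1: (0.0, 1.00)},
    1: {1: (0.5, 1.05)},
    2: {1: (1.0, 1.10)},
}

xs = [xy[0] for frame in running_plottables.values() for xy in frame.values()]
ys = [xy[1] for frame in running_plottables.values() for xy in frame.values()]
plt.scatter(xs, ys, label='running person')
plt.legend()
plt.savefig('running_fragment.png')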
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _create_html(self, workdir, templatePath, imgFormat):\n plot_tables = []\n plot_set = [ self._expectedPlots_globalAvg, self._expectedPlots_Nino, self._expectedPlots_transportDiags ]\n\n # build up the plot_tables array\n for k in range(len(plot_set)):\n plot_table = []\n plot_tuple_list = plot_set[k]\n num_plots = len(plot_tuple_list)\n num_last_row = num_plots % self._columns[k]\n num_rows = num_plots//self._columns[k]\n index = 0\n\n for i in range(num_rows):\n ptuple = []\n for j in range(self._columns[k]):\n label, plot_file = plot_tuple_list[index]\n img_file = '{0}.{1}'.format(plot_file, imgFormat)\n rc, err_msg = cesmEnvLib.checkFile( '{0}/{1}'.format(workdir, img_file), 'read' )\n if not rc:\n ptuple.append(('{0}'.format(label), '{0} - Error'.format(plot_file)))\n else:\n ptuple.append(('{0}'.format(label), plot_file))\n index += 1 \n plot_table.append(ptuple)\n\n # pad out the last row\n if num_last_row > 0:\n ptuple = []\n for i in range(num_last_row):\n label, plot_file = plot_tuple_list[index]\n img_file = '{0}.{1}'.format(plot_file, imgFormat)\n rc, err_msg = cesmEnvLib.checkFile( '{0}/{1}'.format(workdir, img_file), 'read' )\n if not rc:\n ptuple.append(('{0}'.format(label), '{0} - Error'.format(plot_file)))\n else:\n ptuple.append(('{0}'.format(label), plot_file))\n index += 1 \n\n for i in range(self._columns[k] - num_last_row):\n ptuple.append(('',''))\n\n plot_table.append(ptuple)\n\n plot_tables.append(('{0}'.format(self._expectedPlotHeaders[k]),plot_table, self._columns[k]))\n\n # create a jinja2 template object\n templateLoader = jinja2.FileSystemLoader( searchpath=templatePath )\n templateEnv = jinja2.Environment( loader=templateLoader, keep_trailing_newline=False )\n\n template = templateEnv.get_template( self._template_file )\n\n # add the template variables\n templateVars = { 'title' : self._name,\n 'plot_tables' : plot_tables,\n 'imgFormat' : imgFormat\n }\n\n # render the html template using the plot tables\n self._html = template.render( templateVars )\n \n return self._html", "def create_plots(self):\n if not os.path.exists(self.output_folder):\n os.makedirs(self.output_folder)\n self.sse_plot()\n self.avg_sse_plot()", "def init_plots() :\n plot_dict = {}\n\n station_dict = {}\n\n for st_id in [ -5, -4, -3, -2, -1, 1, 2, 3, 4, 5 ] :\n prefix = 'station_' + str( st_id ) + '_'\n station_dict[prefix+'spacepoints_xy'] = \\\n ROOT.TH2D( prefix+'spacepoints_xy', \"Spacepoint X-Y Positions\", \\\n 1000, -200.0, 200.0, 1000, 200.0, 200.0 )\n\n plot_dict['station_plots'] = station_dict\n\n\n plot_dict['beam_positions_x'] = ROOT.TH2D( 'beam_positions_x', \\\n \"Distribution of X Positions for each station\", \\\n 11, -5.5, 5.5, 1000, -200.0, 200.0 )\n plot_dict['beam_positions_y'] = ROOT.TH2D( 'beam_positions_y', \\\n \"Distribution of Y Positions for each station\", \\\n 11, -5.5, 5.5, 1000, -200.0, 200.0 )\n plot_dict['beam_profile_x'] = None\n plot_dict['beam_profile_y'] = None\n plot_dict['beam_profile_x_up_fit'] = None\n plot_dict['beam_profile_y_up_fit'] = None\n plot_dict['beam_profile_x_down_fit'] = None\n plot_dict['beam_profile_y_down_fit'] = None\n\n plot_dict['tof_0_1'] = ROOT.TH1F( 'tof_0_1', 'Time TOF0 - TOF1', \\\n 1000, 0.0, 100.0 )\n plot_dict['tof_1_2'] = ROOT.TH1F( 'tof_1_2', 'Time TOF1 - TOF2', \\\n 1000, 0.0, 100.0 )\n plot_dict['tof_0_1_cut'] = ROOT.TH1F( 'tof_0_1_cut', 'Time TOF0 - TOF1', \\\n 1000, 0.0, 100.0 )\n plot_dict['tof_1_2_cut'] = ROOT.TH1F( 'tof_1_2_cut', 'Time TOF1 - TOF2', \\\n 1000, 0.0, 100.0 )\n\n return plot_dict", "def 
make_all_plots(dirname='plots'):\n for worker_type in ['ordinary', 'normal', 'master', None]:\n name = 'rajpal'\n if worker_type is not None:\n name += '-' + worker_type\n data = Data.from_rajpal_icml15(worker_type=worker_type)\n data.make_plots(name)\n data.make_data('{}.csv'.format(name))\n\n data = Data.from_bragg_hcomp13(positive_only=False)\n data.make_plots(os.path.join(dirname, 'bragg'))\n data.make_data(os.path.join(dirname, 'bragg.csv'))\n\n data = Data.from_bragg_hcomp13(positive_only=True)\n data.make_plots(os.path.join(dirname, 'bragg-pos'))\n data.make_data(os.path.join(dirname, 'bragg-pos.csv'))\n\n data = Data.from_lin_aaai12(workflow='tag')\n data.make_plots(os.path.join(dirname, 'lin-tag'))\n data.make_data(os.path.join(dirname, 'lin-tag.csv'))\n\n data = Data.from_lin_aaai12(workflow='wiki')\n data.make_plots(os.path.join('lin-wiki'))\n data.make_data(os.path.join('lin-wiki.csv'))\n\n make_bragg_teach_plots(dirname=dirname)", "def make_htt_plots(input_filename, output_dir):\n in_stem = os.path.splitext(os.path.basename(input_filename))[0]\n output_dir = os.path.join(output_dir, in_stem)\n if not os.path.isdir(output_dir):\n print 'Making output dir', output_dir\n os.makedirs(output_dir)\n\n f = cu.open_root_file(input_filename)\n tree = cu.get_from_file(f, \"valid\")\n\n common_cut = COMMON_CUT\n norm_cut = '1./nMatches' # normalisation, for event-level quantities, since we store it for each match in an event\n if common_cut != '':\n norm_cut += ' && %s' % common_cut\n\n do_htt_plots(tree, output_dir, norm_cut)\n\n do_mht_plots(tree, output_dir, norm_cut)\n\n # Do plots where y axis is some variable of interest\n do_dr_plots(tree, output_dir, common_cut)\n\n do_rsp_plots(tree, output_dir, common_cut)\n\n do_nvtx_plots(tree, output_dir, norm_cut)\n\n do_njets_plots(tree, output_dir, norm_cut)\n\n do_jet_pt_plots(tree, output_dir, common_cut)\n\n f.Close()", "def create_display_data_table():\n\n for ccd in range(0, 10):\n for node in range(0, 4):\n file = 'ccd' + str(ccd) + '_' + str(node)\n infile = data_dir + file\n outfile = web_dir + 'Data/' + file\n\n f = open(infile, 'r')\n data = [line.strip() for line in f.readlines()]\n f.close()\n\n fo = open(outfile, 'w')\n#\n#--- adding heading\n#\n line = \"#\\n#Date Mn K alpha Al K alpha Ti K alpha Slope Sigma Int Sigma\\n#\\n\"\n fo.write(line)\n for ent in data:\n atemp = re.split('\\s+', ent)\n stime = int(atemp[0])\n#\n#--- converting the date into <mon> <year> form (e.g. 
May 2013)\n#\n ltime = tcnv.axTimeMTA(stime)\n btemp = re.split(':', ltime)\n year = btemp[0]\n [mon, mdate] = tcnv.changeYdateToMonDate(int(year), int(btemp[1]))\n lmon = tcnv.changeMonthFormat(mon)\n line = lmon + ' ' + year \n for j in range(1, len(atemp)):\n line = line + '\\t' + atemp[j]\n\n line = line + '\\n'\n fo.write(line)\n fo.close()", "def tabular_data(self):\n path = CFG.GRAPHS_DIR\n chdir(path)\n\n if self.experiment_count == 1:\n f = open(self.tablefile, 'w')\n f.write(self.print_border_line())\n f.write(self.table_header())\n f.write(self.print_border_line())\n f.write(self.pretty_string(\"Functions\"))\n f.write(self.pretty_string(\"Batch Size\"))\n f.write(self.pretty_string(\"Training (%)\"))\n f.write(self.pretty_string(\"Testing (%)\", True))\n f.write('\\n')\n f.write(self.print_border_line())\n f.close()\n\n f = open(self.tablefile, 'a')\n f.write(self.pretty_string(self.function_name))\n f.write(self.pretty_string(str(self.batch_size)))\n f.write(self.pretty_string(self.tr_mean_str))\n f.write(self.pretty_string(self.test_mean_str, True))\n f.write('\\n')\n f.close()", "def gen_plots(uf_dict, f_dict, min_x, max_x, min_y, max_y, axes, name, histogram, total):\n with PdfPages(name) as pdf:\n total_xuf = []\n total_yuf = []\n total_xf = []\n total_yf = []\n for entry in uf_dict:\n print 'Making plot for ' + entry\n xuf, yuf = zip(*uf_dict[entry])\n fig = plt.figure()\n ax1 = fig.add_subplot(111)\n ax1.scatter(xuf, yuf, c='#ad4851', marker='o', label='initial structures')\n try:\n xf, yf = zip(*f_dict[entry])\n ax1.scatter(xf, yf, c='orange', marker='x', label='selected structures')\n except ValueError:\n xf = []\n yf = []\n plt.legend(loc='upper right')\n plt.title(entry, fontsize=30)\n plt.xlim(min_x, max_x)\n plt.ylim(min_y, max_y)\n plt.xlabel(axes[0], fontsize=20)\n plt.ylabel(axes[1], fontsize=20)\n pdf.savefig(fig)\n plt.close()\n\n if total:\n total_xuf.extend(xuf)\n total_yuf.extend(yuf)\n total_xf.extend(xf)\n total_yf.extend(yf)\n\n if histogram:\n bins = np.linspace(min_y, max_y, num=10)\n plt.hist(yuf, bins, alpha=0.5, color='b', label='initial structures')\n try:\n plt.hist(yf, bins, alpha=0.5, color='orange', label='selected structures')\n except ValueError:\n pass\n plt.legend(loc='upper right')\n plt.title(entry, fontsize=30)\n plt.xlabel(axes[1], fontsize=20)\n plt.ylabel('Frequency', fontsize=20)\n pdf.savefig()\n plt.close()\n\n if total:\n print 'Making composite plot'\n fig = plt.figure()\n ax1 = fig.add_subplot(111)\n ax1.scatter(total_xuf, total_yuf, c='#ad4851', marker='o', label='initial structures')\n ax1.scatter(total_xf, total_yf, c='orange', marker='x', label='selected structures')\n plt.legend(loc='upper right')\n plt.title('Composite Plot', fontsize=30)\n plt.xlim(min_x, max_x)\n plt.ylim(min_y, max_y)\n plt.xlabel(axes[0], fontsize=20)\n plt.ylabel(axes[1], fontsize=20)\n pdf.savefig(fig)\n plt.close()", "def create_OER_plots(\n data,\n plots_folder=\"OER_plots\"\n ):\n # | - create_OER_plots\n calc_systems = data\n\n # | - Styling and Setup\n # settings size and font for revtex stylesheet\n\n fig_width_pt = 1.8 * 246.0 # Get this from LaTeX using \\showthe\\columnwidth\n #fig_width_pt *= 300./72 # convert to 300 dpi\n inches_per_pt = 1.0 / 72.27 # Convert pt to inches\n #inches_per_pt = 1.0/300 # Convert pt to inches\n golden_mean = (np.sqrt(5) - 1.0) / 2.0 # Aesthetic ratio\n fig_width = fig_width_pt * inches_per_pt # width in inches\n fig_height = fig_width * golden_mean # height in inches\n fig_size = [fig_width, fig_height]\n fig = 
plt.figure(figsize=fig_size, dpi=300)\n\n font_size = 9\n tick_font_size = 8\n xlabel_pad = 8\n ylabel_pad = 18\n matplotlib.rcParams['ps.usedistiller'] = 'xpdf'\n\n matplotlib.rcParams['font.size'] = 10\n #matplotlib.rcParams['axes.labelsize'] = 2*font_size\n matplotlib.rcParams['axes.labelsize'] = font_size\n matplotlib.rcParams['legend.fontsize'] = font_size\n matplotlib.rcParams['xtick.labelsize'] = tick_font_size\n matplotlib.rcParams['ytick.labelsize'] = tick_font_size\n\n font_default = 'helvetica'\n #font_default='cmss'\n\n def setfont(font=font_default, unicode=True):\n \"\"\"Set font.\n\n Set Matplotlibs rcParams to use LaTeX for font rendering.\n Revert all changes by calling rcdefault() from matplotlib.\n\n Parameters:\n -----------\n font: string\n \"Helvetica\"\n \"Times\"\n \"Computer Modern\"\n\n usetex: Boolean\n Use unicode. Default: False.\n\n \"\"\"\n # | - setfont\n # Use TeX for all figure text!\n plt.rc('text', usetex=True)\n\n font = font.lower().replace(\" \", \"\")\n if font == 'times':\n # Times\n font = {'family': 'serif', 'serif': ['Times']}\n preamble = r\"\"\"\n \\usepackage{color}\n \\usepackage{mathptmx}\n \"\"\"\n elif font == 'helvetica':\n # Helvetica\n # set serif, too. Otherwise setting to times and then\n # Helvetica causes an error.\n font = {'family': 'sans-serif', 'sans-serif': ['Helvetica'],\n 'serif': ['cm10']}\n preamble = r\"\"\"\n \\usepackage{color}\n \\usepackage[tx]{sfmath}\n \\usepackage{helvet}\n \\usepackage{sansmath}\n \"\"\"\n else:\n # Computer modern serif\n font = {'family': 'serif', 'serif': ['cm10']}\n # preamble = r\"\"\"\n preamble = r\"\"\"\n \\usepackage{color}\n \"\"\"\n\n if font == 'cmss':\n # Computer modern sans serif\n font = {'family': 'sans-serif', 'serif': ['cmss']}\n preamble = r\"\"\"\n \\usepackage{color}\n \\usepackage[tx]{sfmath}\n \"\"\"\n\n if unicode:\n # Unicode for Tex\n #preamble = r\"\"\"\\usepackage[utf8]{inputenc}\"\"\" + preamble\n # inputenc should be set automatically\n plt.rcParams['text.latex.unicode'] = True\n\n # print font, preamble\n plt.rc('font', **font)\n plt.rcParams['text.latex.preamble'] = preamble\n #__|\n\n setfont(\n font_default,\n # unicode=True,\n unicode=False,\n )\n\n matplotlib.rcParams['lines.linewidth'] = 1.\n\n #matplotlib.rcParams['ytick.direction'] = 'out'\n #matplotlib.rcParams['xtick.direction'] = 'out'\n\n ax = fig.add_axes([0.2, 0.2, 0.6, 0.6])\n\n zoom = 0.5\n d1 = 3 * zoom\n d2 = 4 * zoom\n xcenter = 1.5 # 0.65\n #ycenter=1.23#2.4\n ycenter = 0.8 # 2.4\n\n x1 = xcenter - d1 # -0.6\n x2 = xcenter + d1 # 2.2\n y1 = ycenter - d2 # 1#0.5\n y2 = ycenter + d2 # 5\n ax.axis([x1, x2, y1, y2])\n ax.set_xlabel(r'$\\Delta$G$_{\\sf O}$ - $\\Delta$G$_{\\sf OH}$ (eV)')\n #ax.set_ylabel(r'$\\Delta$G$_{\\sf OOH}$ -$\\Delta$G$_{\\sf O}$ (eV)')\n ax.set_ylabel(r'$\\Delta$G$_{\\sf OH}$')\n\n delta = 0.025\n x = np.arange(x1, x2 + delta, delta)\n y = np.arange(y1, y2 + delta, delta)\n X, Y = np.meshgrid(x, y)\n\n #Z1 = mlab.bivariate_normal(X, Y, 1.0, 1.0, 0.0, 0.0)\n #Z2 = mlab.bivariate_normal(X, Y, 1.5, 0.5, 1, 1)\n # difference of Gaussians\n #Z = 10.0 * (Z2 - Z1)\n #__|\n\n # | - Methods\n #fit=[0.84527288, 3.38026638]\n def ooh_oh_scaling(doh):\n \"\"\"ooh_oh_scaling equation.\"\"\"\n # | - ooh_oh_scaling\n #like ambars\n #dooh=0.5*doh + 3.0\t\t #O\n #normal one\n\n dooh = doh + 3.2\n return(dooh)\n #__|\n\n def overpotential(doh, do):\n \"\"\"Calculate overpotential.\n\n Args:\n doh:\n do:\n \"\"\"\n # | - overpotential\n dooh = ooh_oh_scaling(doh)\n dg14 = [doh, do - doh, dooh - 
do, - dooh + 4.92]\n m = max(dg14)\n return(m - 1.23)\n #return doh*do\n #__|\n\n def overpotential2(x, doh):\n \"\"\"Calculate overpotential (version 2).\n\n Args:\n x:\n doh:\n \"\"\"\n # | - overpotential2\n dooh = ooh_oh_scaling(doh)\n dg14 = [doh, x, -x + 2.46, -dooh + 4.92]\n m = max(dg14)\n return(m - 1.23)\n #return doh*do\n #__|\n\n def overpotential3(x, doh):\n \"\"\"Calculate overpotential (version 3).\n\n Args:\n x:\n doh:\n \"\"\"\n # | - overpotential3\n dooh = ooh_oh_scaling(doh)\n dg14 = [doh, x, dooh - (x + doh), -dooh + 4.92]\n m = max(dg14)\n return(m - 1.23)\n\n #return doh*do\n #__|\n\n def overpotential_label(doh, do):\n \"\"\"Return overpotential label.\n\n Args:\n doh:\n do:\n \"\"\"\n # | - overpotential_label\n dooh = ooh_oh_scaling(doh)\n dg14 = [doh, do - doh, dooh - do, -dooh + 4.92]\n m = max(dg14)\n for i in range(len(dg14)):\n if(m == dg14[0]):\n return(r'OH lim.')\n if(m == dg14[1]):\n return(r'OH-O lim.')\n if(m == dg14[2]):\n return(r'O-OOH lim.')\n if(m == dg14[3]):\n return( r'OOH-O$_{\\sf 2}$ lim.')\n #return doh*do\n #__|\n\n #__|\n\n #Z=overpotential(X,Y)\n\n # | - OER_contour_plot *****************************************************\n Z = []\n for j in y:\n tmp = []\n for i in x:\n tmp.append(overpotential3(i, j))\n Z.append(tmp)\n\n\n #print overpotential(0.8,2.4)\n\n Z = np.array(Z)\n\n\n #im = plt.imshow(Z, origin='lower',interpolation='bilinear',\n # cmap=cm.jet_r, extent=(x1,x2,y1,y2), vmin=0, vmax=2)\n\n origin = 'lower'\n levels = np.arange(0.0, 2, 0.1)\n #levels = np.arange(0.2, 2, 0.1)\n CS = plt.contourf(\n X,\n Y,\n Z,\n levels,\n #20,\n # [-1, -0.1, 0, 0.1],\n #alpha=0.8,\n #cmap=plt.cm.bone,\n cmap=plt.cm.jet_r,\n #extend='both',\n extend='max',\n origin=origin,\n )\n\n # Note that in the following, we explicitly pass in a subset of\n # the contour levels used for the filled contours. 
Alternatively,\n # We could pass in additional levels to provide extra resolution,\n # or leave out the levels kwarg to use all of the original levels.\n\n CS2 = plt.contour(\n CS,\n levels=CS.levels,\n colors='white',\n linewidths=0.05,\n alpha=0.3,\n origin=origin,\n # hold='on',\n )\n\n # | - __old__\n #levels = np.arange(0, 2, 0.05)\n #CS = plt.contourf(X,Y,Z, levels, cmap=cm.jet_r, origin='lower')\n #CS = plt.contourf(X,Y,Z, levels, origin='lower')\n #im = plt.imshow(Z, interpolation='bilinear', origin='lower',\n # cmap=cm.jet, extent=(x1,x2,y1,y2))\n #levels2 = [2.0]\n #CS2 = plt.contour(CS, levels2,\n # colors = 'r',\n # origin='lower',\n # hold='on')\n #CS = plt.contour(Z, levels,\n # origin='lower',\n # linewidths=0.5,\n # extent=(x1,x2,y1,y2))\n ##Thicken the zero contour.\n #zc = CS.collections[6]\n #plt.setp(zc, linewidth=2)\n #__|\n\n cbar = plt.colorbar(CS)\n #cbar.ax.set_ylabel('Overpotential [V]')\n #cbar.ax.set_ylabel(r'$\\eta_{\\sf calc.}$')\n cbar.ax.set_ylabel(r'$\\eta_{\\sf OER}$')\n\n # | - __old__\n #cbar.add_lines(CS2)\n #plt.clabel(CS, levels[1::2], # label every second level\n # inline=1,\n # fmt='%1.1f',\n # fontsize='x-small')\n #plt.title('Lines with colorbar')\n # We can still add a colorbar for the image, too.\n # This makes the original colorbar look a bit out of place,\n # so let's improve its position.\n #__|\n\n ax.tick_params(axis='both', direction='out')\n ax.get_xaxis().tick_bottom() # remove unneeded ticks\n ax.get_yaxis().tick_left()\n\n # | - __old__\n #plot(x,ooh_oh_scaling(x),'--',color='orange',lw=1,\n # dashes=(3,1),label='$\\Delta$G$_{\\sf OOH}$=0.82G$_{\\sf OH}$+3.18 eV')\n #ax.text(x1+0.02,y2-0.3,\n # '$\\Delta$G$_{\\sf OOH}$=%.2fG$_{\\sf OH}$+%.2f eV' %(fit[0],fit[1]),\n # color='orange',fontsize='x-small',zorder=10,horizontalalignment='left')\n #ax.text(x1+0.02,y2-0.3,\n # '$\\Delta$G$_{\\sf OOH}$=%.2fG$_{\\sf OH}$+%.2f eV' %(0.82,3.18),\n # color='orange',fontsize='x-small',zorder=10,horizontalalignment='left')\n #plt.show()\n #__|\n\n offset = [0.0, 0.08]\n\n #foo=r': %f' % (calc_systems[i][3])\n for i in range(len(calc_systems)):\n # ax.plot(calc_systems[i][1]-calc_systems[i][0], calc_systems[i][0],\n # 'or',color=calc_systems[i][5])\n\n ax.plot(\n calc_systems[i][1] - calc_systems[i][0],\n calc_systems[i][0],\n calc_systems[i][9],\n mec=calc_systems[i][5],\n mfc=calc_systems[i][10],\n mew=0.8,\n zorder=4,\n marker=calc_systems[i][11],\n label=calc_systems[i][4] + ' : %.2f V' % (calc_systems[i][3])\n )\n\n # | - __old__\n # if i!=0 and 1:\n # ax.text(calc_systems[i][1]-calc_systems[i][0]+calc_systems[i][6],\n # calc_systems[i][0]+calc_systems[i][7],\n # calc_systems[i][4]+'(%.2f)' %(calc_systems[i][3]),color='black',\n # fontsize=6,horizontalalignment='center',rotation=0,zorder=1)\n # else:\n # ax.text(calc_systems[i][1]-calc_systems[i][0]+calc_systems[i][6],\n # calc_systems[i][0]+calc_systems[i][7],\n # calc_systems[i][4]+'(%.2f)' %(calc_systems[i][3]),\n # color='white',fontsize=6,horizontalalignment='center',\n # rotation=0,zorder=1)\n #ax.text(calc_systems[i][0],calc_systems[i][1],'%i' %(i+1),\n # color='black',fontsize=4,\n # horizontalalignment='center',\n # verticalalignment='center',\n # rotation=0,zorder=2)\n #__|\n\n corners = [\n [1.3, 1.0],\n [x1 + (x2 - x2) * 0.2, y1 + (y2 - y1) * 0.9],\n [x1 + (x2 - x2) * 0.8, y1 + (y2 - y1) * 0.1],\n [-2, 0],\n ]\n\n #for i in range(len(corners)):\n # ax.text(corners[i][0],corners[i][1], overpotential_label(corners[i][0],\n # corners[i][1]), color='white',fontsize='x-small',\n # 
horizontalalignment='center',rotation=0,zorder=3)\n\n ax.legend(\n bbox_to_anchor=(1.25, 1.05),\n loc=2,\n borderaxespad=1,\n ncol=1,\n fancybox=True,\n shadow=True,\n fontsize='x-small',\n handlelength=2,\n )\n\n fig_path_i = os.path.join(\n plots_folder,\n \"OER_contour_plot_v13.pdf\",\n )\n\n if not os.path.exists(plots_folder):\n os.makedirs(plots_folder)\n\n fig.savefig(\n fig_path_i,\n bbox_inches=\"tight\",\n )\n\n # fig.savefig('OER_contour_plot_v13.pdf', bbox_inches='tight')\n fig.clf()\n\n #__| **********************************************************************\n\n # | - OER_scaling **********************************************************\n\n fig = plt.figure(figsize=fig_size, dpi=300)\n ax = fig.add_axes([0.2, 0.2, 0.6, 0.6])\n x1 = -1\n x2 = 2.5\n ax.axis([x1, x2, x1, ooh_oh_scaling(x2)])\n\n ax.set_xlabel(r'$\\Delta$G$_{\\sf OH}$ (eV)')\n ax.set_ylabel(r'$\\Delta$G$_{\\sf OOH}$,$\\Delta$G$_{\\sf O}$ (eV)')\n\n xdata = []\n ydata = []\n y2data = []\n\n #for i in range(3):\n for i in range(len(calc_systems)):\n xdata.append(calc_systems[i][0])\n ydata.append(calc_systems[i][2])\n y2data.append(calc_systems[i][1])\n\n # print(xdata)\n # print(ydata)\n\n fit = polyfit(xdata, ydata, 1)\n fit_fn = poly1d(fit)\n # print(fit_fn)\n aa = fit_fn[1]\n bb = fit_fn[0]\n\n fit1 = polyfit(xdata, y2data, 1)\n fit_fn1 = poly1d(fit1)\n # print(fit_fn1)\n\n #print fit_fn[0], fit_fn[1]\n #how bad is scaling\n for i in range(len(calc_systems)):\n error = calc_systems[i][2] - \\\n (fit_fn[1] * calc_systems[i][0] + fit_fn[0])\n # print(error, calc_systems[i])\n\n xx = np.arange(x1, x2, delta)\n\n # Plotting Scaling Lines\n ax.plot(xx, fit_fn[1] * xx + fit_fn[0], '--',\n lw=1, dashes=(3, 1), c='grey', label='OOH scaling',\n )\n\n ax.plot(xx, xx + 3.2, '--', lw=1, dashes=(3, 1), c='black')\n\n ax.plot(xx, xx, '--', lw=1, dashes=(3, 1), c='black')\n\n ax.plot(xx, fit_fn1[1] * xx + fit_fn1[0], '--',\n lw=1, dashes=(3, 1), c='red', label='O scaling',\n )\n\n for i in range(len(calc_systems)):\n ax.plot(\n calc_systems[i][0],\n calc_systems[i][2],\n 'ro',\n ms=3,\n marker=calc_systems[i][11],\n #alpha=0.2,\n color=calc_systems[i][10],\n )\n\n ax.plot(\n calc_systems[i][0],\n calc_systems[i][1],\n 'ro',\n ms=3,\n marker=calc_systems[i][11],\n #alpha=0.2,\n color=calc_systems[i][10],\n )\n\n ax.plot(\n calc_systems[i][0],\n calc_systems[i][0],\n calc_systems[i][9],\n mec=calc_systems[i][5],\n mfc=calc_systems[i][10],\n mew=0.8,\n zorder=4,\n marker=calc_systems[i][11],\n label=calc_systems[i][4] + ' : %.2f V' % (calc_systems[i][3]),\n )\n\n # ax.text(calc_systems[i][0],\n # calc_systems[i][0]+calc_systems[i][7]+0.08,\n # calc_systems[i][4]+'(%.2f)' %(calc_systems[i][3]),\n # color='black',fontsize=6,horizontalalignment='center',\n # rotation=0,zorder=1)\n\n ax.legend(\n bbox_to_anchor=(1.05, 1.05),\n loc=2,\n borderaxespad=0.5,\n ncol=1,\n fancybox=True,\n shadow=True,\n fontsize='x-small',\n handlelength=2,\n )\n\n\n fig_path_i = os.path.join(\n plots_folder,\n \"OER_scaling.pdf\",\n )\n\n if not os.path.exists(plots_folder):\n os.makedirs(plots_folder)\n\n fig.savefig(\n fig_path_i,\n bbox_inches=\"tight\",\n )\n\n # fig.savefig('OER_scaling.pdf', bbox_inches='tight')\n\n fig.clf()\n\n #__| **********************************************************************\n\n # | - OER_1D_plot **********************************************************\n ax = fig.add_axes([0.2, 0.2, 0.6, 0.6])\n\n #x1=1.23-1\n #x2=1.23+1\n #y2=1\n #y1=0\n\n x1 = 0.5\n x2 = 2.8\n y2 = 2.83\n y1 = 1.23\n\n ax.axis([x1, x2, y1, 
y2])\n delta = 0.01\n x = np.arange(x1, x2, delta)\n\n ax.set_xlabel(r'$\\Delta$G$_{\\sf O}-\\Delta$G$_{\\sf OH}$ (eV)')\n\n #ax.set_ylabel(r'$\\Delta$G$_{\\sf O}$ (eV)')\n # ax.set_ylabel(r'U_{\\sf OER}$ (V)')\n\n ax.set_ylabel(r'$\\eta_{\\sf OER}$')\n ax.set_ylim(ax.get_ylim()[::-1])\n plot(\n x,\n np.maximum(x, 3.2 - x),\n '--',\n color='black',\n lw=0.67,\n dashes=(3, 1),\n zorder=2,\n )\n\n # | - __old__\n #plot(x,1.23,'--',color='black',lw=0.67, dashes=(3,1),zorder=2)\n # xy=np.array([xp for xp in x if 1.55<xp<1.66])\n # ax.fill_between(xy, y2, np.maximum(xy,3.2-xy)-1.23,\n # zorder=1, color='red', alpha=0.3, edgecolor=\"none\")\n # for b in x:\n # if(np.maximum(b, 3.2-b) < 0.44):\n # print(b)\n\n #plot(x,np.maximum(x,bb-x*(aa)-0.65)-1.23,'--',\n # color='grey',lw=0.67, dashes=(3,1))\n #slope not one\n #plot(x,np.maximum(x,3.18-0.82*x-0.35)-1.23,'--',color='pink',\n # lw=0.67,dashes=(3,1))\n #plot(x,np.maximum(x,2.46-x)-1.23,'-',color='black',lw=0.67)\n\n #import matplotlib.patches as patches\n #ax.add_patch(\n # patches.Rectangle(\n # (calc_systems[1][1]-calc_systems[1][0],\n # calc_systems[1][3]-0.04), # (x,y)\n # 0.25, # width\n # 0.05, # height\n # fill=True,\n # edgecolor=\"none\",\n # facecolor='red',\n # )\n #)\n #__|\n\n for i in range(len(calc_systems)):\n ax.plot(\n calc_systems[i][1] - calc_systems[i][0],\n calc_systems[i][3] + 1.23,\n calc_systems[i][9],\n mec=calc_systems[i][5],\n mfc=calc_systems[i][10],\n mew=0.8,\n zorder=4,\n marker=calc_systems[i][11],\n\n label=calc_systems[i][4] +\n ' : %.2f V' % (calc_systems[i][3] + 1.23),\n\n )\n\n # | - __old__\n # if(i!=1):\n # ax.text(calc_systems[i][1]-calc_systems[i][0],\n # calc_systems[i][3]-0.02,calc_systems[i][3])\n # color='black',fontsize=6,horizontalalignment='left',rotation=0,zorder=4)\n # else:\n # ax.text(calc_systems[i][1]-calc_systems[i][0],calc_systems[i][3]-0.02,\n # calc_systems[i][4]+'(%.2f)' %(calc_systems[i][3]),\n # color='black',fontsize=6,horizontalalignment='right',rotation=0,zorder=4)\n #__|\n\n ax.legend(\n bbox_to_anchor=(-0.15, 1.425),\n loc=2,\n borderaxespad=0.5,\n ncol=3,\n fancybox=True,\n shadow=False,\n fontsize=\"x-small\",\n handlelength=2,\n )\n\n # fig.savefig('OER_1D_plot_v13.pdf', bbox_inches='tight')\n\n fig_path_i = os.path.join(\n plots_folder,\n \"OER_1D_plot_v13.pdf\",\n )\n\n if not os.path.exists(plots_folder):\n os.makedirs(plots_folder)\n\n fig.savefig(\n fig_path_i,\n bbox_inches=\"tight\",\n )\n\n fig.clf()\n\n #__| **********************************************************************\n\n #__|", "def make_tables(self):\n for t in self.tables:\n self.add_table(groupname=t['groupname'],\n tablename=t['tablename'],\n description=t['description'],\n tabletitle=t['tabletitle'])", "def createPrettyPlots(self, inputDir, outputDir, fnExtension = 'png'):\n number_of_entries_per_row = 4\n number_of_files_per_column = 2\n imageWidth = 200 # 1600 org is four times as large\n imageHeight = 150 # 1200\n nTmessage(\"Updating index files for input directory: %s\" % inputDir)\n if os.path.exists(outputDir):\n# nTmessage(\"Removing output directory: %s\" % outputDir)\n shutil.rmtree(outputDir)\n # end if\n# nTmessage(\"Creating output directory: %s\" % outputDir)\n os.mkdir(outputDir)\n# nTdebug(\"Doing copyCingHtmlJsAndCssToDirectory\")\n copyCingHtmlJsAndCssToDirectory(outputDir) \n# htmlDir = os.path.join(cingRoot, \"HTML\")\n fnMatchPattern = '*.' 
+ fnExtension\n image_fn_list = glob(os.path.join(inputDir,fnMatchPattern)) \n inputDirBase = os.path.basename(inputDir)\n# nTdebug(\"Got relative part of inputDir: %s\" % inputDirBase) # e.g. plotTrend\n image_code_list = []\n for image_fn in image_fn_list:\n _root, image_code, _ext = nTpath(image_fn)\n image_code_list.append(image_code)\n # end for \n ## Get the number of files required for building an index\n number_of_images_all_present = len(image_code_list)\n number_of_images_per_file = number_of_entries_per_row * number_of_files_per_column\n ## Number of files with indexes in google style\n number_of_files = int(number_of_images_all_present / number_of_images_per_file)\n if number_of_images_all_present % number_of_images_per_file:\n number_of_files += 1\n # end if\n nTmessage(\"Creating %s pages for %s image codes\" % (number_of_files, number_of_images_all_present))\n# nTmessage(\"Generating %s index html files\" % (number_of_files))\n\n file_name = os.path.join (self.base_dir, \"data\", self.results_base, \"indexPplot.html\")\n file_content = open(file_name, 'r').read()\n old_string = r\"<!-- INSERT NEW TITLE HERE -->\"\n new_string = capitalizeFirst( inputDirBase )\n file_content = string.replace(file_content, old_string, new_string)\n old_string = r\"<!-- INSERT NEW FOOTER HERE -->\"\n file_content = string.replace(file_content, old_string, self.htmlFooter)\n old_string = r\"<!-- INSERT GOOGLE ANALYTICS TEMPLATE HERE -->\"\n file_content = string.replace(file_content, old_string, GOOGLE_ANALYTICS_TEMPLATE)\n old_string = r\"<!-- INSERT GOOGLE PLUS ONE TEMPLATE HERE -->\"\n file_content = string.replace(file_content, old_string, GOOGLE_PLUS_ONE_TEMPLATE)\n ## Count will track the number of entries done per index file\n images_done_per_file = 0\n ## Following variable will track all done sofar\n images_done_all = 0\n ## Tracking the number in the current row. Set for the rare case that there\n ## are no entries at all. 
Otherwise it will be initialize on first pass.\n num_in_row = 0\n ## Tracking the index file number\n file_id = 1\n ## Text per row in an index file to insert\n insert_text = ''\n ## Repeat for all entries plus a dummy pass for writing the last index file\n for image_code in image_code_list + [ None ]:\n ## Finish this index file\n ## The last index file will only be written once...\n if images_done_per_file == number_of_images_per_file or images_done_all == number_of_images_all_present:\n begin_image_count = number_of_images_per_file * (file_id - 1) + 1\n end_image_count = min(number_of_images_per_file * file_id,\n number_of_images_all_present)\n# nTdebug(\"begin_image_count, end_image_count, number_of_images_all_present: %5d %5d %5d\" % (\n# begin_image_count, end_image_count, number_of_images_all_present))\n # image_code is just the base name of the file name.\n new_string = \"Images: %s-%s of %s.\" % (\n begin_image_count,\n end_image_count,\n number_of_images_all_present\n )\n old_string = r\"<!-- INSERT NEW RESULT STRING HERE -->\" \n new_file_content = string.replace(file_content, old_string, new_string)\n # Always end the row by adding dummy columns\n if num_in_row != number_of_entries_per_row:\n insert_text += (number_of_entries_per_row - num_in_row) * 2 * r\"<td>&nbsp;</td>\" + r\"</tr>\"\n # end if\n ## Create the new index file from the example one by replacing a string\n ## with the new content.\n old_string = r\"<!-- INSERT NEW ROWS HERE -->\"\n new_file_content = string.replace(new_file_content, old_string, insert_text)\n\n first_string = '<a href=\"index_%s.html\">First &lt; &lt;</a>' % 1\n final_string = '<a href=\"index_%s.html\">Last &gt; &gt;</a>' % number_of_files\n prev_string = ''\n if file_id > 1:\n prev_string = '<a href=\"index_%s.html\">Previous &lt;</a>' % ( file_id - 1)\n # end if\n next_string = ''\n if file_id < number_of_files:\n next_string = '<a href=\"index_%s.html\">> Next</a>' % ( file_id + 1)\n # end if\n first_link = max(1, file_id - number_of_files_per_column)\n last_link = min(number_of_files, file_id + number_of_files_per_column - 1)\n links_string = ''\n for link in range(first_link, last_link + 1):\n ## List link but don't include a link out for the current file_id\n if link == file_id:\n links_string += ' <B>%s</B>' % link\n else:\n links_string += ' <a href=\"index_%s.html\">%s</a>' % (\n link, link)\n # end if\n # end for\n old_string = r\"<!-- INSERT NEW LINKS HERE -->\"\n new_string = 'Result pages: ' + ' '.join([first_string, prev_string, links_string, next_string, final_string])\n new_file_content = string.replace(new_file_content, old_string, new_string)\n ## Make the first index file name still index.html\n new_file_name = os.path.join( outputDir, 'index_%s.html' % file_id)\n if not file_id:\n new_file_name = os.path.join( outputDir, '/index.html' )\n # end if \n writeTextToFile(new_file_name, new_file_content) \n images_done_per_file = 0\n num_in_row = 0\n insert_text = \"\"\n file_id += 1\n # end for\n ## Build on current index file\n ## The last iteration will not execute this block because of this clause\n if images_done_all < number_of_images_all_present:\n images_done_all += 1\n images_done_per_file += 1\n ## Get the html code right by abusing the formatting chars.\n ## as in sprintf etc.\n imageRelUrl = os.path.join( '..', inputDirBase, image_code + '.' 
+ fnExtension)\n tmp_string = \"\"\"\n<td> <a href=\"%(imageRelUrl)s\"> <img SRC=\"%(imageRelUrl)s\" border=\"0\" width=\"%(imageWidth)s\" height=\"%(imageHeight)s\"> </a> </td>\"\"\" % dict(\n imageRelUrl=imageRelUrl, imageWidth=imageWidth, imageHeight=imageHeight)\n num_in_row = images_done_per_file % number_of_entries_per_row\n if num_in_row == 0:\n num_in_row = number_of_entries_per_row\n # end if\n if num_in_row == 1:\n # Start new row\n tmp_string = \"\\n<tr>\" + tmp_string\n elif (num_in_row == number_of_entries_per_row):\n # End this row\n tmp_string = tmp_string + \"\\n</tr>\"\n # end if\n insert_text += tmp_string\n # end if\n # end if\n index_file_first = 'index_1.html'\n index_file = os.path.join(outputDir, 'index.html')\n ## Assume that a link that is already present is valid and will do the job\n# nTdebug('Symlinking: %s %s' % (index_file_first, index_file))\n symlink(index_file_first, index_file)", "def _create_html(self, workdir, templatePath, imgFormat):\n num_cols = 14\n plot_table = []\n\n for i in range(len(self._labels)):\n plot_tuple_list = []\n plot_tuple = (0, 'label','{0}:'.format(self._labels[i]))\n plot_tuple_list.append(plot_tuple)\n plot_list = eval('self._expectedPlots_{0}'.format(self._labels[i]))\n\n for j in range(num_cols - 1):\n img_file = '{0}.{1}'.format(plot_list[j], imgFormat)\n rc, err_msg = cesmEnvLib.checkFile( '{0}/{1}'.format(workdir, img_file), 'read' )\n if not rc:\n plot_tuple = (j+1, self._linkNames[j],'{0} - Error'.format(img_file))\n else:\n plot_tuple = (j+1, self._linkNames[j], img_file)\n plot_tuple_list.append(plot_tuple)\n\n print('DEBUG... plot_tuple_list[{0}] = {1}'.format(i, plot_tuple_list))\n plot_table.append(plot_tuple_list)\n\n # create a jinja2 template object\n templateLoader = jinja2.FileSystemLoader( searchpath=templatePath )\n templateEnv = jinja2.Environment( loader=templateLoader, keep_trailing_newline=False )\n\n template = templateEnv.get_template( self._template_file )\n\n # add the template variables\n templateVars = { 'title' : self._name,\n 'plot_table' : plot_table,\n 'num_rows' : len(self._labels),\n }\n\n # render the html template using the plot tables\n self._html = template.render( templateVars )\n \n return self._shortname, self._html", "def _create_html(self, workdir, templatePath, imgFormat):\n num_cols = 14\n plot_table = []\n\n for i in range(len(self._labels)):\n plot_tuple_list = []\n plot_tuple = (0, 'label','{0}:'.format(self._labels[i]))\n plot_tuple_list.append(plot_tuple)\n plot_list = eval('self._expectedPlots_{0}'.format(self._labels[i]))\n\n for j in range(num_cols - 1):\n img_file = '{0}.{1}'.format(plot_list[j], imgFormat)\n rc, err_msg = cesmEnvLib.checkFile( '{0}/{1}'.format(workdir, img_file), 'read' )\n if not rc:\n plot_tuple = (j+1, self._linkNames[j],'{0} - Error'.format(img_file))\n else:\n plot_tuple = (j+1, self._linkNames[j], img_file)\n plot_tuple_list.append(plot_tuple)\n\n print('DEBUG... 
plot_tuple_list[{0}] = {1}'.format(i, plot_tuple_list))\n plot_table.append(plot_tuple_list)\n\n # create a jinja2 template object\n templateLoader = jinja2.FileSystemLoader( searchpath=templatePath )\n templateEnv = jinja2.Environment( loader=templateLoader, keep_trailing_newline=False )\n\n template = templateEnv.get_template( self._template_file )\n\n # add the template variables\n templateVars = { 'title' : self._name,\n 'plot_table' : plot_table,\n 'num_rows' : len(self._labels),\n }\n\n # render the html template using the plot tables\n self._html = template.render( templateVars )\n \n return self._shortname, self._html", "def _create_plots(self):\n #########################\n # Histograms of original data\n # Generates composite plot of specific variables before correction\n # We fill these variables by sampling a normal distribution, so we use this plot mainly as evidence for that.\n if self.generate_bokeh:\n ws_hist = plotting_functions.histogram_plot(self.data_ws[~np.isnan(self.data_ws)],\n 'Windspeed', 'black', 'm/s')\n tmax_hist = plotting_functions.histogram_plot(self.data_tmax[~np.isnan(self.data_tmax)],\n 'TMax', 'red', 'degrees C')\n tmin_hist = plotting_functions.histogram_plot(self.data_tmin[~np.isnan(self.data_tmin)],\n 'TMin', 'blue', 'degrees C')\n tavg_hist = plotting_functions.histogram_plot(self.data_tmin[~np.isnan(self.data_tmin)],\n 'TAvg', 'black', 'degrees C')\n tdew_hist = plotting_functions.histogram_plot(self.data_tdew[~np.isnan(self.data_tdew)],\n 'TDew', 'black', 'degrees C')\n k_not_hist = plotting_functions.histogram_plot(self.k_not[~np.isnan(self.k_not)],\n 'Ko', 'black', 'degrees C')\n\n output_file(\"correction_files/\" + self.station_name + '_histograms.html',\n title=self.station_name + ' histograms')\n\n save(gridplot([ws_hist, tmax_hist, tmin_hist, tavg_hist, tdew_hist, k_not_hist], ncols=2,\n plot_width=400, plot_height=400, toolbar_location=None))\n\n #########################\n # Generate bokeh composite plot\n # Creates one large plot featuring all variables as subplots, used to get a concise overview of the full dataset\n # If user opts to not correct data (sets script_mode = 0), then this plots data before correction\n # If user does correct data, then this plots data after correction\n print(\"\\nSystem: Now creating composite bokeh graph.\")\n if self.generate_bokeh: # Flag to create graphs or not\n plot_list = []\n x_size = 500\n y_size = 350\n\n if self.script_mode == 0:\n output_file(\"correction_files/before_graphs/\" + self.station_name +\n \"_before_corrections_composite_graph.html\")\n elif self.script_mode == 1:\n output_file(\"correction_files/after_graphs/\" + self.station_name +\n \"_after_corrections_composite_graph.html\")\n else:\n # Incorrect setup of script mode variable, raise an error\n raise ValueError('Incorrect parameters: script mode is not set to a valid option.')\n\n # Temperature Maximum and Minimum Plot\n plot_tmax_tmin = plotting_functions.line_plot(x_size, y_size, self.dt_array, self.data_tmax,\n self.data_tmin, 1, '')\n plot_list.append(plot_tmax_tmin)\n # Temperature Minimum and Dewpoint Plot\n plot_tmin_tdew = plotting_functions.line_plot(x_size, y_size, self.dt_array, self.data_tmin,\n self.data_tdew, 2, '', plot_tmax_tmin)\n plot_list.append(plot_tmin_tdew)\n\n # Subplot 3 changes based on what variables are provided\n if self.column_df.ea != -1: # Vapor pressure was provided\n plot_humid = plotting_functions.line_plot(x_size, y_size, self.dt_array, self.data_ea, self.data_null,\n 7, 'Provided ', 
plot_tmax_tmin)\n\n elif self.column_df.ea == -1 and self.column_df.tdew != -1: # Tdew was provided, show calculated ea\n plot_humid = plotting_functions.line_plot(x_size, y_size, self.dt_array, self.data_ea, self.data_null,\n 7, 'Calculated ', plot_tmax_tmin)\n\n elif self.column_df.ea == -1 and self.column_df.tdew == -1 and \\\n self.column_df.rhmax != -1 and self.column_df.rhmin != -1: # RH max and RH min\n plot_humid = plotting_functions.line_plot(x_size, y_size, self.dt_array, self.data_rhmax,\n self.data_rhmin, 8, '', plot_tmax_tmin)\n\n elif self.column_df.ea == -1 and self.column_df.tdew == -1 and \\\n self.column_df.rhmax == -1 and self.column_df.rhavg != -1: # RH Avg\n plot_humid = plotting_functions.line_plot(x_size, y_size, self.dt_array, self.data_rhavg,\n self.data_null, 9, '', plot_tmax_tmin)\n else:\n # If an unsupported combination of humidity variables is present, raise a value error.\n raise ValueError('Bokeh figure generation encountered an unexpected combination of humidity inputs.')\n\n plot_list.append(plot_humid)\n\n # If both ea and rhmax/rhmin are provided, generate a supplementary rhmax/min graph\n if self.column_df.rhmax != -1 and self.column_df.rhmin != -1 and self.column_df.ea != -1:\n plot_supplemental_rh = plotting_functions.line_plot(x_size, y_size, self.dt_array, self.data_rhmax,\n self.data_rhmin, 8, '', plot_tmax_tmin)\n plot_list.append(plot_supplemental_rh)\n\n # Mean Monthly Temperature Minimum and Dewpoint\n plot_mm_tmin_tdew = plotting_functions.line_plot(x_size, y_size, self.mm_dt_array, self.mm_tmin,\n self.mm_tdew, 2, 'MM ')\n plot_list.append(plot_mm_tmin_tdew)\n\n # Mean Monthly k0 curve (Tmin-Tdew)\n plot_mm_k_not = plotting_functions.line_plot(x_size, y_size, self.mm_dt_array, self.mm_k_not,\n self.data_null, 10, '', plot_mm_tmin_tdew)\n plot_list.append(plot_mm_k_not)\n\n # Solar radiation and clear sky solar radiation\n plot_rs_rso = plotting_functions.line_plot(x_size, y_size, self.dt_array, self.data_rs, self.rso,\n 5, '', plot_tmax_tmin)\n plot_list.append(plot_rs_rso)\n\n # Windspeed\n plot_ws = plotting_functions.line_plot(x_size, y_size, self.dt_array, self.data_ws, self.data_null,\n 3, '', plot_tmax_tmin)\n plot_list.append(plot_ws)\n\n # Precipitation\n plot_precip = plotting_functions.line_plot(x_size, y_size, self.dt_array, self.data_precip, self.data_null,\n 4, '', plot_tmax_tmin)\n plot_list.append(plot_precip)\n\n # Optimized mean monthly Thornton-Running solar radiation and Mean Monthly solar radiation\n plot_mm_opt_rs_tr = plotting_functions.line_plot(x_size, y_size, self.mm_dt_array, self.mm_rs,\n self.mm_opt_rs_tr, 6, 'MM Optimized ', plot_mm_tmin_tdew)\n plot_list.append(plot_mm_opt_rs_tr)\n\n # Optimized mean monthly Thornton-Running solar radiation and Mean Monthly solar radiation\n plot_mm_orig_rs_tr = plotting_functions.line_plot(x_size, y_size, self.mm_dt_array, self.mm_rs,\n self.mm_orig_rs_tr, 6, 'MM Original ', plot_mm_tmin_tdew)\n plot_list.append(plot_mm_orig_rs_tr)\n\n # Now construct grid plot out of all of the subplots\n number_of_plots = len(plot_list)\n number_of_rows = ceil(number_of_plots / self.gridplot_columns)\n\n grid_of_plots = [([None] * 1) for i in range(number_of_rows)]\n\n for i in range(number_of_rows):\n for j in range(self.gridplot_columns):\n\n if len(plot_list) > 0:\n grid_of_plots[i][j] = plot_list.pop(0)\n else:\n pass\n\n fig = gridplot(grid_of_plots, toolbar_location='left')\n save(fig)\n\n print(\"\\nSystem: Composite bokeh graph has been generated.\")", "def generate_plots():\n\n hmp = 
homemonitor_plot()\n hmp.load_data()\n hmp.plot_day()\n hmp.plot_hist()", "def _create_html(self, workdir, templatePath, imgFormat):\n labels = ['143&deg;E','156&deg;E','165&deg;E','180&deg;E','190&deg;E','205&deg;E','220&deg;E','235&deg;E','250&deg;E','265&deg;E']\n depths = ['143', '156', '165', '180', '190', '205', '220', '235', '250', '265']\n num_cols = 5\n long_plot_table = []\n lat_plot_table = []\n\n # generate the longitude table\n plot_tuple_list = []\n plot_list = eval('self._expectedPlots_Longitude_Depth')\n\n for j in range(len(plot_list)):\n img_file = '{0}.{1}'.format(plot_list[j], imgFormat)\n rc, err_msg = cesmEnvLib.checkFile( '{0}/{1}'.format(workdir, img_file), 'read' )\n if not rc:\n plot_tuple = (j, self._longitude_linkNames[j],'{0} - Error'.format(img_file))\n else:\n plot_tuple = (j, self._longitude_linkNames[j], img_file)\n plot_tuple_list.append(plot_tuple)\n\n print('DEBUG... plot_tuple_list = {0}'.format(plot_tuple_list))\n long_plot_table.append(plot_tuple_list)\n\n # generate the latitude table\n for i in range(len(labels)):\n plot_tuple_list = []\n plot_tuple = (0, 'label','{0}:'.format(labels[i]))\n plot_tuple_list.append(plot_tuple)\n plot_list = eval('self._expectedPlots_Latitude_Depth_{0}'.format(depths[i]))\n\n for j in range(num_cols - 1):\n img_file = '{0}.{1}'.format(plot_list[j], imgFormat)\n rc, err_msg = cesmEnvLib.checkFile( '{0}/{1}'.format(workdir, img_file), 'read' )\n if not rc:\n plot_tuple = (j+1, self._latitude_linkNames[j],'{0} - Error'.format(img_file))\n else:\n plot_tuple = (j+1, self._latitude_linkNames[j], img_file)\n plot_tuple_list.append(plot_tuple)\n\n print('DEBUG... plot_tuple_list[{0}] = {1}'.format(i, plot_tuple_list))\n lat_plot_table.append(plot_tuple_list)\n\n # create a jinja2 template object\n templateLoader = jinja2.FileSystemLoader( searchpath=templatePath )\n templateEnv = jinja2.Environment( loader=templateLoader, keep_trailing_newline=False )\n\n template = templateEnv.get_template( self._template_file )\n\n # add the template variables\n templateVars = { 'title' : self._name,\n 'long_plot_table' : long_plot_table,\n 'lat_plot_table' : lat_plot_table,\n 'num_rows' : len(labels),\n 'cols' : num_cols\n }\n\n # render the html template using the plot tables\n self._html = template.render( templateVars )\n \n return self._shortname, self._html", "def generate_data():\n for subdir, dirs, files in os.walk(legend_images_dir):\n for _file in files:\n getTables(_file)\n\n file_list = []\n for subdir, dirs, files in os.walk(pdf_output_dir):\n for _file in files:\n if _file.endswith('.pdf'):\n file_list.append(_file)\n\n print (\"Writing merged output in Output.pdf...\")\n current_dir = os.getcwd()\n mergeOutput(file_list, current_dir + \"/Output.pdf\")\n\n clean()", "def _make_tables(self, df):\n # Time table:\n time_keys = ['time', 'endtime', 'event_number_nv']\n self.df_event_time = df.loc[:, time_keys]\n\n # Properties tables:\n pos_keys = ['angle', 'pos_x', 'pos_x_spread', 'pos_y',\n 'pos_y_spread', 'pos_z', 'pos_z_spread']\n self.df_event_position = df.loc[:, pos_keys]\n\n keys = df.columns.values\n keys = [k for k in keys if k not in time_keys + pos_keys]\n self.df_event_properties = df.loc[:, keys]\n\n # Table panels:\n index = self.evt_sel_slid.value\n self.time_table = pn.panel(self.df_event_time.loc[index],\n )\n self.pos_table = pn.panel(self.df_event_position.loc[index:index, :],\n sizing_mode='scale_width')\n\n self.prop_table = pn.panel(self.df_event_properties.loc[index:index, :],\n sizing_mode='scale_width')", 
"def make_posterior_plots(self, combined=False):\n import matplotlib.pyplot as plt\n plt.rcParams['text.usetex'] = True\n\n if combined:\n outdir = os.path.join(self.outdir, 'CombinedPosteriors')\n maintitle = self.make_main_title(end='Posteriors')\n else:\n outdir = os.path.join(self.outdir, 'IndividualPosteriors')\n maintitle = self.make_main_title(end='Posterior')\n mkdir(outdir)\n\n for injkey in self.values.keys():\n for fhkey in self.values[injkey].keys():\n # Set up multi-plot if needed\n if combined:\n num_rows = self.get_num_rows(\n data=self.values[injkey][fhkey],\n omit_metric=False\n )\n plt.figure(figsize=(20, 5*num_rows+2))\n subplotnum = 1\n else:\n subplotnum = None\n # Loop through the systematics\n for systkey in self.values[injkey][fhkey].keys():\n fittitle = self.make_fit_title(\n fhkey=fhkey,\n trials=self.num_trials\n )\n systunits = self.values[injkey][fhkey][systkey]['units']\n if systkey == 'metric_val':\n xlabel = self.tex_axis_label(\n self.values[injkey][fhkey][systkey]['type']\n )\n else:\n xlabel = self.tex_axis_label(systkey)\n if not systunits == 'dimensionless':\n xlabel += r' (%s)'%self.tex_axis_label(systunits)\n # Specify the subplot, if necessary\n if combined:\n plt.subplot(num_rows, 4, subplotnum)\n self.make_1d_hist_plot(\n data=np.array(\n self.values[injkey][fhkey][systkey]['vals']\n ),\n xlabel=xlabel,\n title=maintitle+r'\\\\'+fittitle,\n ylabel='Number of Trials',\n subplotnum=subplotnum\n )\n # Add the details i.e. injected/fiducial lines and priors\n plt.ylim(0, 1.35*plt.ylim()[1])\n if not systkey == 'metric_val':\n self.add_inj_fid_lines(\n injkey=injkey,\n systkey=systkey,\n fhkey=fhkey\n )\n self.add_prior_region(\n injkey=injkey,\n systkey=systkey,\n fhkey=fhkey\n )\n plt.legend(\n loc='upper left',\n fontsize=12,\n framealpha=1.0\n )\n plt.subplots_adjust(\n left=0.10,\n right=0.90,\n top=0.85,\n bottom=0.11\n )\n # Advance the subplot number, if necessary\n if combined:\n subplotnum += 1\n # Else, save/close this plot\n else:\n self.save_plot(\n fhkey=fhkey,\n outdir=outdir,\n end='%s_posterior'%systkey\n )\n plt.close()\n # Save the whole canvas, if necessary\n if combined:\n plt.suptitle(maintitle+r'\\\\'+fittitle, fontsize=36)\n plt.tight_layout()\n plt.subplots_adjust(top=0.9)\n self.save_plot(\n fhkey=fhkey,\n outdir=outdir,\n end='posteriors'\n )\n plt.close()", "def plot_settings_table(settings, table_nr=1, plot_out=None):\n\n keys = settings.keys()\n\n data_matrix_1 = [keys[:len(keys) / 3], []]\n for key in data_matrix_1[0]:\n data_matrix_1[1].append(str(settings[key]))\n\n data_matrix_2 = [keys[len(keys) / 3:2 * len(keys) / 3], []]\n for key in data_matrix_2[0]:\n data_matrix_2[1].append(str(settings[key]))\n\n data_matrix_3 = [keys[2 * len(keys) / 3:len(keys)], []]\n for key in data_matrix_3[0]:\n data_matrix_3[1].append(str(settings[key]))\n\n data = [data_matrix_1, data_matrix_2, data_matrix_3]\n\n nr_columns = len(data[table_nr - 1][0])\n plot = {'data': [{'colorscale': [[0, '#00083e'], [0.5, '#ededee'], [1, '#ffffff']],\n 'hoverinfo': 'none',\n 'opacity': 0.75,\n 'showscale': False,\n 'type': 'heatmap',\n 'z': [[0, 0.5] for row in range(nr_columns)]\n }],\n 'layout': {\n 'annotations': [],\n 'yaxis1': {'autorange': 'reversed',\n 'showgrid': False,\n 'showticklabels': False,\n 'zeroline': False,\n 'ticks': ''\n },\n 'xaxis1': {\n 'showgrid': False,\n 'showticklabels': False,\n 'zeroline': False,\n 'ticks': '',\n 'range': [0, 1]\n\n },\n 'title': \" \"\n }\n }\n\n # heading\n for table_cell in range(nr_columns):\n 
plot['layout']['annotations'].append({})\n plot['layout']['annotations'][table_cell].update({'text': data[table_nr - 1][0][table_cell]})\n plot['layout']['annotations'][table_cell].update({'font': {\n 'color': '#ffffff',\n 'size': 15}\n })\n plot['layout']['annotations'][table_cell].update({'y': table_cell})\n plot['layout']['annotations'][table_cell].update({'x': 0.1})\n plot['layout']['annotations'][table_cell].update({'xref': 'x1'})\n plot['layout']['annotations'][table_cell].update({'yref': 'y1'})\n plot['layout']['annotations'][table_cell].update({'align': 'center'})\n plot['layout']['annotations'][table_cell].update({'xanchor': 'left'})\n plot['layout']['annotations'][table_cell].update({'showarrow': False})\n\n # content\n for table_cell in range(nr_columns):\n plot['layout']['annotations'].append({})\n plot['layout']['annotations'][table_cell + nr_columns].update({'text': data[table_nr - 1][1][table_cell]})\n plot['layout']['annotations'][table_cell + nr_columns].update({'x': 0.75})\n plot['layout']['annotations'][table_cell + nr_columns].update({'y': table_cell})\n plot['layout']['annotations'][table_cell + nr_columns].update({'xref': 'x1'})\n plot['layout']['annotations'][table_cell + nr_columns].update({'yref': 'y1'})\n plot['layout']['annotations'][table_cell + nr_columns].update({'showarrow': False})\n\n if plot_out is not None:\n plotly_plot(plot, filename=settings['plot_out'], auto_open=False)\n else:\n return plot", "def make_training_curve_plots(algos,\n tasks,\n n_runs_per_expt,\n csv_filepath_template,\n figure_outdir,\n window_size=None,\n subplot_height=5,\n subplot_width=8):\n n_algo = len(algos)\n n_task = len(tasks)\n\n plt.figure('raw', figsize=(subplot_width * n_algo, subplot_height * n_task))\n plt.figure(\n 'medians_percentiles',\n figsize=(subplot_width * n_algo, subplot_height * n_task))\n n_subplots_x, n_subplots_y = subplots_square(n_task)\n plt.figure(\n 'medians_percentiles_pertask',\n figsize=(subplot_width * n_subplots_x, subplot_height * n_subplots_y))\n plt.figure(\n 'means_CIs_pertask',\n figsize=(subplot_width * n_subplots_x, subplot_height * n_subplots_y))\n fig_names = [\n 'raw', 'medians_percentiles', 'medians_percentiles_pertask',\n 'means_CIs_pertask'\n ]\n\n subplot_pos = 0\n # Iterate through each task.\n for i_task, task in enumerate(tasks):\n print('%s...' 
% task, end='')\n\n # Initialize x- and y-lims.\n xlims_extremes = [np.inf, -np.inf]\n task_baselines = [\n baseline for key, baseline in BASELINES.items() if task in key\n ]\n if task_baselines:\n ylims_extremes = [np.inf, max(task_baselines)]\n else:\n ylims_extremes = [np.inf, -np.inf]\n\n # Iterate through each algorithm.\n for i_algo, algo in enumerate(algos):\n subplot_pos += 1\n algo_color = ALGO_COLORS[i_algo]\n\n plt.figure('raw')\n plt.subplot(n_task, n_algo, subplot_pos)\n\n # Load and plot the raw curves.\n curves = []\n for run in range(n_runs_per_expt):\n csv_filepath = csv_filepath_template % (task, algo, run)\n with open(csv_filepath, 'r') as csv_file:\n csv_reader = csv.reader(csv_file)\n curve = []\n for _ in range(2):\n curve.append(np.array(csv_reader.next(), dtype=float))\n curves.append(curve)\n plt.plot(curve[0], curve[1])\n plot_baseline(algo, task)\n\n # update the xlim/ylim extremes\n xlims_extremes, ylims_extremes = update_xylims_extremes(\n xlims_extremes, ylims_extremes)\n\n # Compute summaries\n curves = np.array(curves)\n timesteps, window_means = compute_window_means(curves, window_size)\n means = compute_means(window_means)\n medians = compute_medians(window_means)\n cis = compute_boot_ci(window_means)\n percentiles = compute_percentiles(window_means)\n\n # plot the medians + percentiles\n plt.figure('medians_percentiles')\n plt.subplot(n_task, n_algo, subplot_pos)\n fill_plot(timesteps, medians, percentiles[0], percentiles[1], algo, task,\n algo_color)\n\n # Plot the medians + percentiles on a single plot per task.\n plt.figure('medians_percentiles_pertask')\n plt.subplot(n_subplots_y, n_subplots_x, i_task + 1)\n fill_plot(timesteps, medians, percentiles[0], percentiles[1], algo, task,\n algo_color)\n\n # Plot the mean + CI on a single plot per task.\n plt.figure('means_CIs_pertask')\n plt.subplot(n_subplots_y, n_subplots_x, i_task + 1)\n fill_plot(timesteps, means, cis[0], cis[1], algo, task, algo_color)\n\n # Figure titles.\n for fig_name in ['raw', 'medians_percentiles']:\n plt.figure(fig_name)\n plt.title('%s - %s' % (algo, task))\n for fig_name in ['medians_percentiles_pertask', 'means_CIs_pertask']:\n plt.figure(fig_name)\n plt.title(task)\n\n # equalize axes for the task\n for fig_name in ['raw', 'medians_percentiles']:\n equalize_axes_for_task(fig_name, xlims_extremes, ylims_extremes,\n subplot_pos, n_task, n_algo)\n\n # Add legends\n for fig_name in ['medians_percentiles_pertask', 'means_CIs_pertask']:\n plt.figure(fig_name)\n plt.legend(algos)\n\n # Save the figures.\n io_utils.makedirs(figure_outdir)\n for fig_name in fig_names:\n plt.figure(fig_name)\n plt.tight_layout()\n output_path = '%s/%s.png' % (figure_outdir, fig_name)\n with open(output_path, 'wb') as outfile:\n plt.savefig(outfile, dpi=100)", "def make_figure_table(image_files):\n cols = 2\n table_data = []\n row_data = []\n for i, fn in enumerate(image_files):\n row_data.append(p.Image(fn, 3.4 * u.inch, 3.0 * u.inch))\n if (i % cols) == (cols - 1):\n table_data.append(row_data)\n row_data = []\n\n # Determine if there are any images left to print\n if len(row_data) != 0:\n for i in range(len(row_data), cols):\n row_data.append(p.Paragraph(\"\", STYLES[\"body_style\"]))\n table_data.append(row_data)\n\n # Style this into a reportlab table and add to the story\n width = 3.75 * u.inch\n table = p.Table(table_data, colWidths=[width, width])\n table.setStyle(\n p.TableStyle(\n [\n (\"ALIGNMENT\", (0, 0), (-1, -1), \"CENTER\"),\n (\"VALIGN\", (0, 0), (-1, -1), \"CENTER\"),\n 
(\"TOPPADDING\", (0, 0), (-1, -1), 6.0),\n (\"BOTTOMPADDING\", (0, 0), (-1, -1), 6.0),\n ]\n )\n )\n return table", "def buildTables(self, xmlfile, htmlfile):\n t = xeputils.xeptable.XEPTable()\n for xep in self.xeps:\n # Do not include XEP readme and template in the table.\n if isinstance( xep.nr, ( int, long ) ) :\n t.updateXEP(xep)\n t.writeXMLTable(xmlfile)\n t.writeHTMLTable(htmlfile)", "def defstuff():\n\t\n\tglobal PA, PB, col, col2, rng, xlimits, nbin, lPbw, WJK, outTab\n\t\n\tPA = ['Per1', 'Per2', 'Per3', 'Per4', 'Per5', 'Per6', 'Per7', 'Per8', 'Per9', 'Per10'] # Period columns for A sample\n\tPB = ['P_1', 'P_2', 'P_3'] # Period columns for B sample\n\t# logPB = ['logP_1', 'logP_2', 'logP_3'] \n\tcol = {1:'r', 2:'g', 3:'b'} \n\tcol2 = {1:'m', 2:'y', 3:'k'}\n\trng = (8,14) # Magnitude range\n\txlimits = (0.3 ,3.0) # X-axis plot limits\n\tbw = 0.01 # histogram bin width -- not global!\n\tnbin = (max(rng)-min(rng))/bw # How many bins for histogram.\n\n\t################# CAREFUL!!!!! #####################\n\tlPbw = 0.025 # log period bin width\n\t\n\toutTab = Table(np.zeros((len(B), 11)), names=('ID', 'WJK', 'est_mag', 'delta_mag', 'delta1', 'delta2', 'delta3', 'KDE_mag', 'KDEdelta_mag', 'sigma', 'nstar'), dtype=('string', 'float64', 'float64', 'float64', 'float64', 'float64', 'float64', 'float64', 'float64', 'float64', 'float64' ))", "def create_tables(times, accuracies, batch_sizes):\r\n #Get time data\r\n p_cpu_times = list(times[0].values())\r\n p_gpu_times = list(times[1].values())\r\n c_cpu_times = list(times[2].values())\r\n c_gpu_times = list(times[3].values())\r\n\r\n #Get differences in times\r\n p_diff_times = [a - b for a, b in zip(p_cpu_times, p_gpu_times)]\r\n c_diff_times = [a - b for a, b in zip(c_cpu_times, c_gpu_times)]\r\n cpu_diff_times = [a - b for a, b in zip(p_cpu_times, c_cpu_times)]\r\n gpu_diff_times = [a - b for a, b in zip(p_gpu_times, c_gpu_times)]\r\n\r\n #Set data in np array for table\r\n data = np.array([p_cpu_times,\r\n p_gpu_times,\r\n p_diff_times,\r\n c_cpu_times,\r\n c_gpu_times,\r\n c_diff_times,\r\n cpu_diff_times,\r\n gpu_diff_times]).T\r\n\r\n #Get data in text format\r\n n_rows = data.shape[0]\r\n cell_text = []\r\n for row in range(n_rows):\r\n cell_text.append(['%1.3f' % x for x in data[row]])\r\n \r\n #Get rows and cols for table\r\n columns = ('P CPU Time (s)', 'P GPU Time (s)', 'P Diff (s)', 'C CPU Time (s)', 'C GPU Time (s)', 'C Diff (s)', 'CPU Diff (s)', 'GPU Diff (s)')\r\n row_colors = plt.cm.BuPu(np.linspace(0, 0.5, n_rows))\r\n col_colors = np.array([192/255,192/255,192/255, 1])\r\n col_colors = np.repeat(col_colors.reshape((1, col_colors.shape[0])), len(columns), axis=0)\r\n\r\n #Create table\r\n plt.figure(figsize=(10.8,9.4)).canvas.set_window_title('CPU vs GPU MNIST Neural Network')\r\n plt.table(cellText=cell_text,\r\n rowLabels=batch_sizes,\r\n rowColours=row_colors,\r\n colLabels=columns,\r\n colColours=col_colors,\r\n loc='center')\r\n ax = plt.gca()\r\n ax.axis('off')\r\n plt.savefig('results\\\\figures\\\\table_time.png')\r\n\r\n\r\n #Get accuracy table\r\n #Get accuracy data\r\n p_cpu_accuracy = list(accuracies[0].values())\r\n p_gpu_accuracy = list(accuracies[1].values())\r\n c_cpu_accuracy = list(accuracies[2].values())\r\n c_gpu_accuracy = list(accuracies[3].values())\r\n\r\n #Get max of each batch\r\n p_cpu_max = [max(x) for x in p_cpu_accuracy]\r\n p_gpu_max = [max(x) for x in p_gpu_accuracy]\r\n c_cpu_max = [max(x) for x in c_cpu_accuracy]\r\n c_gpu_max = [max(x) for x in c_gpu_accuracy]\r\n\r\n #Get differences in 
accuracies\r\n p_diff_acc = [a - b for a, b in zip(p_cpu_max, p_gpu_max)]\r\n c_diff_acc = [a - b for a, b in zip(c_cpu_max, c_gpu_max)]\r\n cpu_diff_acc = [a - b for a, b in zip(p_cpu_max, c_cpu_max)]\r\n gpu_diff_acc = [a - b for a, b in zip(p_gpu_max, c_gpu_max)]\r\n\r\n #Set data in np array for table\r\n data = np.array([p_cpu_max,\r\n p_gpu_max,\r\n p_diff_acc,\r\n c_cpu_max,\r\n c_gpu_max,\r\n c_diff_acc,\r\n cpu_diff_acc,\r\n gpu_diff_acc]).T\r\n\r\n #Get data in text format\r\n n_rows = data.shape[0]\r\n cell_text = []\r\n for row in range(n_rows):\r\n cell_text.append(['%1.3f' % x for x in data[row]])\r\n \r\n #Get rows and cols for table\r\n columns = ('P CPU Acc (%)', 'P GPU Acc (%)', 'P Diff (%)', 'C CPU Acc (%)', 'C GPU Acc (%)', 'C Diff (%)', 'CPU Diff (%)', 'GPU Diff (%)')\r\n\r\n #Create table\r\n plt.clf()\r\n plt.figure(figsize=(10.8,9.4)).canvas.set_window_title('CPU vs GPU MNIST Neural Network')\r\n plt.table(cellText=cell_text,\r\n rowLabels=batch_sizes,\r\n rowColours=row_colors,\r\n colLabels=columns,\r\n colColours=col_colors,\r\n loc='center')\r\n ax = plt.gca()\r\n ax.axis('off')\r\n plt.savefig('results\\\\figures\\\\table_acc.png')", "def main():\n data_visualisation()\n write_hyper_params()\n write_result_tables()\n write_box_plots()", "def makePdf(sources):\n pdf = PdfPages(\"sample_features.pdf\")\n classnames = []\n classname_dict = {}\n x = 2 # number of subplot columns\n y = 3 # number of subplot rows\n for source in sources:\n lc = source.lcs[0]\n\n if lc.classname not in classnames:\n classnames.append(lc.classname)\n classname_dict[lc.classname] = [lc]\n else:\n classname_dict[lc.classname].append(lc)\n\n if len(classname_dict[lc.classname]) < 3:\n\n label = lc.classname + \"; ID: \" + lc.id\n # all_times histogram:\n fig = plt.figure()\n ax = fig.add_subplot(111)\n ax.set_title(label)\n ax.axis('off')\n\n ax1 = fig.add_subplot(321)\n ax2 = fig.add_subplot(322)\n ax2.axis('off')\n ax3 = fig.add_subplot(323)\n ax4 = fig.add_subplot(324)\n ax4.axis('off')\n ax5 = fig.add_subplot(325)\n ax6 = fig.add_subplot(326)\n ax6.axis('off')\n\n hist, bins, other = ax1.hist(lc.all_times, 50, normed=True)\n ax1.text(np.max(bins) * 0.1, np.max(hist) * 0.8,\n r'Histogram (normed) of all $\\Delta$Ts')\n\n ax2.text(0.0, 0.9, (r'$\\bullet$med time to next obs: ' +\n str(np.round(lc.cads_med, 4))))\n ax2.text(0.0, 0.75, (r'$\\bullet$avg time to next obs: ' +\n str(np.round(lc.avgt, 4))))\n ax2.text(0.0, 0.6, (r'$\\bullet$std dev of time to next obs: ' +\n str(np.round(lc.cads_std, 4))))\n ax2.text(0.0, 0.45, (r'$\\bullet$med of all $\\Delta$Ts: ' +\n str(np.round(lc.all_times_med, 4))))\n ax2.text(0.0, 0.3, (r'$\\bullet$avg of all $\\Delta$Ts: ' +\n str(np.round(lc.all_times_avg, 4))))\n ax2.text(0.0, 0.15, (r'$\\bullet$std dev of all $\\Delta$Ts: ' +\n str(np.round(lc.all_times_std, 4))))\n\n hist, bins, other = ax3.hist(lc.cads, 50)\n ax3.text(np.max(bins) * 0.1, np.max(hist) * 0.8,\n r'Hist of time to next obs')\n\n ax6.text(\n 0.0, 0.9, r'$\\bullet$Number of epochs: ' + str(lc.n_epochs))\n ax6.text(0.0, 0.75, (r'$\\bullet$Time b/w first & last obs (days): ' +\n str(np.round(lc.total_time, 2))))\n ax6.text(0.0, 0.6, (r'$\\bullet$Average error in mag: ' +\n str(np.round(lc.avg_err, 4))))\n ax6.text(0.0, 0.45, (r'$\\bullet$Median error in mag: ' +\n str(np.round(lc.med_err, 4))))\n ax6.text(0.0, 0.3, (r'$\\bullet$Std dev of error: ' +\n str(np.round(lc.std_err, 4))))\n ax6.text(0.0, 0.15, '')\n\n ax5.scatter(lc.epochs, lc.mags)\n\n ax4.text(0.0, 0.9, (r'$\\bullet$Avg 
double to single step ratio: ' +\n str(np.round(lc.avg_double_to_single_step, 3))))\n ax4.text(0.0, 0.75, (r'$\\bullet$Med double to single step: ' +\n str(np.round(lc.med_double_to_single_step, 3))))\n ax4.text(0.0, 0.6, (r'$\\bullet$Std dev of double to single step: ' +\n str(np.round(lc.std_double_to_single_step, 3))))\n ax4.text(\n 0.0, 0.45,\n (r'$\\bullet$1st peak to 2nd peak (in all $\\Delta$Ts): ' +\n str(np.round(lc.all_times_nhist_peak_1_to_2, 3))))\n ax4.text(\n 0.0, 0.3,\n (r'$\\bullet$2ndt peak to 3rd peak (in all $\\Delta$Ts): ' +\n str(np.round(lc.all_times_nhist_peak_2_to_3, 3))))\n ax4.text(\n 0.0, 0.15,\n (r'$\\bullet$1st peak to 3rd peak (in all $\\Delta$Ts): ' +\n str(np.round(lc.all_times_nhist_peak_1_to_3, 3))))\n\n pdf.savefig(fig)\n\n pdf.close()\n\n pdf = PdfPages('feature_plots.pdf')\n\n fig = plt.figure()\n\n ax1 = fig.add_subplot(221)\n ax2 = fig.add_subplot(222)\n ax3 = fig.add_subplot(223)\n ax4 = fig.add_subplot(224)\n\n plt.subplots_adjust(wspace=0.4, hspace=0.4)\n\n classnamenum = 0\n\n colors = ['red', 'yellow', 'green', 'blue', 'gray', 'orange', 'cyan',\n 'magenta']\n for classname, lcs in list(classname_dict.items()):\n classnamenum += 1\n print(classname, len(lcs), 'light curves.')\n attr1 = []\n attr2 = []\n attr3 = []\n attr4 = []\n attr5 = []\n attr6 = []\n attr7 = []\n attr8 = []\n for lc in lcs:\n attr1.append(lc.n_epochs)\n attr2.append(lc.avgt)\n attr3.append(lc.cads_std)\n attr4.append(lc.total_time)\n attr5.append(lc.all_times_hist_peak_val)\n attr6.append(lc.cad_probs[5000])\n attr7.append(lc.all_times_nhist_peak_1_to_3)\n attr8.append(lc.all_times_nhist_peak_val)\n\n ax2.scatter(attr1, attr2, color=colors[classnamenum], label=classname)\n ax1.scatter(attr3, attr4, color=colors[classnamenum], label=classname)\n ax2.set_xlabel('N Epochs')\n ax2.set_ylabel('Avg time to next obs')\n ax1.set_xlabel('Standard dev. 
of time to next obs')\n ax1.set_ylabel('Time b/w first and last obs')\n\n ax3.scatter(attr5, attr6, color=colors[classnamenum], label=classname)\n ax4.scatter(attr7, attr8, color=colors[classnamenum], label=classname)\n ax3.set_xlabel(r'All $\\Delta$T hist peak val')\n ax3.set_ylabel('Prob time to next obs <= 5000 min')\n ax4.set_xlabel(r'$\\Delta$Ts normed hist peak 1 to peak 3')\n ax4.set_ylabel(r'Peak val of all $\\Delta$Ts normed hist')\n\n #ax1.legend(bbox_to_anchor=(1.1, 1.1),prop={'size':6})\n ax2.legend(bbox_to_anchor=(1.1, 1.1), prop={'size': 6})\n #ax3.legend(loc='upper right',prop={'size':6})\n #ax4.legend(loc='upper right',prop={'size':6})\n\n pdf.savefig(fig)\n\n pdf.close()\n return 0", "def main(otu_table, mapping_data, cat_tables, output_dir, sample_type='fecal',\n samples_to_plot=None, legend=False, xaxis=True, debug=False):\n\n # Sets constants for analyzing the data\n LEVEL = 2\n CATEGORY = 'taxonomy'\n NUM_TAXA = 9\n NUM_CATS_TO_PLOT = 7\n\n # Sets up plotting constants\n COLORMAP = array([[0.8353, 0.2421, 0.3098],\n [0.9569, 0.4275, 0.2627],\n [0.9922, 0.6824, 0.3804],\n [0.9961, 0.8784, 0.5351],\n [0.9020, 0.9608, 0.5961],\n [0.6706, 0.8667, 0.6431],\n [0.4000, 0.7608, 0.6471],\n [0.1961, 0.5333, 0.7412],\n [0.3333, 0.3333, 0.3333]])\n\n FIG_DIMS = (4.44444, 3.33333)\n AXIS_DIMS = array([[0.05, 0.05],\n [0.95, 0.95]])\n\n # Common taxa are designated before processing to remain constant.\n COMMON_TAXA = [(u'k__Bacteria', u'p__Firmicutes'),\n (u'k__Bacteria', u'p__Bacteroidetes'),\n (u'k__Bacteria', u'p__Proteobacteria'),\n (u'k__Bacteria', u'p__Actinobacteria'),\n (u'k__Bacteria', u'p__Verrucomicrobia'),\n (u'k__Bacteria', u'p__Tenericutes'),\n (u'k__Bacteria', u'p__Cyanobacteria'),\n (u'k__Bacteria', u'p__Fusobacteria')]\n\n SKIPSET = set(('Sample', 'Average', 'MP'))\n\n # Names categories being plotted\n if sample_type == 'fecal':\n michael_pollan = '10317.000007108'\n cat_list = ['You', 'Average', 'Similar Diet', ' Similar BMI',\n 'Same Gender', 'Similar Age', 'Michael Pollan']\n order = ['Sample', 'Average', 'DIET_TYPE', 'BMI_CAT', 'SEX',\n 'AGE_CAT', 'MP']\n\n elif sample_type == 'skin':\n michael_pollan = '10317.000007113'\n cat_list = ['You', 'Average', 'Similar Cosmetic Use',\n 'Same Dominant Hand', 'Same Gender', 'Same Age',\n 'Michael Pollan']\n order = ['Sample', 'Average', 'COSMETICS_FREQUENCY',\n 'DOMINANT_HAND', 'SEX', 'AGE_CAT', 'MP']\n\n elif sample_type == 'oral':\n michael_pollan = '10317.000007109'\n cat_list = ['You', 'Average', 'Similar Diet', 'Flossing Frequency',\n 'Same Gender', 'Same Age', 'Michael Pollan']\n order = ['Sample', 'Average', 'DIET_TYPE', 'FLOSSING_FREQUENCY',\n 'SEX', 'AGE_CAT', 'MP']\n\n else:\n raise ValueError('%s is not a supported sample type.' 
% sample_type)\n\n # Gets the mapping file\n map_dict = map_to_2D_dict(mapping_data)\n\n # Gets the category file dictionary summarized with the common categories\n # Generates the category file dictionary\n categories = parse_category_files(raw_tables=cat_tables,\n common_groups=COMMON_TAXA[:8],\n level=LEVEL,\n metadata=CATEGORY)\n\n # Summarizes taxonomy for the category\n (whole_sample_ids, whole_summary, new_common_taxa) = \\\n summarize_common_categories(biom_table=otu_table,\n level=LEVEL,\n common_categories=COMMON_TAXA[:8],\n metadata_category=CATEGORY)\n\n # Converts the final taxa to a cleaned up list\n # Converts final taxa to a clean list\n common_phyla = []\n for taxon in new_common_taxa:\n common_phyla.append(taxon[1].strip(' p__').strip('[').strip(']'))\n new_common_taxa = common_phyla\n\n # Checks that the crrect sample ids are plotted\n if samples_to_plot is None:\n sample_ids = whole_sample_ids\n else:\n sample_ids = samples_to_plot\n\n if len(sample_ids) > 1:\n # TODO: make the rest of the code reflect this...\n raise ValueError(\"SCRIPT NO LONGER SUPPORTS MULTIPLE SAMPLES\")\n\n # Identifies Michael Pollan's pre-ABX sample\n if debug:\n mp_sample_pos = 2\n else:\n mp_sample_pos = whole_sample_ids.tolist().index(michael_pollan)\n mp_sample_taxa = whole_summary[:, mp_sample_pos]\n\n # Gets the table average\n table_average = mean(whole_summary, 1)\n\n # Generates a figure for each sample\n for idx, sample_id in enumerate(whole_sample_ids):\n if sample_id in sample_ids:\n meta_data = map_dict[sample_id]\n # Prealocates a numpy array to hold the data\n tax_array = zeros((NUM_TAXA, NUM_CATS_TO_PLOT))\n\n # Adds preset values to the array so the first column is the sample\n # the second column is the average and the last column is Michael\n # Pollan\n tax_array[:, 0] = whole_summary[:, idx]\n tax_array[:, 1] = table_average\n tax_array[:, -1] = mp_sample_taxa\n\n # Adds the categories to the table in the listed order\n for idx, cat in enumerate(order):\n # Skips over undesired categories\n if cat in SKIPSET:\n continue\n # Gets the sample metadata\n mapping_key = meta_data[cat]\n # Pulls taxonomic summary and group descriptions\n tax_summary = categories[cat]['Summary']\n group_descriptions = categories[cat]['Groups'].tolist()\n # Appends plotting tables\n try:\n mapping_col = group_descriptions.index(mapping_key)\n except:\n raise ValueError('The %s cannot be found in %s.'\n % (mapping_key, cat))\n tax_array[:, idx] = tax_summary[:, mapping_col]\n\n # Sets up the file to save the data\n filename = pjoin(output_dir, 'figure4.pdf')\n\n # Plots the data\n render_barchart(data_table=tax_array,\n x_axis=False,\n group_names=new_common_taxa,\n legend=False,\n sample_names=cat_list,\n y_axis=False,\n axis_dims=AXIS_DIMS,\n fig_dims=FIG_DIMS,\n file_out=filename,\n show_edge=False,\n colors=COLORMAP)", "def create_plots(file_list):\n # load data and transpose so that country names are\n # the columns and their gdp data becomes the rows\n\n # read data into a pandas dataframe and transpose\n for filename in file_list:\n data = pandas.read_csv(filename, index_col = 'country').T\n \n # create a plot the transposed data\n ax = data.plot(title=filename)\n\n # axes labels\n ax.set_xlabel('Year')\n ax.set_ylabel('GDP Per Capita')\n\n # set axes ticks\n ax.set_xticks( range(len(data.index)))\n ax.set_xticklabels(data.index, rotation=45)\n\n # display the plot\n plt.show()", "def set_up_tables(self):\n tables = []\n tables.append({'groupname': 'metadata',\n 'tablename': 'sim_info',\n 
'description': desc.SimInfoRow,\n 'tabletitle': 'Simulation Information'})\n tables.append({'groupname': 'metadata',\n 'tablename': 'sim_timeseries',\n 'description': desc.SimTimeseriesRow,\n 'tabletitle': 'Simulation Power Data'})\n tables.append({'groupname': 'th',\n 'tablename': 'th_params',\n 'description': desc.ThMetadataRow,\n 'tabletitle': 'TH Component Parameters'})\n tables.append({'groupname': 'th',\n 'tablename': 'th_timeseries',\n 'description': desc.ThTimeseriesRow,\n 'tabletitle': 'TH Timeseries'})\n tables.append({'groupname': 'neutronics',\n 'tablename': 'neutronics_timeseries',\n 'description': desc.NeutronicsTimeseriesRow,\n 'tabletitle': 'Neutronics Timeseries'})\n tables.append({'groupname': 'neutronics',\n 'tablename': 'neutronics_params',\n 'description': desc.NeutronicsParamsRow,\n 'tabletitle': 'Neutronics Metadata'})\n tables.append({'groupname': 'neutronics',\n 'tablename': 'zetas',\n 'description': desc.ZetasTimestepRow,\n 'tabletitle': 'Neutron Precursor Concentrations'})\n tables.append({'groupname': 'neutronics',\n 'tablename': 'omegas',\n 'description': desc.OmegasTimestepRow,\n 'tabletitle': 'Decay Heat Fractions'})\n return tables", "def make_HTML_table(l, other_frac, total, red, other_cat, fracs_labels_other,\r\n fracs_labels, dir_path, all_counts, level,\r\n prefs, pref_colors, background_color, label_color, chart_type,\r\n label, generate_image_type,\r\n plot_width, plot_height, bar_width, dpi, resize_nth_label,\r\n label_type, include_html_legend, include_html_counts):\r\n img_data = []\r\n\r\n # generate html for pie charts\r\n if chart_type == 'pie':\r\n # in the case the user wants to trim down the number of taxa\r\n if other_cat > 0:\r\n # first generate the pie charts containing an other group for all\r\n # taxa below the cutoff.\r\n fracs_labels_other.append((\"All Other Categories\", other_frac))\r\n title = TITLE_include % (l, total, total,\r\n len(fracs_labels_other), total - red, other_cat)\r\n all_taxons = [l]\r\n pie_charts_placement = []\r\n\r\n # make pie chart image\r\n pie = make_pie_chart(fracs_labels_other, dir_path, level,\r\n prefs, pref_colors, background_color, label_color,\r\n generate_image_type,\r\n plot_width, plot_height, bar_width, dpi,\r\n include_html_legend,\r\n props={'title': title})\r\n\r\n pie_charts_placement.append(pie[0] + '&nbsp;&nbsp;' + pie[1] +\r\n '</td></tr><tr><td>' + pie[2] +\r\n '</td></tr><tr><td class=\"ntitle\">')\r\n\r\n # second generate the pie charts where the other category is removed\r\n # and percents are recalculated\r\n title = TITLE_exclude % (l, red, total, len(fracs_labels),\r\n total - red, other_cat)\r\n\r\n # make pie chart image\r\n pie = make_pie_chart(fracs_labels, dir_path, level,\r\n prefs, pref_colors, background_color, label_color,\r\n generate_image_type,\r\n plot_width, plot_height, bar_width, dpi,\r\n include_html_legend,\r\n props={'title': title})\r\n\r\n pie_charts_placement.append(pie[0] + '&nbsp;&nbsp;' + pie[1] +\r\n '</td></tr><tr><td class=\"ntitle\">' +\r\n pie[2])\r\n\r\n all_taxons.extend(pie_charts_placement)\r\n all_taxons.extend((\" \", \" \"))\r\n\r\n # put the charts into the html image data\r\n img_data.append(TABLE_graph % tuple(all_taxons))\r\n img_data.append(DATA_TABLE_HTML % ''.join(all_counts))\r\n\r\n else:\r\n # if there is no category cutoff generate plots, without other cat\r\n title = TITLE % (l, total, total, len(fracs_labels_other))\r\n all_taxons = [l]\r\n\r\n # make pie chart image\r\n pie = make_pie_chart(fracs_labels_other, dir_path, level,\r\n 
prefs, pref_colors, background_color, label_color,\r\n generate_image_type,\r\n plot_width, plot_height, bar_width, dpi,\r\n include_html_legend,\r\n props={'title': title})\r\n\r\n all_taxons.extend(pie)\r\n\r\n # put the charts into the html image data\r\n img_data.append(TABLE_graph % tuple(all_taxons))\r\n img_data.append(DATA_TABLE_HTML % ''.join(all_counts))\r\n\r\n # generate html for bar and area charts\r\n elif chart_type == 'area' or chart_type == 'bar':\r\n\r\n taxa_percents = fracs_labels_other\r\n sample_ids = l\r\n taxa = other_cat\r\n\r\n all_categories = []\r\n title = TITLE % (label, total, total, len(fracs_labels_other))\r\n all_taxons = [label]\r\n\r\n # make area chart image\r\n area = make_area_bar_chart(sample_ids, taxa_percents, taxa, dir_path,\r\n level, prefs, pref_colors,\r\n background_color, label_color, chart_type,\r\n generate_image_type,\r\n plot_width, plot_height, bar_width, dpi,\r\n resize_nth_label, label_type,\r\n include_html_legend, include_html_counts,\r\n props={'title': title})\r\n\r\n all_taxons.extend(area)\r\n\r\n # put the charts into the html image data\r\n img_data.append(TABLE_graph % tuple(all_taxons))\r\n\r\n return img_data", "def draw_all_plots(self):\n\n plot_names = []\n e = self.find_di_tri(self.lang_found)\n letter_dct = e[1]\n di_dct = e[2]\n tri_dct = e[3]\n\n plot_name = self.lang_found + '_letters'\n self.wykres(letter_dct, 'Wyres liter', 'litera', plot_name, 0)\n plot_names.append(plot_name)\n plot_name = self.lang_found + '_digram'\n self.wykres(di_dct, 'Wykres digramów', 'digram', plot_name, 1)\n plot_names.append(plot_name)\n plot_name = self.lang_found + '_trigram'\n self.wykres(tri_dct, 'Wykres trigramów', 'trigram', plot_name, 2)\n plot_names.append(plot_name)\n\n for cnt, plt_scn in enumerate(self.plot_scenes):\n pic = QtGui.QPixmap(self.img_dir + '/' + plot_names[cnt] + \".png\")\n plt_scn.setPixmap(pic.scaled(427, 320, Qt.KeepAspectRatio))", "def table_plot(true4_found4_corr, true4_found4_incorr, true4_found3,\n true3_found4, true3_found3, savename=None):\n \n # Prepare plot on which to place table\n _, ax = plt.subplots()\n plt.xlim(-0.1,5.1)\n plt.ylim(-0.1,3.7)\n ax.axis('off')\n\n n_events = sum([true4_found4_corr, true4_found4_incorr, true4_found3,\n true3_found4, true3_found3])\n\n n_col_1 = sum([true4_found4_corr, true4_found4_incorr, true4_found3])\n n_col_2 = sum([true3_found4, true3_found3])\n\n n_row_1 = sum([true4_found4_corr, true4_found4_incorr, true3_found4])\n n_row_2 = sum([true4_found3, true3_found3])\n \n if n_col_1 != 0:\n true4_found4_corr_pc = true4_found4_corr / n_col_1 * 100\n true4_found4_incorr_pc = true4_found4_incorr / n_col_1 * 100\n true4_found3_pc = true4_found3 / n_col_1 * 100\n else:\n true4_found4_corr_pc = 0\n true4_found4_incorr_pc = 0\n true4_found3_pc = 0\n if n_col_2 != 0:\n true3_found4_pc = true3_found4 / n_col_2 * 100\n true3_found3_pc = true3_found3 / n_col_2 * 100\n else:\n true3_found4_pc = 0\n true3_found3_pc = 0\n\n # add a whole bunch of squares and text\n ax.text(0.5,1, \"4th Jet\\nReco\", fontsize=18, verticalalignment='center', horizontalalignment='center', fontweight='heavy')\n ax.add_patch(patches.Rectangle((0,0),1,2,linewidth=1,edgecolor='#262626',facecolor='w'))\n\n ax.text(1.5,1+1/3, \"4th jet\\nfound\", fontsize=13, verticalalignment='center', horizontalalignment='center', fontweight='heavy')\n ax.text(1.5,1, f\"({n_row_1:.0f})\", fontsize=10, verticalalignment='center', horizontalalignment='center', fontweight='normal')\n 
ax.add_patch(patches.Rectangle((1,0),1,1-1/3,linewidth=1,edgecolor='#262626',facecolor='w'))\n\n ax.text(3-0.05,1/3, f\"{true4_found3_pc:.1f}%\", fontsize=14, verticalalignment='center', horizontalalignment='center', fontweight='normal')\n ax.text(3-0.05,1/9, f\"({true4_found3:.0f})\", fontsize=10, verticalalignment='center', horizontalalignment='center', fontweight='normal')\n ax.add_patch(patches.Rectangle((2,0),2-0.1,1-1/3,linewidth=1,edgecolor='#262626',facecolor='#ffff66'))\n\n ax.text(4.45,1/3, f\"{true3_found3_pc:.1f}%\", fontsize=14, verticalalignment='center', horizontalalignment='center', fontweight='normal')\n ax.text(4.45,1/9, f\"({true3_found3:.0f})\", fontsize=10, verticalalignment='center', horizontalalignment='center', fontweight='normal')\n ax.add_patch(patches.Rectangle((4-0.1,0),1+0.1,1-1/3,linewidth=1,edgecolor='#262626',facecolor='#00ff66'))\n\n ax.text(1.5,0.4, \"No 4th jet\\nfound\", fontsize=13, verticalalignment='center', horizontalalignment='center', fontweight='heavy')\n ax.text(1.5,1/9, f\"({n_row_2:.0f})\", fontsize=10, verticalalignment='center', horizontalalignment='center', fontweight='normal')\n ax.add_patch(patches.Rectangle((1,1-1/3),1,1+1/3,linewidth=1,edgecolor='#262626',facecolor='w'))\n\n ax.text(2.5,1+2/3, \"Correct\\n4th jet\", fontsize=14, verticalalignment='center', horizontalalignment='center', fontweight='normal')\n ax.add_patch(patches.Rectangle((2,1-1/3),1,0.5*(1+1/3),linewidth=1,edgecolor='#262626',facecolor='w'))\n\n ax.text(2.5,1, \"Incorrect\\n4th jet\", fontsize=14, verticalalignment='center', horizontalalignment='center', fontweight='normal')\n ax.add_patch(patches.Rectangle((2,1-1/3+0.5*(1+1/3)),1,0.5*(1+1/3),linewidth=1,edgecolor='#262626',facecolor='w'))\n\n ax.text(3.45,1+2/3, f\"{true4_found4_corr_pc:.1f}%\", fontsize=14, verticalalignment='center', horizontalalignment='center', fontweight='normal')\n ax.text(3.45,1+2/3-2/9, f\"({true4_found4_corr:.0f})\", fontsize=10, verticalalignment='center', horizontalalignment='center', fontweight='normal')\n ax.add_patch(patches.Rectangle((3,1-1/3),1-0.1,0.5*(1+1/3),linewidth=1,edgecolor='k',facecolor='#ff6666'))\n\n ax.text(3.45,1, f\"{true4_found4_incorr_pc:.1f}%\", fontsize=14, verticalalignment='center', horizontalalignment='center', fontweight='normal')\n ax.text(3.45,1-2/9, f\"({true4_found4_incorr:.0f})\", fontsize=10, verticalalignment='center', horizontalalignment='center', fontweight='normal')\n ax.add_patch(patches.Rectangle((3,1-1/3+0.5*(1+1/3)),1-0.1,0.5*(1+1/3),linewidth=1,edgecolor='#262626',facecolor='#00ff66'))\n\n ax.text(4.45,1+1/3, f\"{true3_found4_pc:.1f}%\", fontsize=14, verticalalignment='center', horizontalalignment='center', fontweight='normal')\n ax.text(4.45,1+1/3-2/9, f\"({true3_found4:.0f})\", fontsize=10, verticalalignment='center', horizontalalignment='center', fontweight='normal')\n ax.add_patch(patches.Rectangle((4-0.1,1-1/3),1+0.1,1+1/3,linewidth=1,edgecolor='#262626',facecolor='#ff6666'))\n\n ax.text(3,2.375, \"4th tag exists\", fontsize=14, verticalalignment='center', horizontalalignment='center', fontweight='heavy')\n ax.text(3,2.375-2/9, f\"({n_col_1:.0f})\", fontsize=10, verticalalignment='center', horizontalalignment='center', fontweight='normal')\n ax.add_patch(patches.Rectangle((2,2),2-0.1,0.75,linewidth=1,edgecolor='#262626',facecolor='w'))\n\n ax.text(4.45,2.375, \"No 4th tag\", fontsize=14, verticalalignment='center', horizontalalignment='center', fontweight='heavy')\n ax.text(4.45,2.375-2/9, f\"({n_col_2:.0f})\", fontsize=10, 
verticalalignment='center', horizontalalignment='center', fontweight='normal')\n ax.add_patch(patches.Rectangle((4-0.1,2),1+0.1,0.75,linewidth=1,edgecolor='#262626',facecolor='w'))\n\n ax.text(3.5,3.1, \"Truth-Matching\", fontsize=18, verticalalignment='center', horizontalalignment='center', fontweight='heavy')\n\n ax.text(1,2.375, f\"(# events={n_events:.0f})\", fontsize=14, verticalalignment='center', horizontalalignment='center', fontweight='normal')\n ax.add_patch(patches.Rectangle((2,2+0.75),3,0.75,linewidth=1,edgecolor='#262626',facecolor='w'))\n\n # format and show/save\n plt.tight_layout()\n if savename:\n plt.savefig(f\"table_{savename}.png\", dpi=300)\n plt.show()", "def tables(args):\n\n config_file = args.setupfn\n conf_base = os.path.basename(config_file).split('.')[0]\n statfile = os.path.join(args.outputdir,\n \"{}_radvel.stat\".format(conf_base))\n status = load_status(statfile)\n\n assert status.getboolean('mcmc', 'run'), \\\n \"Must run MCMC before making tables\"\n\n P, post = radvel.utils.initialize_posterior(config_file)\n post = radvel.posterior.load(status.get('fit', 'postfile'))\n chains = pd.read_csv(status.get('mcmc', 'chainfile'))\n minafactor = status.get('mcmc', 'minafactor')\n maxarchange = status.get('mcmc', 'maxarchange')\n maxgr = status.get('mcmc', 'maxgr')\n mintz = status.get('mcmc', 'mintz')\n if 'derive' in status.sections() and status.getboolean('derive', 'run'):\n dchains = pd.read_csv(status.get('derive', 'chainfile'))\n chains = chains.join(dchains, rsuffix='_derived')\n derived = True\n else:\n derived = False\n report = radvel.report.RadvelReport(P, post, chains, minafactor, maxarchange, maxgr, mintz, derived=derived)\n tabletex = radvel.report.TexTable(report)\n attrdict = {'priors': 'tab_prior_summary', 'rv': 'tab_rv',\n 'params': 'tab_params', 'derived': 'tab_derived',\n 'crit': 'tab_crit'}\n for tabtype in args.type:\n print(\"Generating LaTeX code for {} table\".format(tabtype))\n\n if tabtype == 'ic_compare':\n assert status.has_option('ic_compare', 'ic'), \\\n \"Must run Information Criteria comparison before making comparison tables\"\n\n compstats = eval(status.get('ic_compare', 'ic'))\n report = radvel.report.RadvelReport(\n P, post, chains, minafactor, maxarchange, maxgr, mintz, compstats=compstats\n )\n tabletex = radvel.report.TexTable(report)\n tex = tabletex.tab_comparison()\n elif tabtype == 'rv':\n tex = getattr(tabletex, attrdict[tabtype])(name_in_title=args.name_in_title, max_lines=None)\n elif tabtype == 'crit':\n tex = getattr(tabletex, attrdict[tabtype])(name_in_title=args.name_in_title)\n else:\n if tabtype == 'derived':\n assert status.has_option('derive', 'run'), \\\n \"Must run `radvel derive` before making derived parameter table\"\n assert tabtype in attrdict, 'Invalid Table Type %s ' % tabtype\n tex = getattr(tabletex, attrdict[tabtype])(name_in_title=args.name_in_title)\n\n saveto = os.path.join(\n args.outputdir, '{}_{}.tex'.format(conf_base, tabtype)\n )\n with open(saveto, 'w+') as f:\n f.write(tex)\n\n savestate = {'{}_tex'.format(tabtype): os.path.relpath(saveto)}\n save_status(statfile, 'table', savestate)", "def main():\n\n if not os.path.exists( os.path.join(os.getcwd(), 'Plots') ):\n os.mkdir('Plots')\n\n # Initialise the canvas and set aesthetics\n canv = TCanvas(\"canv\", \"canv\", 800, 600)\n canv.SetLogy()\n gStyle.SetOptStat(0)\n gStyle.SetOptTitle(0)\n\n # Initialise legend and set colours\n leg_height = len(models) * 0.06 # make y-length of legend dependent on n_models\n myLeg = TLegend(0.6, 0.9 - 
leg_height, 0.9, 0.9)\n myLeg.SetTextSize(0.02)\n\n # Initialise histogram arrays\n nJetHist = [None] * len(models)\n jetPtHist = [None] * len(models)\n leadJetPtHist = [None] * len(models)\n metPtHist = [None] * len(models)\n dPhiJJHist = [None] * len(models)\n\n # x-axis labels for plots\n nJetLabel = \"#it{n}_{jet}\"\n jetPtLabel = \"#it{p}_{T}^{jet}\"\n leadJetPtLabel = \"#it{p}_{T}^{j_{1}}\"\n metPtLabel = \"#it{E}_{T}^{miss}\"\n dPhiJJLabel = \"#Delta#it{#phi}_{j_{1} j_{2}}\"\n\n # Initialise histograms here so I can use them later\n for i, model in enumerate(models):\n nJetHist[i] = TH1F(\"nJet\"+model, \"nJet dist \"+model, 30, 0, 29)\n jetPtHist[i] = TH1F(\"jetPt\"+model, \"Jet pT dist \"+model, 30, 0, 3000)\n leadJetPtHist[i] = TH1F(\"leadJetPt\"+model, \"Lead jet pT dist \"+model, 30, 0, 3000)\n metPtHist[i] = TH1F(\"met\"+model, \"MET dist \"+model, 30, 0, 3000)\n dPhiJJHist[i] = TH1F(\"dPhijj\"+model, \"DPhi dist \"+model, 20, -1*(pi+0.1), pi+0.1)\n \n\n # Open root files, then draw individual histograms\n for i, model in enumerate(models):\n print Fore.MAGENTA + \"Running over model {0}/{1}.\".format(i+1, len(models))\n openFile = TFile(files[i])\n tree = openFile.Get(\"Events\")\n nEntries = tree.GetEntries()\n\n # Initialise progress bar\n widgets = [Percentage(), Bar('>'), ETA()]\n pbar = ProgressBar(widgets = widgets, maxval = nEntries).start() \n\n for entry in xrange(nEntries):\n treeEntry = tree.GetEntry(entry)\n nJetHist[i].Fill(tree.nJet)\n \n for jet in xrange( len(tree.Jet_pt) ):\n jetPtHist[i].Fill(tree.Jet_pt[jet])\n\n if len(tree.Jet_pt) > 0: leadJetPtHist[i].Fill(tree.Jet_pt[0])\n metPtHist[i].Fill(tree.MET_pt)\n\n if len(tree.Jet_phi) >= 2:\n deltaPhi = tree.Jet_phi[0] - tree.Jet_phi[1]\n dPhiJJHist[i].Fill(deltaPhi) \n\n pbar.update(entry+1)\n \n pbar.finish()\n\n # Normalise histograms\n nJetHist[i].Scale(1./nEntries)\n jetPtHist[i].Scale(1./nEntries)\n leadJetPtHist[i].Scale(1./nEntries)\n metPtHist[i].Scale(1./nEntries)\n dPhiJJHist[i].Scale(1./nEntries)\n\n # Draw individual histograms and save\n drawIndivHistos(model, nJetHist[i], canv, myLeg, nJetLabel, \"nJet\", index=i)\n drawIndivHistos(model, jetPtHist[i], canv, myLeg, jetPtLabel, \"jetPT\", index=i)\n drawIndivHistos(model, leadJetPtHist[i], canv, myLeg, leadJetPtLabel, \"leadJetPT\", index=i)\n drawIndivHistos(model, metPtHist[i], canv, myLeg, metPtLabel, \"MET\", index=i)\n drawIndivHistos(model, dPhiJJHist[i], canv, myLeg, dPhiJJLabel, \"dPhi\", index=i)\n \n\n # Draw histograms for different models overlaid\n drawMultipleHistos(nJetHist, canv, myLeg, nJetLabel, \"nJet\")\n drawMultipleHistos(jetPtHist, canv, myLeg, jetPtLabel, \"jetPT\")\n drawMultipleHistos(leadJetPtHist, canv, myLeg, leadJetPtLabel, \"leadJetPT\")\n drawMultipleHistos(metPtHist, canv, myLeg, metPtLabel, \"MET\")\n drawMultipleHistos(dPhiJJHist, canv, myLeg, dPhiJJLabel, \"dPhi\")", "def _make_files(self):\n if not self.path.is_dir():\n raise FileNotFoundError(f\"Path {self.path} does not exist.\")\n\n # Make the filepaths\n self.file_points = self.path / \"point.dat\"\n self.file_lines = self.path / \"line.dat\"\n self.file_cadastre = self.path / \"cadastre.dat\"\n self.file_portals = self.path / \"portals.dat\"\n\n with open(self.file_points, \"w\") as f:\n # 2 lines ignored\n header = datetime.datetime.now().strftime(\"Generated: %d/%m/%Y %H:%M\\n\")\n f.write(header)\n self.points_dfs = []\n with open(self.file_lines, \"w\") as f:\n # 5 lines ignored\n header = (\n datetime.datetime.now().strftime(\"Generated: %d/%m/%Y 
%H:%M\\n\")\n + 3 * \"Generated: \\n\"\n + \"Name,Section,source_group,x1,y1,z1,x2,y2,z2,width,vert. ext.,-,-,\"\n \"emission_rate[kg/h/km],-,-,-,-\\n\"\n )\n f.write(header)\n with open(self.file_cadastre, \"w\") as f:\n # 1 line ignored\n header = \"x,y,z,dx,dy,dz,emission_rate[kg/h],-,-,-,source_group\\n\"\n f.write(header)\n with open(self.file_portals, \"w\") as f:\n # 2 lines ignored\n header = (\n datetime.datetime.now().strftime(\"Generated: %d/%m/%Y %H:%M\\n\")\n + \"x1,y1,x2,y2,z0,z1,emission_rate[kg/h],-,-,-,source_group\\n\"\n )\n f.write(header)\n\n\n # File to save the source groups values\n self.file_source_groups = self.path / \"source_groups.json\"\n with open(self.file_source_groups, \"w\") as f:\n # reverse the dict (items become keys and vice versa)\n reversed_source_groups = {v: k for k, v in self.source_groups.items()}\n json.dump(reversed_source_groups, f, indent=2)", "def create_plots() -> str:\r\n return _find_or_create_dir(PLOTS_FOLDER)", "def prepare_plots(task_list, productiontype=''):\n ev_states = ['waiting', 'running', 'done', 'failed']\n plots_groups = {\n 'main': ['nevents_sum_status', 'age_hist'],\n 'main_preset': [],\n 'extra': ['nevents_by_status', 'nevents_by_priority']\n }\n plots_dict = {\n 'nevents_sum_status': {\n 'data': [],\n 'title': 'Evts by status',\n 'options': {}\n },\n 'nevents_by_status': {\n 'data': [],\n 'title': 'Evts by task status',\n 'options': {}\n },\n 'nevents_by_priority': {\n 'data': [],\n 'title': 'Evts by task priority',\n 'options': {}\n },\n 'nevents_by_processingtype': {\n 'data': [],\n 'title': 'Evts by type',\n 'options': {}\n },\n 'nevents_by_cutcampaign': {\n 'data': [],\n 'title': 'Evts by campaign',\n 'options': {}\n },\n 'aslots_by_processingtype': {\n 'data': [],\n 'title': 'Slots by type',\n 'options': {}\n },\n 'aslots_by_cutcampaign': {\n 'data': [],\n 'title': 'Slots by campaign',\n 'options': {}\n },\n 'age_hist': {\n 'data': [],\n 'title': 'Task age histogram',\n 'options': {'labels': ['Task age, days', 'Number of tasks']}\n },\n }\n\n plots_data = {\n 'group_by': {\n 'nevents': {\n 'processingtype': {},\n 'status': {},\n 'priority': {},\n 'cutcampaign': {},\n },\n 'aslots': {\n 'processingtype': {},\n 'cutcampaign': {},\n },\n },\n 'hist': {\n 'age': {\n 'stats': {},\n 'rawdata': [],\n },\n },\n 'sum': {\n 'nevents': {\n 'status': {\n 'waiting': 0,\n 'running': 0,\n 'done': 0,\n 'failed': 0,\n }\n }\n },\n }\n\n if productiontype == 'MC':\n plots_data['group_by']['nevents']['simtype'] = {}\n plots_data['group_by_by'] = {\n 'nevents': {\n 'simtype_processingtype': {}\n }\n }\n plots_dict['nevents_by_simtype'] = {\n 'data': [],\n 'title': 'Evts by sim type',\n 'options': {}\n }\n plots_dict['nevents_by_simtype_by_processingtype'] = {}\n plots_groups['main_preset'].extend(['nevents_by_simtype', 'nevents_by_processingtype', 'aslots_by_processingtype'])\n plots_groups['extra'].extend(['nevents_by_cutcampaign', 'aslots_by_cutcampaign'])\n elif productiontype == 'DATA':\n plots_groups['main_preset'].extend(['nevents_by_cutcampaign', 'aslots_by_cutcampaign'])\n plots_groups['extra'].extend(['nevents_by_processingtype', 'aslots_by_processingtype'])\n elif productiontype == 'DPD':\n plots_data['group_by']['nevents']['outputdatatype'] = {}\n plots_data['group_by']['aslots']['outputdatatype'] = {}\n plots_dict['nevents_by_outputdatatype'] = {\n 'data': [],\n 'title': 'Evts by output type',\n 'options': {}\n }\n plots_dict['aslots_by_outputdatatype'] = {\n 'data': [],\n 'title': 'Slots by output type',\n 'options': {}\n 
}\n\n plots_data['group_by_by'] = {\n 'nevents': {\n 'outputdatatype_evstatus': {}\n }\n }\n plots_dict['nevents_by_outputdatatype_by_evstatus'] = {\n 'data': [],\n 'title': 'Event states by output type',\n 'options': {'type': 'sbar', 'labels': ['', 'Number of events']}\n }\n\n plots_groups['main_preset'].extend(['aslots_by_outputdatatype','nevents_by_outputdatatype_by_evstatus'])\n plots_groups['extra'].extend(['nevents_by_outputdatatype'], )\n else:\n plots_groups['main_preset'].extend(['nevents_by_processingtype', 'aslots_by_processingtype'])\n\n\n # collect data for plots\n for task in task_list:\n for plot_type, pdict in plots_data.items():\n if plot_type == 'sum':\n for sumparam, byparams in pdict.items():\n for byparam, keys in byparams.items():\n if byparam == 'status':\n for key in keys:\n plots_data[plot_type][sumparam][byparam][key] += task[sumparam+key] if sumparam+key in task else 0\n elif plot_type == 'hist':\n for param in pdict.keys():\n plots_data[plot_type][param]['rawdata'].append(task[param])\n elif plot_type == 'group_by':\n for sumparam, byparams in pdict.items():\n for byparam in byparams:\n if task[byparam] not in plots_data[plot_type][sumparam][byparam]:\n plots_data[plot_type][sumparam][byparam][task[byparam]] = 0\n plots_data[plot_type][sumparam][byparam][task[byparam]] += task[sumparam]\n elif plot_type == 'group_by_by':\n for sumparam, byparams in pdict.items():\n for param in byparams:\n byby_params = param.split('_')\n if byby_params[0] == 'evstatus':\n for es in ev_states:\n if es not in plots_data[plot_type][sumparam][param]:\n plots_data[plot_type][sumparam][param][es] = {}\n if task[byby_params[1]] not in plots_data[plot_type][sumparam][param][es]:\n plots_data[plot_type][sumparam][param][es][task[byby_params[1]]] = 0\n plots_data[plot_type][sumparam][param][es][task[byby_params[1]]] += task[sumparam+es]\n elif byby_params[1] == 'evstatus':\n if task[byby_params[0]] not in plots_data[plot_type][sumparam][param]:\n plots_data[plot_type][sumparam][param][task[byby_params[0]]] = {}\n for es in ev_states:\n if es not in plots_data[plot_type][sumparam][param][task[byby_params[0]]]:\n plots_data[plot_type][sumparam][param][task[byby_params[0]]][es] = 0\n plots_data[plot_type][sumparam][param][task[byby_params[0]]][es] += task[sumparam+es]\n else:\n if task[byby_params[0]] not in plots_data[plot_type][sumparam][param]:\n plots_data[plot_type][sumparam][param][task[byby_params[0]]] = {}\n if task[byby_params[1]] not in plots_data[plot_type][sumparam][param][task[byby_params[0]]]:\n plots_data[plot_type][sumparam][param][task[byby_params[0]]][task[byby_params[1]]] = 0\n plots_data[plot_type][sumparam][param][task[byby_params[0]]][task[byby_params[1]]] += task[sumparam]\n\n # build histograms\n N_BIN_MAX = 50\n plots_to_delete = []\n for pname, pdata in plots_data['hist'].items():\n rawdata = pdata['rawdata']\n if len(rawdata) > 0:\n plots_data['hist'][pname]['stats'] = [np.average(rawdata), np.std(rawdata)]\n try:\n bins, ranges = np.histogram(rawdata, bins='auto')\n except MemoryError:\n bins, ranges = np.histogram(rawdata, bins=N_BIN_MAX)\n if len(ranges) > N_BIN_MAX + 1:\n bins, ranges = np.histogram(rawdata, bins=N_BIN_MAX)\n\n mranges = [sum(ranges[i:i + 2])/2 for i in range(len(ranges) - 2 + 1)]\n plots_data['hist'][pname]['data'] = [['x'], ['N tasks']]\n plots_data['hist'][pname]['data'][0].extend(list(np.floor(mranges)))\n plots_data['hist'][pname]['data'][1].extend(list(np.histogram(rawdata, ranges)[0]))\n else:\n plots_to_delete.append(pname)\n\n # 
deleting plots if no data\n if len(plots_to_delete) > 0:\n print(plots_to_delete)\n for pname in plots_to_delete:\n try:\n del (plots_data['hist'][pname])\n except:\n pass\n\n # inject plots data to plot dict\n to_delete = []\n extra_plots = {}\n for pname, pdict in plots_dict.items():\n if pname.count('_by') == 2:\n [sumparam, byparam, bybyparam] = pname.split('_by_')\n if sumparam in plots_data['group_by_by'] and byparam+'_'+bybyparam in plots_data['group_by_by'][sumparam]:\n if 'options' in plots_dict[pname] and 'type' in plots_dict[pname]['options'] and plots_dict[pname]['options']['type'] == 'sbar':\n tlist = []\n for key, kdict in plots_data['group_by_by'][sumparam][byparam + '_' + bybyparam].items():\n if sum([v for v in kdict.values()]) > 0:\n tdict = kdict\n tdict[byparam] = key\n tlist.append(tdict)\n # sort by sum of events\n tlist = sorted(tlist, key=lambda x: -sum([x[es] for es in ev_states]))\n # convert into list of lists\n data_list = [[byparam]]\n data_list.extend([[es] for es in ev_states])\n for row in tlist:\n data_list[0].append(row[byparam])\n for i, es in enumerate(ev_states):\n data_list[i + 1].append(row[es])\n plots_dict[pname]['data'] = data_list\n else:\n for key, kdict in plots_data['group_by_by'][sumparam][byparam+'_'+bybyparam].items():\n extra_plots[sumparam+'_'+key+'_by_'+bybyparam] = {\n 'data': [[k, v] for k, v in plots_data['group_by_by'][sumparam][byparam+'_'+bybyparam][key].items()],\n 'title': key,\n 'options': {\n 'total': sum(plots_data['group_by_by'][sumparam][byparam+'_'+bybyparam][key].values())\n },\n }\n plots_groups['extra'].append(sumparam+'_'+key+'_by_'+bybyparam)\n to_delete.append(pname)\n elif pname.count('_by') == 1:\n [sumparam, byparam] = pname.split('_by_')\n if sumparam in plots_data['group_by'] and byparam in plots_data['group_by'][sumparam]:\n plots_dict[pname]['data'] = [[k, v] for k, v in plots_data['group_by'][sumparam][byparam].items() if v > 0]\n plots_dict[pname]['options']['total'] = sum(plots_data['group_by'][sumparam][byparam].values())\n elif '_hist' in pname:\n param = pname.split('_')[0]\n if param in plots_data['hist']:\n plots_dict[pname]['data'] = plots_data['hist'][param]['data']\n plots_dict[pname]['options']['stats'] = plots_data['hist'][param]['stats']\n elif '_sum_' in pname:\n [sumparam, byparam] = pname.split('_sum_')\n if sumparam in plots_data['sum'] and byparam in plots_data['sum'][sumparam]:\n plots_dict[pname]['data'] = [[k, v] for k, v in plots_data['sum'][sumparam][byparam].items() if v > 0]\n plots_dict[pname]['options']['total'] = sum(plots_data['sum'][sumparam][byparam].values())\n\n # # check if plot is in one of main groups\n # if pname not in plots_groups['main'] and pname not in plots_groups['main_preset'] and pname not in to_delete:\n # plots_groups['extra'].append(pname)\n\n plots_dict.update(extra_plots)\n for key in to_delete:\n try:\n del plots_dict[key]\n except KeyError:\n pass\n\n # divide plots by groups\n plots = {}\n for plots_group, pnames in plots_groups.items():\n plots[plots_group] = {}\n for pname in pnames:\n if pname in plots_dict:\n plots[plots_group][pname] = plots_dict[pname]\n\n return plots", "def plot(self):\n\t\tplot_chain(self.database_path, self.temp_folder)\n\t\tplot_density(self.database_path, self.temp_folder, self.cal_params)", "def write_all_data_tables( phasename, eos_prop_d, output_d ):\n\n dataio.write_data_table( 'temperature_' + phasename + '.dat',\n (eos_prop_d[key] for key in\n ('Pmesh_a', 'Smesh_a', 'Tmesh_a')),\n ('GPa', 'eV', 1), output_d )\n\n 
dataio.write_data_table( 'density_' + phasename + '.dat',\n (eos_prop_d[key] for key in\n ('Pmesh_a', 'Smesh_a', 'rhomesh_a')),\n ('GPa', 'eV','g_cc'), output_d )\n dataio.write_data_table( 'heat_capacity_' + phasename + '.dat',\n (eos_prop_d[key] for key in\n ('Pmesh_a', 'Smesh_a', 'Cpmesh_a')),\n ('GPa','eV','eV'), output_d )\n\n dataio.write_data_table( 'thermal_exp_' + phasename + '.dat',\n (eos_prop_d[key] for key in\n ('Pmesh_a', 'Smesh_a', 'alphamesh_a')),\n ('GPa','eV',1), output_d )\n\n dataio.write_data_table( 'adiabat_temp_grad_' + phasename + '.dat',\n (eos_prop_d[key] for key in\n ('Pmesh_a', 'Smesh_a', 'dTdP_Smesh_a')),\n ('GPa','eV','GPa-1'), output_d )\n pass", "def make_all_charts(data, dir_path, filename, num_categories, colorby, args,\r\n color_data, prefs, background_color, label_color,\r\n chart_type, generate_image_type, plot_width, plot_height,\r\n bar_width, dpi, resize_nth_label, label_type,\r\n include_html_legend, include_html_counts):\r\n\r\n # iterate over the preferences and assign colors according to taxonomy\r\n img_data = []\r\n for label, f_name in data:\r\n raw_fpath = os.path.join(\r\n dir_path,\r\n 'raw_data',\r\n os.path.split(f_name)[-1])\r\n # move raw file to output directory\r\n shutil.copyfile(f_name, raw_fpath)\r\n\r\n f = color_data['counts'][f_name]\r\n level = max([len(t.split(';')) - 1 for t in f[1]])\r\n\r\n for key in prefs.keys():\r\n if prefs[key]['column'] != str(level):\r\n continue\r\n col_name = 'Taxon'\r\n mapping = [['Taxon']]\r\n mapping.extend([[m] for m in f[1]])\r\n if 'colors' in prefs[key]:\r\n if isinstance(prefs[key]['colors'], dict):\r\n pref_colors = prefs[key]['colors'].copy()\r\n # copy so we can mutate\r\n else:\r\n pref_colors = prefs[key]['colors'][:]\r\n else:\r\n pref_colors = {}\r\n labelname = prefs[key]['column']\r\n\r\n # Define groups and associate appropriate colors to each group\r\n groups = group_by_field(mapping, col_name)\r\n pref_colors, data_colors, data_color_order = \\\r\n get_group_colors(groups, pref_colors)\r\n\r\n updated_pref_colors = {}\r\n\r\n if chart_type == 'area' and len(f[0]) == 1:\r\n raise ValueError(\r\n 'When generating area charts, the number of samples (or category values) must be greater than 1. 
However, you can still produce a pie chart or bar chart with only 1 sample (or category value), but you must remove the area chart value from the input arguments.')\r\n\r\n for key in pref_colors:\r\n updated_pref_colors[key.replace('\"', '')] = pref_colors[key]\r\n\r\n for i, val in enumerate(f[1]):\r\n f[1][i] = val.replace('\"', '')\r\n\r\n # parse the counts and continue processing\r\n img_data.extend(get_counts(label.strip(), colorby, num_categories,\r\n dir_path, level, f, prefs, updated_pref_colors,\r\n background_color,\r\n label_color, chart_type, generate_image_type,\r\n plot_width, plot_height, bar_width, dpi, raw_fpath,\r\n resize_nth_label, label_type, include_html_legend,\r\n include_html_counts))\r\n\r\n # generate html filepath\r\n outpath = os.path.join(dir_path, '%s_charts.html' % chart_type)\r\n out_table = ''.join(img_data)\r\n # write out html file\r\n write_html_file(out_table, outpath)", "def make_timing_plots(self, active_test_list=None, valid_dirs=None, all_tests=None):\n\n if active_test_list is not None:\n valid_dirs, all_tests = self.get_run_history(active_test_list)\n timings = self.get_wallclock_history()\n\n try: bokeh\n except NameError:\n\n convf = dates.datestr2num\n using_mpl = True\n self.plot_ext = \"png\"\n\n else:\n\n convf = lambda s: dt.strptime(s, '%Y-%m-%d')\n using_mpl = False\n self.plot_ext = \"html\"\n\n def convert_date(date):\n \"\"\" Convert to a matplotlib readable date\"\"\"\n\n if len(date) > 10: date = date[:date.rfind(\"-\")]\n return convf(date)\n\n def hover_tool():\n \"\"\"\n Encapsulates hover tool creation to prevent errors when generating\n multiple documents.\n \"\"\"\n\n return HoverTool(\n tooltips=[(\"date\", \"@date{%F}\"), (\"runtime\", \"@runtime{0.00}\")],\n formatters={\"@date\": \"datetime\"})\n\n # make the plots\n for t in all_tests:\n\n try: test_dict = timings[t]\n except KeyError: continue\n\n days = list(map(convert_date, test_dict[\"dates\"]))\n times = test_dict[\"runtimes\"]\n\n if len(times) == 0: continue\n\n if using_mpl:\n\n plt.clf()\n plt.plot_date(days, times, \"o\", xdate=True)\n\n years = dates.YearLocator() # every year\n months = dates.MonthLocator()\n years_fmt = dates.DateFormatter('%Y')\n\n ax = plt.gca()\n ax.xaxis.set_major_locator(years)\n ax.xaxis.set_major_formatter(years_fmt)\n ax.xaxis.set_minor_locator(months)\n\n plt.ylabel(\"time (seconds)\")\n plt.title(t)\n\n if max(times) / min(times) > 10.0:\n ax.set_yscale(\"log\")\n\n fig = plt.gcf()\n fig.autofmt_xdate()\n\n plt.savefig(f\"{self.webTopDir}/{t}-timings.{self.plot_ext}\")\n\n else:\n\n source = ColumnDataSource(dict(date=days, runtime=times))\n\n settings = dict(x_axis_type=\"datetime\")\n if max(times) / min(times) > 10.0: settings[\"y_axis_type\"] = \"log\"\n plot = figure(**settings)\n plot.add_tools(hover_tool())\n\n plot.circle(\"date\", \"runtime\", source=source)\n plot.xaxis.axis_label = \"Date\"\n plot.yaxis.axis_label = \"Runtime (s)\"\n\n save(plot, resources=CDN,\n filename=f\"{self.webTopDir}/{t}-timings.{self.plot_ext}\",\n title=f\"{t} Runtime History\")", "def test_make_HTML_table(self):\r\n\r\n # test pie charts\r\n fracs_labels_other, fracs_labels, all_counts, other_cat, red, other_frac = \\\r\n get_fracs(self.counts1, 5, 10, 'pie')\r\n\r\n img_data = make_HTML_table(\"Phylum\", other_frac, 10, red, other_cat,\r\n fracs_labels_other, fracs_labels,\r\n self.dir_path, all_counts, 1, self.prefs,\r\n self.color_prefs, 'black', 'white', 'pie',\r\n 'Test1',\r\n self.generate_image_type, self.plot_width,\r\n 
self.plot_height, self.bar_width, self.dpi, 0,\r\n 'categorical', False, False)\r\n\r\n self.assertEqual(len(img_data), 2)\r\n\r\n # test area charts\r\n fracs_labels_other, fracs_labels, all_counts, other_cat, red, other_frac = \\\r\n get_fracs(self.counts1, 5, 10, 'area')\r\n\r\n img_data = make_HTML_table(\"Phylum\", other_frac, 10, red, other_cat,\r\n fracs_labels_other, fracs_labels,\r\n self.dir_path, all_counts, 1, self.prefs,\r\n self.color_prefs, 'black', 'white', 'pie',\r\n 'Test1',\r\n self.generate_image_type, self.plot_width,\r\n self.plot_height, self.bar_width, self.dpi, 0,\r\n 'categorical', False, False)\r\n\r\n self.assertEqual(len(img_data), 2)\r\n\r\n # test bar charts\r\n fracs_labels_other, fracs_labels, all_counts, other_cat, red, other_frac = \\\r\n get_fracs(self.counts1, 5, 10, 'bar')\r\n\r\n img_data = make_HTML_table(\"Phylum\", other_frac, 10, red, other_cat,\r\n fracs_labels_other, fracs_labels,\r\n self.dir_path, all_counts, 1, self.prefs,\r\n self.color_prefs, 'black', 'white', 'pie',\r\n 'Test1',\r\n self.generate_image_type, self.plot_width,\r\n self.plot_height, self.bar_width, self.dpi, 0,\r\n 'categorical', False, False)\r\n\r\n self.assertEqual(len(img_data), 2)\r\n self._paths_to_clean_up = [\"/tmp/qiimewebfiles/charts/\" + f\r\n for f in listdir(\"/tmp/qiimewebfiles/charts\")]", "def _buildtable(self):\n\n tabrows = []\n\n for i, (expid, exfiles) in enumerate(self._exposure_files.items()):\n specflux_b, specflux_r, specflux_z = [], [], []\n tab = None\n\n if len(exfiles) == 0:\n continue\n\n print(expid)\n for exfile in exfiles:\n print(exfile)\n hdu = fits.open(exfile)\n\n # The following tables are present in the redux sframes and the\n # nightwatch qcframes.\n wave = hdu['WAVELENGTH'].data\n\n # However, in the nightwatch files the wavelength data are a\n # table of size nfiber x nwavelength.\n if self._filetype == 'nightwatch':\n if wave.ndim > 1:\n wave = wave[0]\n\n fluxhead = hdu['FLUX'].header\n fluxdata = hdu['FLUX'].data\n ivardata = hdu['IVAR'].data\n fibermap = hdu['FIBERMAP'].data\n exptime = fluxhead['EXPTIME']\n if not np.all(self._unditherfa['FIBER'] ==\n np.arange(len(self._unditherfa))):\n raise ValueError('weird fiberassign file format!')\n fibermap = self._unditherfa[fibermap['FIBER']]\n\n target_id = fibermap['TARGETID']\n target_ra = fibermap['TARGET_RA']\n target_dec = fibermap['TARGET_DEC']\n fiber = fibermap['FIBER']\n objtype = fibermap['OBJTYPE']\n flux_g = fibermap['FLUX_G']\n flux_r = fibermap['FLUX_R']\n flux_z = fibermap['FLUX_Z']\n x, y = [fibermap['FIBERASSIGN_{}'.format(val)] for val in ('X', 'Y')]\n\n camera = fluxhead['CAMERA'][0].upper()\n\n if getattr(self, '_deltara', None) is not None:\n dra = self._deltara[i]*np.ones(len(fiber))\n ddec = self._deltadec[i]*np.ones(len(fiber))\n elif self._dithertype == 'telescope':\n dithra = self._ditherfa['target_ra']\n dithdec = self._ditherfa['target_dec']\n udithra = self._unditherfa['target_ra']\n udithdec = self._unditherfa['target_dec']\n ontarget = ((self._ditherfa['targetid'] ==\n self._unditherfa['targetid']) &\n (self._ditherfa['objtype'] == 'TGT'))\n dfiberra = (dithra-udithra)*np.cos(np.radians(udithdec))*60*60\n dfiberdec = (dithdec-udithdec)*60*60\n if not np.all(self._ditherfa['FIBER'] ==\n np.arange(len(self._ditherfa))):\n raise ValueError('unexpected shape of dither file')\n dfiberra[~ontarget] = np.nan\n dfiberdec[~ontarget] = np.nan\n dfiberra = dfiberra[fiber]\n dfiberdec = dfiberdec[fiber]\n wcs = self.lookup_wcs(fluxhead['MJD-OBS'])\n centralwcs = 
self._central_wcs\n if (~np.isfinite(centralwcs['cenra'][1]) or\n ~np.isfinite(centralwcs['cendec'][1])):\n raise ValueError('central pointing ra/dec is NaN!')\n dtelra = (wcs['cenra'][1]-centralwcs['cenra'][1])\n dtelra *= np.cos(np.radians(centralwcs['cendec'][1]))\n dteldec = wcs['cendec'][1]-centralwcs['cendec'][1]\n dra = dfiberra + dtelra*60*60\n ddec = dfiberdec + dteldec*60*60\n if np.all(~np.isfinite(dra)):\n print('warning: no good telescope offset for %s' %\n exfile)\n else:\n raise ValueError('not implemented')\n \n for j, fiber_id in enumerate(fiber):\n flux = fluxdata[j]\n ivar = ivardata[j]\n if not np.any(ivar > 0):\n specflux = 0\n specflux_ivar = 0\n else:\n meanivar = np.mean(ivar[ivar > 0])\n mask = ivar > meanivar / 100\n specflux = np.trapz(flux*mask, wave)\n specflux_ivar = 1./np.sum(ivar[mask]**-1)\n # Schlegel: sum over correct wavelengths, all three\n # filters, plus 11 pixel median filter to reject\n # cosmics.\n # will require being better about reading in\n # the spectrographs together.\n tabrows.append((expid, exptime,\n target_id[j], target_ra[j], target_dec[j],\n fiber[j], objtype[j],\n flux_g[j], flux_r[j], flux_z[j],\n specflux, specflux_ivar, camera,\n dra[j], ddec[j],\n x[j], y[j]))\n\n tab = Table(rows=tabrows,\n names=('EXPID', 'EXPTIME',\n 'TARGETID', 'TARGET_RA', 'TARGET_DEC',\n 'FIBER', 'OBJTYPE',\n 'FLUX_G', 'FLUX_R', 'FLUX_Z',\n 'SPECTROFLUX', 'SPECTROFLUX_IVAR', 'CAMERA',\n 'DELTA_X_ARCSEC', 'DELTA_Y_ARCSEC',\n 'XFOCAL', 'YFOCAL'),\n meta={'EXTNAME' : 'DITHER',\n 'TILEID' : '{}'.format(self._tileid)})\n\n return tab", "def generate_2d_plots(prefs, data, html_dir_path, data_dir_path, filename,\r\n background_color, label_color, generate_scree):\r\n coord_tups = [(\"1\", \"2\"), (\"3\", \"2\"), (\"1\", \"3\")]\r\n mapping = data['map']\r\n out_table = ''\r\n # Iterate through prefs and generate html files for each colorby option\r\n # Sort by the column name first\r\n sample_location = {}\r\n\r\n groups_and_colors = iter_color_groups(mapping, prefs)\r\n groups_and_colors = list(groups_and_colors)\r\n\r\n for i in range(len(groups_and_colors)):\r\n labelname = groups_and_colors[i][0]\r\n groups = groups_and_colors[i][1]\r\n colors = groups_and_colors[i][2]\r\n data_colors = groups_and_colors[i][3]\r\n data_color_order = groups_and_colors[i][4]\r\n\r\n data_file_dir_path = mkdtemp(dir=data_dir_path)\r\n\r\n new_link = os.path.split(data_file_dir_path)\r\n data_file_link = os.path.join('.', os.path.split(new_link[-2])[-1],\r\n new_link[-1])\r\n\r\n new_col_name = labelname\r\n img_data = {}\r\n plot_label = labelname\r\n\r\n if 'support_pcoas' in data:\r\n matrix_average, matrix_low, matrix_high, eigval_average, m_names = \\\r\n summarize_pcoas(data['coord'], data['support_pcoas'],\r\n method=data['ellipsoid_method'])\r\n data['coord'] = \\\r\n (m_names, matrix_average, data['coord'][2], data['coord'][3])\r\n for i in range(len(m_names)):\r\n sample_location[m_names[i]] = i\r\n else:\r\n matrix_average = None\r\n matrix_low = None\r\n matrix_high = None\r\n eigval_average = None\r\n m_names = None\r\n iterator = 0\r\n\r\n for coord_tup in coord_tups:\r\n if isarray(matrix_low) and isarray(matrix_high) and \\\r\n isarray(matrix_average):\r\n coord_1r = asarray(matrix_low)\r\n coord_2r = asarray(matrix_high)\r\n mat_ave = asarray(matrix_average)\r\n else:\r\n coord_1r = None\r\n coord_2r = None\r\n mat_ave = None\r\n sample_location = None\r\n\r\n coord_1, coord_2 = coord_tup\r\n img_data[coord_tup] = draw_pcoa_graph(\r\n plot_label, 
data_file_dir_path,\r\n data_file_link, coord_1, coord_2,\r\n coord_1r, coord_2r, mat_ave,\r\n sample_location,\r\n data, prefs, groups, colors,\r\n background_color, label_color,\r\n data_colors, data_color_order,\r\n generate_eps=True)\r\n\r\n out_table += TABLE_HTML % (labelname,\r\n \"<br>\".join(img_data[(\"1\", \"2\")]),\r\n \"<br>\".join(img_data[(\"3\", \"2\")]),\r\n \"<br>\".join(img_data[(\"1\", \"3\")]))\r\n\r\n if generate_scree:\r\n data_file_dir_path = mkdtemp(dir=data_dir_path)\r\n new_link = os.path.split(data_file_dir_path)\r\n data_file_link = os.path.join(\r\n '.',\r\n os.path.split(new_link[-2])[-1],\r\n new_link[-1])\r\n\r\n img_src, download_link = draw_scree_graph(\r\n data_file_dir_path, data_file_link, background_color,\r\n label_color, generate_eps=True, data=data)\r\n\r\n out_table += SCREE_TABLE_HTML % (\"<br>\".join((img_src, download_link)))\r\n\r\n outfile = create_html_filename(filename, '.html')\r\n outfile = os.path.join(html_dir_path, outfile)\r\n\r\n write_html_file(out_table, outfile)", "def plot_1():\n p_files = []\n filename = \"energy_data_2D_80\"\n for file in sorted(os.listdir(folder)):\n if file.startswith(filename):\n p_files.append(os.path.join(folder,file))\n T_list = []\n fig, ax = plt.subplots()\n for p_file in p_files[3::3]:\n T = (os.path.splitext(os.path.basename(p_file))[0]).split('_',4)[4]\n #print(T)\n E = []\n t = []\n if (T not in T_list):\n T_list.append(T)\n with open(p_file) as csvfile:\n lines = csv.reader(csvfile, delimiter=' ')\n sweep = 0\n for row in lines:\n E.append(float(row[0]))\n t.append(sweep)\n sweep += 1\n ax.plot(t[0:200], E[0:200],label=\"T = \"+format(T[0:3]))\n ax.set_title(\"Energy per bond vs Time\")\n ax.set_ylabel(\"e / J\")\n ax.set_xlabel(\"t / sweeps\")\n ax.legend()\n\n fig.savefig(folder2+\"energy_vs_time.png\")\n fig.savefig(texfolder+\"energy_vs_time.pdf\")", "def __init__(self):\n self.tables = pd.DataFrame({\n \"tables\": TABLES,\n \"year\": GREEN_YEARS + YELLOW_YEARS,\n \"color\": [\"green\" for i in GREEN_YEARS] +\n [\"yellow\" for i in YELLOW_YEARS]\n })", "def __init__(self,\n title = '',\n x_title = None,\n y_title = None,\n plot_header = True,\n ratio = False,\n x_range = None,\n y_max = None,\n y_min = None,\n legendColumns = 1):\n # Store the title\n self._title = title\n self._x_title, self._y_title = x_title, y_title\n\n # Store whether or not the user wants to create a plot header\n self._plot_header = plot_header\n\n # Calculate a unique name for the plot components\n name = _rand_uuid()\n\n # Default logy if off\n self._logy = False\n\n # Default off for integer x-ticks \n self._x_integer_ticks = False \n\n # store n columns for legend\n self.PLOT_LEGEND_N_COLUMNS = legendColumns \n\n # Create a canvas\n self._canvas = TCanvas(name + '_canvas',\n name,\n int(self.PLOT_WIDTH),\n int(self.PLOT_HEIGHT))\n SetOwnership(self._canvas, False)\n\n\n\n # Create the main plot and draw it\n self._plot = TPad(\n 'upperPad',\n 'upperPad',\n #name + '_plot', # WJF: don't need upper pad to have unique name \n #name,\n 0.0,\n (self.PLOT_RATIO_FRACTION\n if ratio\n else 0.0),\n 1.0,\n 1.0\n )\n SetOwnership(self._plot, False)\n self._plot.SetMargin(*(self.PLOT_MARGINS_WITH_RATIO\n if ratio\n else self.PLOT_MARGINS))\n self._plot.Draw()\n\n # Store ranges\n self._x_range = x_range\n if y_max is not None:\n self._set_maximum_value(y_max)\n if y_min is not None:\n self._set_minimum_value(y_min)\n\n # Switch back to the context of the canvas\n self._canvas.cd()\n\n\n # Create a ratio plot and draw it if 
requested\n if ratio:\n self._ratio_plot = TPad(\n 'lowerPad', # WJF, don't need lower pad to have unique name\n 'lowerPad',\n 0.0,\n 0.0,\n 1.0,\n self.PLOT_RATIO_FRACTION\n )\n SetOwnership(self._ratio_plot, False)\n self._ratio_plot.SetMargin(*self.PLOT_RATIO_MARGINS)\n self._ratio_plot.SetGridy(True)\n self._ratio_plot.Draw()\n else:\n self._ratio_plot = None\n # increase canvas margins\n #self._canvas.SetBottomMargin(1)\n #self._plot.SetMargin\n #self._canvas.SetLeftMargin(\n\n # Track whether or not we've already drawn to the main pad\n self._drawn = False\n\n # Track whether or not we've already drawn to the ratio pad\n self._ratio_drawn = False\n\n # Track that object which sets up the axes in the main plot\n self._axes_object = None\n\n # Track whether or not we've already added the atlas label to the main pad\n self._atlas_label_drawn = False\n\n # Create a structure to track any histograms we generate internally\n # which need to be added to any legends created\n self._legend_extras = []\n \n # Flag if y-axis has been set to a log scale \n self._logy = False", "def visualize(stuff, **options):\n separate = r\"\\newpage\" #by default, a new tupel is put on a new page\n name = \"some_text_file\" #by default this file is used\n for key in options:\n if key == \"separate\":\n separate = options[key]\n if key == \"name\":\n name = options[key]\n works = True\n totallines = [r\"\\documentclass{article}\", r\"\\usepackage{xcolor}\", r\"\\usepackage{tikz,pgf}\", r\"\\usepackage[left = 0 cm, top = 0cm, bottom = 0cm, right = 2cm]{geometry}\", r\"\\begin{document}\", r\"\\pagestyle{empty}\"]\n for description in stuff:\n data = stuff[description]\n if checkdataformat(description, data):\n if description == \"config\":\n lines = gentikz(data)\n elif description == \"movelist\":\n lines = showmoveslist(data[0], data[1], data[2])\n elif description == \"movelists\":\n lines = compareshowmoveslists(data[0], data[1], data[2])\n elif description == \"list\":\n lines = showlist(data)\n elif description == \"configurations\":\n lines = showconfigurations(data)\n elif description == \"movetable\":\n lines = nktable(data[0], data[1], sort = 'value')\n elif description == \"incrementtable\":\n lines = nktable(data[0], data[1], sort = 'increment')\n elif description == \"totalptable\":\n lines = nktable(data[0], data[1], sort = 'totalpossibilities')\n elif description == \"ptable\":\n lines = nktable(data[0], data[1], sort = 'adjustedpossibilities')\n elif description == \"bfptable\":\n lines = nktable(data[0], data[1], sort = 'bfadjustedpossibilities')\n else:\n print(\"unknown description\")\n lines = []\n for line in lines:\n totallines.append(line)\n totallines.append(separate)\n else:\n print(description, \":\", data, \"don't match, please read help(visualization)\")\n works = False\n totallines.append(r\"\\end{document}\")\n if works:\n compile(totallines, name)", "def file_table(list_observations, indir, informat, outfile):\n print('Creating file summary table ...')\n\n # We gather all infos in a list of dicts and write this\n # as a FITS table at the end.\n # for documentation see http://gamma-astro-data-formats.readthedocs.org/en/latest/data_storage/hdu_index/index.html\n\n HDU_CLASS_TAGS = dict(\n events='events',\n aeff='aeff_2d',\n edisp='edisp_2d',\n psf_3gauss='psf_3gauss',\n psf_king='psf_king',\n psf_table='psf_table',\n gti='gti'\n )\n\n rows = []\n for obs in list_observations.observations:\n events_filename = Path(indir) / obs.filename('events', format=informat)\n try:\n table = 
Table.read(str(events_filename), hdu='EVENTS')\n except Exception:\n print(\"fits corrupted for file \" + str(events_filename))\n continue\n if table.meta[\"OBS_ID\"] != obs.obs_id:\n continue\n # for filetype in ['events', 'aeff', 'edisp', 'psf_3gauss']:\n # for filetype in ['events']:\n #for filetype in ['events', 'aeff', 'edisp', 'psf_3gauss']:\n for filetype in ['events', 'aeff', 'edisp', 'psf_table']:\n filename = Path(indir) / obs.filename(filetype, format=informat)\n\n if filename.is_file():\n print('Processing {}'.format(filename))\n\n data = dict()\n\n # OBS_ID\n data['OBS_ID'] = obs.obs_id\n\n # HDU_TYPE\n if filetype in ('psf_3gauss'):\n data['HDU_TYPE'] = 'psf'\n elif filetype in ('psf_table'):\n data['HDU_TYPE'] = 'psf'\n else:\n data['HDU_TYPE'] = str(filetype)\n\n # HDU_CLASS\n data['HDU_CLASS'] = HDU_CLASS_TAGS[filetype]\n\n # FILE_DIR (relative path)\n data['FILE_DIR'] = str(\n os.path.relpath(str(obs.out_filename(filetype).parent), str(Path(outfile).parent)))\n\n # FILE_NAME\n data['FILE_NAME'] = str(obs.filename(filetype, format=informat).parts[-1])\n\n # HDU-INFOS\n hdu_list = fits.open(str(filename))\n hdu = hdu_list[1]\n header = hdu.header\n data['HDU_NAME'] = hdu.name\n\n # FILE-INFOS\n stat = filename.stat()\n data['SIZE'] = stat.st_size\n data['MTIME'] = stat.st_mtime\n data['MD5'] = hashlib.md5(filename.open('rb').read()).hexdigest()\n\n # if 'HDUCLAS2' in header:\n # data['HDUCLASS'] = header['HDUCLAS2']\n # else:\n # data['HDUCLASS'] = 'EVENTS'\n\n # if it's the events-file, use a second dict for the gti-hdu\n if filetype == 'events':\n data_gti = dict()\n data_gti['OBS_ID'] = obs.obs_id\n data_gti['HDU_TYPE'] = 'gti'\n data_gti['HDU_CLASS'] = 'gti'\n data_gti['FILE_DIR'] = data['FILE_DIR']\n data_gti['FILE_NAME'] = data['FILE_NAME']\n data_gti['HDU_NAME'] = hdu_list[2].name\n data_gti['SIZE'] = data['SIZE']\n data_gti['MTIME'] = data['MTIME']\n data_gti['MD5'] = data['MD5']\n\n rows.append(data_gti)\n\n rows.append(data)\n hdu_list.close()\n\n else:\n print('File not found: {}'.format(filename))\n\n names = [\n 'OBS_ID', 'HDU_TYPE', 'HDU_CLASS',\n 'FILE_DIR', 'FILE_NAME', 'HDU_NAME',\n 'SIZE', 'MTIME', 'MD5'\n ]\n\n table = Table(rows=rows, names=names)\n\n print('Writing {}'.format(indir + \"/\" + str(outfile)))\n table.write(indir + \"/\" + str(outfile), overwrite=True)\n # add hdu name\n hdulist = fits.open(indir + \"/\" + str(outfile), mode='update')\n hdulist[1].name = 'HDU_INDEX'\n hdulist.close()", "def create_OER_plots(\n data,\n annotation_placement_dict={},\n plots_folder=\"OER_plots\",\n exp_lines=True,\n annotate_data=True,\n axes_ranges={\n \"x1\": 1.0,\n \"x2\": 2.0,\n \"y1\": 1.4,\n \"y2\": 2.0,\n },\n ):\n # | - create_OER_plots\n calc_systems = data\n\n # | - Styling and Setup\n # sets size and font for revtex stylesheet\n\n # Get this from LaTeX using \\showthe\\columnwidth\n fig_width_pt = 1.8 * 246.0\n #fig_width_pt *= 300./72 # convert to 300 dpi\n inches_per_pt = 1.0 / 72.27 # Convert pt to inches\n #inches_per_pt = 1.0/300 # Convert pt to inches\n golden_mean = (np.sqrt(5) - 1.0) / 2.0 # Aesthetic ratio\n fig_width = fig_width_pt * inches_per_pt # width in inches\n fig_height = fig_width * golden_mean # height in inches\n fig_size = [fig_width, fig_height]\n fig = plt.figure(figsize=fig_size, dpi=300)\n\n font_size = 9\n tick_font_size = 8\n xlabel_pad = 8\n ylabel_pad = 18\n matplotlib.rcParams['ps.usedistiller'] = 'xpdf'\n\n matplotlib.rcParams['font.size'] = 10\n #matplotlib.rcParams['axes.labelsize'] = 2*font_size\n
matplotlib.rcParams['axes.labelsize'] = font_size\n matplotlib.rcParams['legend.fontsize'] = font_size\n matplotlib.rcParams['xtick.labelsize'] = tick_font_size\n matplotlib.rcParams['ytick.labelsize'] = tick_font_size\n\n font_default = 'helvetica'\n #font_default='cmss'\n\n def setfont(font=font_default, unicode=True):\n \"\"\"Set font.\n\n Set Matplotlibs rcParams to use LaTeX for font rendering.\n Revert all changes by calling rcdefault() from matplotlib.\n\n Parameters:\n -----------\n font: string\n \"Helvetica\"\n \"Times\"\n \"Computer Modern\"\n\n usetex: Boolean\n Use unicode. Default: False.\n\n \"\"\"\n # | - setfont\n # Use TeX for all figure text!\n plt.rc('text', usetex=True)\n\n font = font.lower().replace(\" \", \"\")\n if font == 'times':\n # Times\n font = {'family': 'serif', 'serif': ['Times']}\n preamble = r\"\"\"\n \\usepackage{color}\n \\usepackage{mathptmx}\n \"\"\"\n elif font == 'helvetica':\n # Helvetica\n # set serif, too. Otherwise setting to times and then\n # Helvetica causes an error.\n font = {'family': 'sans-serif', 'sans-serif': ['Helvetica'],\n 'serif': ['cm10']}\n preamble = r\"\"\"\n \\usepackage{color}\n \\usepackage[tx]{sfmath}\n \\usepackage{helvet}\n \\usepackage{sansmath}\n \"\"\"\n else:\n # Computer modern serif\n font = {'family': 'serif', 'serif': ['cm10']}\n # preamble = r\"\"\"\n preamble = r\"\"\"\n \\usepackage{color}\n \"\"\"\n\n if font == 'cmss':\n # Computer modern sans serif\n font = {'family': 'sans-serif', 'serif': ['cmss']}\n preamble = r\"\"\"\n \\usepackage{color}\n \\usepackage[tx]{sfmath}\n \"\"\"\n\n if unicode:\n # Unicode for Tex\n #preamble = r\"\"\"\\usepackage[utf8]{inputenc}\"\"\" + preamble\n # inputenc should be set automatically\n plt.rcParams['text.latex.unicode'] = True\n\n # print font, preamble\n plt.rc('font', **font)\n plt.rcParams['text.latex.preamble'] = preamble\n #__|\n\n setfont(\n font_default,\n unicode=True,\n # unicode=False,\n )\n\n matplotlib.rcParams['lines.linewidth'] = 1.\n\n #matplotlib.rcParams['ytick.direction'] = 'out'\n #matplotlib.rcParams['xtick.direction'] = 'out'\n\n ax = fig.add_axes([0.2, 0.2, 0.6, 0.6])\n\n zoom = 0.5\n d1 = 3 * zoom\n d2 = 4 * zoom\n xcenter = 1.5 # 0.65\n #ycenter=1.23#2.4\n ycenter = 0.8 # 2.4\n\n x1 = xcenter - d1 # -0.6\n x2 = xcenter + d1 # 2.2\n y1 = ycenter - d2 # 1#0.5\n y2 = ycenter + d2 # 5\n ax.axis([x1, x2, y1, y2])\n ax.set_xlabel(r'$\\Delta$G$_{\\sf O}$ - $\\Delta$G$_{\\sf OH}$ (eV)')\n #ax.set_ylabel(r'$\\Delta$G$_{\\sf OOH}$ -$\\Delta$G$_{\\sf O}$ (eV)')\n ax.set_ylabel(r'$\\Delta$G$_{\\sf OH}$')\n\n delta = 0.025\n x = np.arange(x1, x2 + delta, delta)\n y = np.arange(y1, y2 + delta, delta)\n X, Y = np.meshgrid(x, y)\n\n #Z1 = mlab.bivariate_normal(X, Y, 1.0, 1.0, 0.0, 0.0)\n #Z2 = mlab.bivariate_normal(X, Y, 1.5, 0.5, 1, 1)\n # difference of Gaussians\n #Z = 10.0 * (Z2 - Z1)\n #__|\n\n # | - Methods\n #fit=[0.84527288, 3.38026638]\n def ooh_oh_scaling(doh):\n \"\"\"ooh_oh_scaling equation.\"\"\"\n # | - ooh_oh_scaling\n #like ambars\n #dooh=0.5*doh + 3.0\t\t #O\n #normal one\n\n dooh = doh + 3.2\n return(dooh)\n #__|\n\n def overpotential(doh, do):\n \"\"\"Calculate overpotential.\n\n Args:\n doh:\n do:\n \"\"\"\n # | - overpotential\n dooh = ooh_oh_scaling(doh)\n dg14 = [doh, do - doh, dooh - do, - dooh + 4.92]\n m = max(dg14)\n return(m - 1.23)\n #return doh*do\n #__|\n\n def overpotential2(x, doh):\n \"\"\"Calculate overpotential (version 2).\n\n Args:\n x:\n doh:\n \"\"\"\n # | - overpotential2\n dooh = ooh_oh_scaling(doh)\n dg14 = [doh, x, -x + 
2.46, -dooh + 4.92]\n m = max(dg14)\n return(m - 1.23)\n #return doh*do\n #__|\n\n def overpotential3(x, doh):\n \"\"\"Calculate overpotential (version 3).\n\n Args:\n x:\n doh:\n \"\"\"\n # | - overpotential3\n dooh = ooh_oh_scaling(doh)\n dg14 = [doh, x, dooh - (x + doh), -dooh + 4.92]\n m = max(dg14)\n return(m - 1.23)\n\n #return doh*do\n #__|\n\n def overpotential_label(doh, do):\n \"\"\"Return overpotential label.\n\n Args:\n doh:\n do:\n \"\"\"\n # | - overpotential_label\n dooh = ooh_oh_scaling(doh)\n dg14 = [doh, do - doh, dooh - do, -dooh + 4.92]\n m = max(dg14)\n for i in range(len(dg14)):\n if(m == dg14[0]):\n return(r'OH lim.')\n if(m == dg14[1]):\n return(r'OH-O lim.')\n if(m == dg14[2]):\n return(r'O-OOH lim.')\n if(m == dg14[3]):\n return( r'OOH-O$_{\\sf 2}$ lim.')\n #return doh*do\n #__|\n\n #Z=overpotential(X,Y)\n #__|\n\n # *************************************************************************\n # | - OER_contour_plot *****************************************************\n Z = []\n for j in y:\n tmp = []\n for i in x:\n tmp.append(overpotential3(i, j))\n Z.append(tmp)\n\n\n #print overpotential(0.8,2.4)\n\n Z = np.array(Z)\n\n\n #im = plt.imshow(Z, origin='lower',interpolation='bilinear',\n # cmap=cm.jet_r, extent=(x1,x2,y1,y2), vmin=0, vmax=2)\n\n origin = 'lower'\n levels = np.arange(0.0, 2, 0.1)\n #levels = np.arange(0.2, 2, 0.1)\n CS = plt.contourf(\n X,\n Y,\n Z,\n levels,\n #20,\n # [-1, -0.1, 0, 0.1],\n #alpha=0.8,\n #cmap=plt.cm.bone,\n cmap=plt.cm.jet_r,\n #extend='both',\n extend='max',\n origin=origin,\n )\n\n # Note that in the following, we explicitly pass in a subset of\n # the contour levels used for the filled contours. Alternatively,\n # We could pass in additional levels to provide extra resolution,\n # or leave out the levels kwarg to use all of the original levels.\n\n CS2 = plt.contour(\n CS,\n levels=CS.levels,\n colors='white',\n linewidths=0.05,\n alpha=0.3,\n origin=origin,\n # hold='on',\n )\n\n cbar = plt.colorbar(CS)\n #cbar.ax.set_ylabel('Overpotential [V]')\n #cbar.ax.set_ylabel(r'$\\eta_{\\sf calc.}$')\n cbar.ax.set_ylabel(r'$\\eta_{\\sf OER}$')\n\n ax.tick_params(axis='both', direction='out')\n ax.get_xaxis().tick_bottom() # remove unneeded ticks\n ax.get_yaxis().tick_left()\n\n offset = [0.0, 0.08]\n\n #foo=r': %f' % (calc_systems[i][3])\n for i in range(len(calc_systems)):\n\n # ax.plot(calc_systems[i][1]-calc_systems[i][0], calc_systems[i][0],\n # 'or',color=calc_systems[i][5])\n\n marker_color = calc_systems[i][10]\n marker_border_color = calc_systems[i][5]\n marker_border_width = 1.\n\n if marker_border_color is None:\n marker_border_width = 0.\n\n x_i = calc_systems[i][1] - calc_systems[i][0]\n y_i = calc_systems[i][0]\n size_i = calc_systems[i][9]\n\n lim_pot_i = calc_systems[i][3] + 1.23\n label_i = calc_systems[i][4] + ' : %.2f V' % (lim_pot_i)\n\n ax.plot(\n x_i,\n y_i,\n size_i,\n mec=marker_border_color,\n mew=marker_border_width,\n mfc=marker_color,\n zorder=4,\n marker=calc_systems[i][11],\n label=calc_systems[i][4] + ' : %.2f V' % (calc_systems[i][3])\n )\n\n # | - __old__\n # if i!=0 and 1:\n # ax.text(calc_systems[i][1]-calc_systems[i][0]+calc_systems[i][6],\n # calc_systems[i][0]+calc_systems[i][7],\n # calc_systems[i][4]+'(%.2f)' %(calc_systems[i][3]),color='black',\n # fontsize=6,horizontalalignment='center',rotation=0,zorder=1)\n # else:\n # ax.text(calc_systems[i][1]-calc_systems[i][0]+calc_systems[i][6],\n # calc_systems[i][0]+calc_systems[i][7],\n # calc_systems[i][4]+'(%.2f)' %(calc_systems[i][3]),\n # 
color='white',fontsize=6,horizontalalignment='center',\n # rotation=0,zorder=1)\n #ax.text(calc_systems[i][0],calc_systems[i][1],'%i' %(i+1),\n # color='black',fontsize=4,\n # horizontalalignment='center',\n # verticalalignment='center',\n # rotation=0,zorder=2)\n #__|\n\n corners = [\n [1.3, 1.0],\n [x1 + (x2 - x2) * 0.2, y1 + (y2 - y1) * 0.9],\n [x1 + (x2 - x2) * 0.8, y1 + (y2 - y1) * 0.1],\n [-2, 0],\n ]\n\n #for i in range(len(corners)):\n # ax.text(corners[i][0],corners[i][1], overpotential_label(corners[i][0],\n # corners[i][1]), color='white',fontsize='x-small',\n # horizontalalignment='center',rotation=0,zorder=3)\n\n ax.legend(\n bbox_to_anchor=(1.25, 1.05),\n loc=2,\n borderaxespad=1,\n ncol=1,\n fancybox=True,\n shadow=True,\n fontsize='x-small',\n handlelength=2,\n )\n\n fig_path_i = os.path.join(\n plots_folder,\n \"OER_2D_Volcano.pdf\",\n )\n\n if not os.path.exists(plots_folder):\n os.makedirs(plots_folder)\n\n fig.savefig(\n fig_path_i,\n bbox_inches=\"tight\",\n )\n\n # fig.savefig('OER_contour_plot_v13.pdf', bbox_inches='tight')\n fig.clf()\n\n #__| **********************************************************************\n\n # *************************************************************************\n # | - OER_scaling **********************************************************\n # fig = plt.figure(figsize=fig_size, dpi=300)\n # ax = fig.add_axes([0.2, 0.2, 0.6, 0.6])\n # x1 = -1\n # x2 = 2.5\n # ax.axis([x1, x2, x1, ooh_oh_scaling(x2)])\n #\n # ax.set_xlabel(r'$\\Delta$G$_{\\sf OH}$ (eV)')\n # ax.set_ylabel(r'$\\Delta$G$_{\\sf OOH}$,$\\Delta$G$_{\\sf O}$ (eV)')\n #\n # xdata = []\n # ydata = []\n # y2data = []\n #\n # #for i in range(3):\n # for i in range(len(calc_systems)):\n # xdata.append(calc_systems[i][0])\n # ydata.append(calc_systems[i][2])\n # y2data.append(calc_systems[i][1])\n #\n # # print(xdata)\n # # print(ydata)\n #\n # fit = polyfit(xdata, ydata, 1)\n # fit_fn = poly1d(fit)\n # print(fit_fn)\n # aa = fit_fn[1]\n # bb = fit_fn[0]\n #\n # fit1 = polyfit(xdata, y2data, 1)\n # fit_fn1 = poly1d(fit1)\n # print(fit_fn1)\n #\n # #print fit_fn[0], fit_fn[1]\n # # #how bad is scaling\n # # for i in range(len(calc_systems)):\n # # error = calc_systems[i][2] - \\\n # # (fit_fn[1] * calc_systems[i][0] + fit_fn[0])\n # #\n # # print(error, calc_systems[i])\n #\n # xx = np.arange(x1, x2, delta)\n #\n # # Plotting Scaling Lines\n # ax.plot(xx, fit_fn[1] * xx + fit_fn[0], '--',\n # lw=1, dashes=(3, 1), c='grey', label='OOH scaling',\n # )\n #\n # ax.plot(xx, xx + 3.2, '--', lw=1, dashes=(3, 1), c='black')\n #\n # ax.plot(xx, xx, '--', lw=1, dashes=(3, 1), c='black')\n #\n # ax.plot(xx, fit_fn1[1] * xx + fit_fn1[0], '--',\n # lw=1, dashes=(3, 1), c='red', label='O scaling',\n # )\n #\n # for i in range(len(calc_systems)):\n # ax.plot(\n # calc_systems[i][0],\n # calc_systems[i][2],\n # 'ro',\n # ms=3,\n # marker=calc_systems[i][11],\n # #alpha=0.2,\n # color=calc_systems[i][10],\n # )\n #\n # ax.plot(\n # calc_systems[i][0],\n # calc_systems[i][1],\n # 'ro',\n # ms=3,\n # marker=calc_systems[i][11],\n # #alpha=0.2,\n # color=calc_systems[i][10],\n # )\n #\n # ax.plot(\n # calc_systems[i][0],\n # calc_systems[i][0],\n # calc_systems[i][9],\n # mec=calc_systems[i][5],\n # mfc=calc_systems[i][10],\n # mew=0.8,\n # zorder=4,\n # marker=calc_systems[i][11],\n # label=calc_systems[i][4] + ' : %.2f V' % (calc_systems[i][3]),\n # )\n #\n # # ax.text(calc_systems[i][0],\n # # calc_systems[i][0]+calc_systems[i][7]+0.08,\n # # calc_systems[i][4]+'(%.2f)' %(calc_systems[i][3]),\n # # 
color='black',fontsize=6,horizontalalignment='center',\n # # rotation=0,zorder=1)\n #\n # ax.legend(\n # bbox_to_anchor=(1.05, 1.05),\n # loc=2,\n # borderaxespad=0.5,\n # ncol=1,\n # fancybox=True,\n # shadow=True,\n # fontsize='x-small',\n # handlelength=2,\n # )\n #\n #\n # fig_path_i = os.path.join(\n # plots_folder,\n # \"OER_scaling.pdf\",\n # )\n #\n # if not os.path.exists(plots_folder):\n # os.makedirs(plots_folder)\n #\n # fig.savefig(\n # fig_path_i,\n # bbox_inches=\"tight\",\n # )\n #\n # # fig.savefig('OER_scaling.pdf', bbox_inches='tight')\n #\n # fig.clf()\n #\n #__| **********************************************************************\n\n # *************************************************************************\n # | - OER_1D_plot **********************************************************\n ax = fig.add_axes([0.2, 0.2, 0.6, 0.6])\n\n x1 = axes_ranges[\"x1\"]\n x2 = axes_ranges[\"x2\"]\n y2 = axes_ranges[\"y2\"]\n y1 = axes_ranges[\"y1\"]\n\n ax.axis([x1, x2, y1, y2])\n delta = 0.01\n x = np.arange(x1, x2, delta)\n\n ax.set_xlabel(r'$\\Delta$G$_{\\sf O}-\\Delta$G$_{\\sf OH}$ (eV)')\n\n #ax.set_ylabel(r'$\\Delta$G$_{\\sf O}$ (eV)')\n # ax.set_ylabel(r'U_{\\sf OER}$ (V)')\n # ax.set_ylabel(r'$\\eta_{\\sf OER}$')\n\n ax.set_ylabel(r'Limiting Potential (V)')\n ax.set_ylim(ax.get_ylim()[::-1])\n\n # | - Plotting Volcano Lines\n plot(\n x,\n np.maximum(x, 3.2 - x),\n '--',\n color='black',\n lw=0.67,\n dashes=(3, 1),\n zorder=2,\n )\n #__|\n\n # | - Plotting Data Points\n for i in range(len(calc_systems)):\n\n marker_color = calc_systems[i][10]\n marker_border_color = calc_systems[i][5]\n marker_border_width = 1.\n\n if marker_border_color is None:\n marker_border_width = 0.\n\n x_i = calc_systems[i][1] - calc_systems[i][0]\n y_i = calc_systems[i][3] + 1.23\n size_i = calc_systems[i][9]\n\n lim_pot_i = calc_systems[i][3] + 1.23\n label_i = calc_systems[i][4] + ' : %.2f V' % (lim_pot_i)\n ax.plot(\n\n x_i,\n y_i,\n size_i,\n\n mec=marker_border_color,\n mew=marker_border_width,\n mfc=marker_color,\n\n zorder=4,\n marker=calc_systems[i][11],\n label=label_i,\n )\n\n # | - NEW | Adding Labels\n if annotate_data:\n x_tmp = calc_systems[i][1] - calc_systems[i][0]\n y_tmp = calc_systems[i][3] + 1.23\n\n sys_i_name = calc_systems[i][4]\n\n xytext_i = annotation_placement_dict.get(sys_i_name, None)\n\n if xytext_i is None:\n xytext_i = (4, 10)\n # xytext_i = (-10, 10)\n else:\n xytext_i = xytext_i[0]\n\n facet_i = calc_systems[i][-1][\"facet\"]\n\n ax.annotate(\n str(facet_i),\n\n # xy=(x_tmp, y_tmp), xytext=(-10, 10),\n xy=(x_tmp, y_tmp), xytext=xytext_i,\n\n textcoords='offset points', ha='right', va='bottom',\n\n # bbox=dict(\n # boxstyle='round,pad=0.5',\n # fc='yellow',\n # alpha=0.5,\n # ),\n\n arrowprops=dict(\n arrowstyle='->',\n connectionstyle='arc3,rad=0',\n ),\n size=font_size - 2,\n )\n\n #__|\n\n #__|\n\n # | - Plotting Experimental Overpotentials <--------------------------------\n\n # 1.45 for IrO3\n # and 1.6 for IrO2\n # and 1.57 for IrOx (some unknown polymorph from Ir-metal they made)\n\n if exp_lines:\n\n # | - IrO3\n pos_IrO3 = 1.45\n color_IrO3 = \"red\"\n plot(\n [x1, x2],\n 2 * [pos_IrO3, ],\n '--',\n color=color_IrO3,\n lw=0.67,\n dashes=(1, 1),\n zorder=2,\n )\n\n plt.text(\n x1,\n pos_IrO3 - 0.014,\n r'$IrO_3$',\n fontsize=font_size,\n color=color_IrO3,\n horizontalalignment='left',\n verticalalignment='center',\n )\n\n #__|\n\n # | - IrO2\n pos_IrO2 = 1.8\n color_IrO3 = \"blue\"\n plot(\n [x1, x2],\n 2 * [pos_IrO2, ],\n '--',\n color=color_IrO3,\n 
lw=0.67,\n dashes=(1, 1),\n zorder=2,\n )\n\n plt.text(\n x1,\n pos_IrO2 + 0.024,\n r'$IrO_2$',\n fontsize=font_size,\n color=color_IrO3,\n horizontalalignment='left',\n verticalalignment='center',\n )\n\n #__|\n\n # | - IrOx\n pos_IrOx = 1.57\n color_IrOx = \"green\"\n plot(\n [x1, x2],\n 2 * [pos_IrOx, ],\n '--',\n color='green',\n lw=0.67,\n dashes=(1, 1),\n zorder=2,\n )\n\n plt.text(\n x1,\n pos_IrOx - 0.014,\n r'$IrO_x$',\n fontsize=font_size,\n color=color_IrOx,\n horizontalalignment='left',\n verticalalignment='center',\n # transform=ax.transAxes,\n )\n #__|\n\n #__|\n\n ax.legend(\n bbox_to_anchor=(-0.15, 1.425),\n loc=2,\n borderaxespad=0.5,\n ncol=3,\n fancybox=True,\n shadow=False,\n fontsize=\"x-small\",\n handlelength=2,\n )\n\n # fig.savefig('OER_1D_plot_v13.pdf', bbox_inches='tight')\n\n fig_path_i = os.path.join(\n plots_folder,\n \"OER_1D_Volcano.pdf\",\n )\n\n # fig_path_i_svg = os.path.join(\n # plots_folder,\n # \"OER_1D_plot_v13.svg\",\n # )\n\n if not os.path.exists(plots_folder):\n os.makedirs(plots_folder)\n\n fig.savefig(\n fig_path_i,\n bbox_inches=\"tight\",\n )\n\n # fig.savefig(\n # fig_path_i_svg,\n # bbox_inches=\"tight\",\n # )\n\n fig.clf()\n\n #__| **********************************************************************\n\n #__|", "def create_four_subplots():\n pass", "def generate_plots(self):\n freq_to_channel = {v: k for k, v in self.frequency_dict.iteritems()}\n data_axes = None\n for index, frequency in enumerate(sorted(freq_to_channel)):\n channel = freq_to_channel[frequency]\n td_f = self.frequency_dict[channel]\n title = 'Volume Backscatter (Sv) :Channel #%d: Frequency: %.1f kHz' % (channel, td_f)\n data_axes = self._generate_plot(self.ax[index], self.power_data_dict[channel], title,\n self.min_db, self.max_db)\n\n if data_axes:\n self._display_x_labels(self.ax[2], self.data_times)\n self.fig.tight_layout(rect=[0, 0.0, 0.97, 1.0])\n self._display_colorbar(self.fig, data_axes)", "def generate_plots(self, headers: [str], barplot_name: str, barplot_data: [int],\n lineplot_name: str, multi_data: [[int]]):\n self.export_data(barplot_name, headers, barplot_data)\n title = 'Total IRTs (' + str(C.MAX_ITERATIONS) + ' Steps)'\n self.barplot(barplot_name, y_label='Total IRT', img_title=title)\n self.lineplot(lineplot_name, headers, multi_data, img_title='IRT Paths')", "def create_plots(self, keys):\n\n self.plots = VPlotContainer(resizable = \"hv\", bgcolor=\"lightgray\",\n fill_padding=True, padding = 10)\n # this looks cryptic, but it is equivalent to\n # ArrayPlotData(a=[], b=[], c=[])\n # if the keys are a,b,c. 
This just does it for all of the keys.\n self.plotdata = ArrayPlotData(**dict(zip(keys, [[]]*len(keys))))\n\n # figure out which key will be the x axis\n if 'Step' in keys:\n x = 'Step'\n elif 'Time (ps)' in keys:\n x = 'Time (ps)'\n else:\n raise ValueError('The reporter published neither the step nor time'\n 'count, so I don\\'t know what to plot on the x-axis!')\n\n\n colors = itertools.cycle(['blue', 'green', 'silver', 'pink', 'lightblue',\n 'red', 'darkgray', 'lightgreen',])\n for y in filter(lambda y: y != x, keys):\n self.plots.add(chaco_scatter(self.plotdata, x_name=x, y_name=y,\n color=colors.next()))", "def create_pdf(self):\n\n my_datetime = datetime.now()\n self.pdf_name = (\n self.pdf_name + \"_\" + my_datetime.strftime(\"%H%M_%d%m%Y\") + \".pdf\"\n )\n fig_width = aW * self.column_ratio[0]\n\n clm_width_meta = (aW * self.column_ratio[1]) / len(self.fields)\n\n c = canvas.Canvas(os.path.join(self.pdf_folder, self.pdf_name), pagesize=A4)\n\n for qc_run_id, fig_file in sorted(self._files.items()):\n (param_values, feature_values) = get_param_values(\n qc_run_id, self.db_name, return_meta_add_on=True\n )\n\n comment = self.subject + \"<br/>\"\n # c.saveState()\n title = \"Dataset \" + qc_run_id\n\n # Prepare header\n header = Paragraph(title, title_style)\n h_w, h_h = header.wrap(aW, aH)\n\n # Prepare image\n img = ImageReader(fig_file)\n im_width, im_height = img.getSize()\n aspect = im_height / float(im_width)\n fig_height = fig_width * aspect\n\n # Prepare metadata section\n\n meta_table = Table(\n param_values,\n colWidths=[clm_width_meta] * len(self.fields),\n hAlign=\"CENTER\",\n rowHeights=0.22 * inch,\n )\n meta_table.setStyle(\n TableStyle(\n [\n (\"FONT\", (0, 0), (-1, 0), \"Helvetica-Bold\"),\n (\"FONT\", (0, 1), (-1, -1), \"Helvetica\"),\n (\"LINEBELOW\", (0, 0), (1, 0), 0.08, colors.black),\n (\"SIZE\", (0, 0), (-1, -1), 8),\n (\"VALIGN\", (0, 0), (-1, -1), \"BOTTOM\"),\n # ('ALIGN', (0, 0), (-1, 0), 'CENTER'),\n (\"ALIGN\", (0, 0), (0, -1), \"LEFT\"),\n (\"ALIGN\", (1, 1), (1, -1), \"LEFT\"),\n (\"INNERGRID\", (0, 0), (-1, -1), 0.08, colors.beige),\n # ('BOX', (0,0), (-1,-1), 0.25, colors.grey),\n ]\n )\n )\n\n meta_width, meta_height = meta_table.wrap(aW - im_width, aH / 2)\n\n # Prepare comments header\n comments_header = Paragraph(\"Comments:\", title_style)\n avail_height = aH - fig_height - v_padding\n comm_h_width, comm_h_height = comments_header.wrap(\n im_width, avail_height # aW - meta_width,\n )\n # Prepare comments\n my_datetime = datetime.now()\n ts = \"Printed on \" + my_datetime.strftime(\"%c\")\n\n try:\n data_specific_comment = self.comments[int(qc_run_id)]\n comment += data_specific_comment + \"<br/>\"\n comment += self.comments[\"general\"] + \"<br/>\"\n\n comment += self.smalltalk + \"<br/>\"\n except Exception:\n logger.warning(\n \"Unable to summarize result of \" + \"dataset {}\".format(qc_run_id)\n )\n comment_ts = comment + ts\n comment_ts = textwrap.fill(comment_ts, 70)\n comment_ts = comment_ts.replace(\"\\n\", \"<br/>\")\n\n comments_p = Paragraph(comment_ts, body_style)\n\n avail_height = aH - fig_height - v_padding - comm_h_height\n\n comm_width, comm_height = comments_p.wrap(im_width, avail_height) # aW,\n\n line_widths = comments_p.getActualLineWidths0()\n number_of_lines = len(line_widths)\n if number_of_lines > 1:\n pass\n if number_of_lines == 1:\n min(line_widths)\n comm_width, comm_height = comments_p.wrap(im_width, avail_height)\n\n # Prepare features\n feat_table = Table(\n feature_values,\n colWidths=[clm_width_meta] * 
len(self.fields),\n hAlign=\"CENTER\",\n rowHeights=0.22 * inch,\n )\n feat_table.setStyle(\n TableStyle(\n [\n (\"FONT\", (0, 0), (-1, 0), \"Helvetica-Bold\"),\n (\"FONT\", (0, 1), (-1, -1), \"Helvetica\"),\n (\"LINEBELOW\", (0, 0), (1, 0), 0.08, colors.black),\n (\"SIZE\", (0, 0), (-1, -1), 8),\n (\"VALIGN\", (0, 0), (-1, -1), \"BOTTOM\"),\n # ('ALIGN', (0, 0), (-1, 0), 'CENTER'),\n (\"ALIGN\", (0, 0), (0, -1), \"LEFT\"),\n (\"ALIGN\", (1, 1), (1, -1), \"LEFT\"),\n (\"INNERGRID\", (0, 0), (-1, -1), 0.08, colors.beige),\n # ('BOX', (0,0), (-1,-1), 0.25, colors.grey),\n ]\n )\n )\n avail_height = aH - meta_height # fig_height - v_padding - comm_h_height\n avail_height -= comm_height\n feat_width, feat_height = feat_table.wrap(aW - im_width, avail_height)\n\n # Draw everyting on canvas\n\n header.drawOn(c, left_margin, aH - top_margin)\n\n c.drawImage(\n img,\n left_margin,\n aH - top_margin - fig_height - v_padding,\n width=fig_width * 1.1,\n height=fig_height * 1.1,\n mask=\"auto\",\n )\n\n meta_table.drawOn(\n c,\n left_margin + fig_width + h_padding,\n aH - meta_height - top_margin / 2, # - v_padding\n )\n\n comments_header.drawOn(\n c,\n left_margin,\n aH\n - top_margin\n - comm_h_height\n - fig_height\n - 2 * v_padding, # - add_on_height\n )\n\n comments_p.drawOn(\n c,\n left_margin,\n aH\n - top_margin\n - comm_h_height\n - comm_height\n - fig_height\n - 2 * v_padding\n - comm_h_height, # - add_on_height\n )\n\n feat_table.drawOn(\n c,\n left_margin + fig_width + h_padding,\n aH - meta_height - top_margin / 2 - feat_height - v_padding,\n # top_margin - fig_height - 2*v_padding - feat_height\n )\n\n # new page\n c.showPage()\n c.saveState()\n\n c.save()", "def generate_all_cost_plots(suffix):\n directory_name = \"inputs/\"\n directory = os.fsencode(directory_name)\n outfolder = \"plots/\" + suffix.strip(\".in\") + \"/\"\n try:\n os.makedirs(outfolder)\n except FileExistsError:\n pass\n for file in os.listdir(directory):\n filename = os.fsdecode(file)\n if filename.endswith(suffix):\n print(\"Solving : \", filename)\n inputfile = directory_name + filename\n num_clusters, cost = cost_vs_clusters(inputfile)\n outfile = outfolder + filename.strip(\".in\") + \".png\"\n plot_cost_vs_clusters(cost,num_clusters,outfile)", "def create_html_page_of_plots(list_of_plots, prefix='html'):\n if not os.path.exists(prefix):\n os.makedirs(prefix)\n os.system('mv *.png %s' % prefix)\n #print(list_of_plots)\n idx = 0\n htmlfile = open('%s/index_0.html' % prefix, 'w')\n htmlfile.write('<!DOCTYPE html><html><body><div>\\n')\n for plot in list_of_plots:\n if idx > 0 and idx % 200 == 0:\n htmlfile.write('</div></html></html>\\n')\n htmlfile.close()\n htmlfile = open('%s/index_%d.html' % (prefix, (idx//200)), 'w')\n htmlfile.write('<!DOCTYPE html><html><body><div>\\n')\n htmlfile.write('<p><img src=\"%s\"></p>\\n' % plot)\n idx += 1\n htmlfile.write('</div></html></html>\\n')\n htmlfile.close()", "def createPlots(ncfile, files, loc, savepath, sim_name, bfric, tight=False, \\\n ratio=1.0, debug=False, plot=False):\n\n sns.set(font=\"serif\")\n\n # find the location centre for flood/tide split calculation\n # not yet working...\n if loc == 'GP':\n centre = [-66.33906, 44.26898]\n if tight:\n bounds = [-66.355, -66.31, 44.245, 44.2925]\n else:\n bounds = []\n elif loc == 'DG':\n centre = [-65.76000, 44.67751]\n if tight:\n bounds = [-65.775, -65.77, 44.665, 44.69]\n else:\n bounds = []\n elif loc == 'PP':\n centre = [-66.206924, 44.389368]\n # find out the tightness required for PP\n if tight:\n bounds = 
[-66.225, -66.195, -44.37, -44.41]\n else:\n bounds = []\n\n if debug:\n print 'calculating ebb/flood split at centre of location...'\n print 'calculating model velocity norm...'\n\n fI, eI, _, _ = ncfile.Util2D.ebb_flood_split_at_point(centre[0], centre[1])\n ncfile.Util3D.velo_norm()\n\n if debug:\n print '{} plot(s) will be created...'.format(len(files))\n\n if not plot:\n savepath = savepath + 'bfric_' + bfric + '/' + loc + '_' + sim_name\n if ratio != 1.0:\n savepath = savepath + '/with_ratio_{}'.format(str(ratio))\n\n # creates a subdirectory, so as not to overwrite existing files\n if debug:\n print 'creating new subdirectory...'\n now = datetime.now()\n now = now.strftime(\"%Y%m%d\")\n if not osp.exists(savepath):\n os.makedirs(savepath)\n else:\n savepath = savepath + '/_' + now\n os.makedirs(savepath)\n savepath = savepath + '/'\n\n for i, fname in enumerate(files, start=1):\n if debug:\n print 'creating drifter object...'\n drift = Drifter(fname, debug=False)\n fname = fname[48:-4]\n\n # creates drifter object window for flow map\n if debug:\n print 'creating drifter object window...'\n tModel = ncfile.Variables.matlabTime\n tDrift = drift.Variables.matlabTime\n win1 = (np.abs(tModel-tDrift.min())).argmin()\n win2 = (np.abs(tModel-tDrift.max())).argmin()\n\n tide = str(drift.Data['water_level'].tide)\n # averages velocity norm over flood or ebb cycle within drifter window\n if tide == 'flood':\n tideNorm = np.mean(ncfile.Variables.velo_norm[win1:win2,:,:], 0)\n elif tide == 'ebb':\n tideNorm = np.mean(ncfile.Variables.velo_norm[win1:win2,:,:], 0)\n\n # create spatially varying color map of mean velocity norm\n if debug:\n print 'preparing to create colormap...'\n fig = createColorMap(ncfile, tideNorm[0,:], mesh=False, bounds=bounds, \\\n title='Trajectory for ' + fname[:-4], debug=debug, \\\n label='Mean Velocity Norm during '+tide.capitalize()+' Tide (m/s)')\n # create title\n # fig.suptitle('Data from ' + fname[:-4], fontsize=14)\n\n x = drift.Variables.lon\n y = drift.Variables.lat\n u = drift.Variables.u\n v = drift.Variables.v\n\n if debug:\n print 'creating scatter plot...'\n plt.scatter(x,y)\n\n if debug:\n print 'preparing to plot time series...'\n\n # create validation structure\n if debug:\n print 'creating validation object...'\n\n try:\n valid = Validation(drift, ncfile, flow='sf', debug=False)\n except IndexError:\n print 'cannot create validation object for drifter %i.' 
% i\n continue\n\n # calculate speed from interpolated and observed date\n mTimes = valid.Variables.struct['mod_time']\n oU = valid.Variables.struct['obs_timeseries']['u']\n oV = valid.Variables.struct['obs_timeseries']['v']\n mU = valid.Variables.struct['mod_timeseries']['u']\n mV = valid.Variables.struct['mod_timeseries']['v']\n\n if debug:\n print '\\tcalculating speeds...'\n speedS = np.asarray(np.sqrt(mU**2 + mV**2))\n speedO = np.asarray(np.sqrt(oU**2 + oV**2))\n\n # ratio addition\n if debug:\n print '\\tadding ratio adjustments...'\n speedS = speedS * ratio\n datetimes = np.asarray([dn2dt(time) for time in mTimes])\n\n # For now, separate the two plots.\n # Set BFRIC for now\n # WHAT IS WRONG WITH THIS\n # fig2=plt.figure()\n # result, axis = plotTimeSeries(fig2, np.reshape(np.tile(datetimes,2),\\\n # (2, len(datetimes))), np.vstack((speedS, speedO)), \\\n # loc, label=['Simulated','Observed'], where=111, \\\n # title='Path Speeds for ' + fname[:-4] + ' | BFRIC=0.015', \\\n # axis_label='Speed (m/s)')\n\n # if not result:\n # if debug:\n # print '...error encountered with drifter {}.'.format(i)\n # print 'continuing...'\n # plt.close()\n # continue\n\n if plot:\n if debug:\n print 'displaying plot...'\n plt.show()\n else:\n if debug:\n print 'saving plot...'\n fig.savefig(savepath + fname + '_traj.png')\n # result.savefig(savepath + fname + '_speed.png')\n if debug:\n print '...plot saved to: ', savepath+fname\n\n # clear the figure window\n plt.close()", "def three_PDF_plots(res=200,table_exts=[''],**kwargs):\n\n p = copy.copy(params)\n for key,val in kwargs.items():\n setattr(p,key,val)\n GR = glo.global_results()\n\n fig, axs = plt.subplots(3, sharex='col',\\\n figsize=(8,15),facecolor='w',\\\n gridspec_kw={'hspace': 0, 'wspace': 0})\n\n # First print cell data distribution\n i = 0\n for gal_index in zip(p.gal_index):\n ax1 = axs[i]\n gal_ob = gal.galaxy(GR=GR, gal_index=gal_index)\n df = gal_ob.cell_data.get_dataframe()\n lognH = np.log10(df.nH)\n hist = np.histogram(lognH[df.nH.values > 0],bins=200,weights=df.m[df.nH.values > 0])\n hist1 = np.asarray(hist[0]) # histogram\n hist2 = np.asarray(hist[1]) # bin edges\n hist1 = hist1*1./sum(hist1)\n ax1.plot(hist2[0:len(hist1)],hist1,drawstyle='steps',ls='-',lw=1.5,\\\n alpha=0.7,color=p.color[0],label='Original cell distribution')\n \n for table_ext,ls,color in zip(table_exts,['--',':'],p.color[1::]):\n if '_M10' in table_ext: lab = 'Mach = 10'\n if '_arepoPDF_M51' in table_ext: lab = 'AREPO parametrized PDF'\n PDF(gal_index,color=color,table_ext=table_ext,ls=ls,res=200,add=True,ax=ax1,label=lab,ow=p.ow)\n \n if i == 0: ax1.legend(loc='upper right',fontsize=12)\n if i == 2: ax1.set_xlabel(getlabel('lnH'))\n ax1.set_ylabel('Mass fraction per bin')\n\n i += 1\n\n if p.savefig:\n if not os.path.isdir(p.d_plot + 'cell_data/PDFs/'): os.mkdir(p.d_plot + 'cell_data/PDFs/') \n plt.savefig(p.d_plot + 'cell_data/PDFs/simple_PDF_%s%s%s_x3.png' % (p.sim_name,p.sim_run,p.table_ext), format='png', dpi=250, facecolor='w')", "def initial_plots(runs):\n for run in runs.keys():\n meta = runs[run]\n plot_pdfs(meta)\n plot_priorsamps(meta)\n plot_ivals(meta)\n# if meta.truNz is not None:\n# plot_true(meta)\n timesaver(meta,'iplot',meta.key)", "def makePlotWithFileList(isoFileList, numerator, denominator, subunits=None, normProtein=None, yMax=1.5, title=None, legendCols=4,\r\n median=False, names=None, colors=None, figSize=(22,5), markerSize=None, noFill=False, legend=False,\r\n mew=1, yMin=-0.05, highlightMed=False, hms=2, hmFilled=True, yAxis=None, 
alpha=1.0, adjProt=None):\r\n if names is None:\r\n names = isoFileList\r\n \r\n namesList = [(isoFileList[i], names[i]) for i in range(len(isoFileList))]\r\n allStats = qMS.multiStatsDict(isoFileList, numerator, denominator, normalization=1.0, offset=0.0, normProtein=normProtein, noProcess=True, adjProt=adjProt)\r\n \r\n return makePlotWithStatsDictDict(allStats, subunits=subunits, yMax=yMax, title=title, legend=legend, legendCols=legendCols, yAxis=yAxis,\r\n median=median, namesList=namesList, colors=colors, figSize=figSize, markerSize=markerSize,\r\n noFill=noFill, mew=mew, yMin=yMin, highlightMed=highlightMed, hms=hms, hmFilled=hmFilled, alpha=alpha)", "def create_plot_dir(base_dir: str) -> str:\n time_str = datetime.now().strftime('%Y%b%d-%H%M%S') \n plot_dir = os.path.join(res_dir, 'fig_'+time_str)\n# plot_dir = os.path.join(res_dir, 'plot')\n if not os.path.exists(plot_dir):\n os.makedirs(plot_dir)\n\n return plot_dir", "def file_table(list_observations, indir, informat, outfile):\n print('Creating file summary table ...')\n\n # We gather all infos in a list of dicts and write this\n # as a FITS table at the end.\n # for documentation see http://gamma-astro-data-formats.readthedocs.org/en/latest/data_storage/hdu_index/index.html\n\n HDU_CLASS_TAGS = dict(\n events='events',\n aeff='aeff_2d',\n edisp='edisp_2d',\n psf_3gauss='psf_3gauss',\n psf_king='psf_king',\n psf_table='psf_table',\n gti='gti'\n )\n\n rows = []\n for obs in list_observations.observations:\n testfile=obs.out_filename(\"events\", format=informat, dir=indir)\n try:\n table = Table.read(str(testfile), hdu='EVENTS')\n except Exception:\n print \"fits corrupted for file \"+str(filename)\n continue\n #for filetype in ['events', 'aeff', 'edisp', 'psf_3gauss']:\n #for filetype in ['events']:\n for filetype in ['events', 'aeff', 'edisp', 'psf_3gauss']:\n filename = obs.out_filename(filetype, format=informat, dir=indir)\n\n if filename.is_file():\n print('Processing {}'.format(filename))\n\n data = dict()\n\n # OBS_ID\n data['OBS_ID'] = obs.obs_id\n\n # HDU_TYPE\n if filetype in ('psf_3gauss'):\n data['HDU_TYPE'] = 'psf'\n else:\n data['HDU_TYPE'] = str(filetype)\n\n # HDU_CLASS\n data['HDU_CLASS'] = HDU_CLASS_TAGS[filetype]\n\n # FILE_DIR (relative path)\n data['FILE_DIR'] = str(os.path.relpath(str(obs.out_filename(filetype).parent), str(Path(outfile).parent)))\n\n # FILE_NAME\n data['FILE_NAME'] = str(obs.filename(filetype, format=informat).parts[-1])\n\n # HDU-INFOS\n hdu_list = fits.open(str(filename))\n hdu = hdu_list[1]\n header = hdu.header\n data['HDU_NAME'] = hdu.name\n\n # FILE-INFOS\n stat = filename.stat()\n data['SIZE'] = stat.st_size\n data['MTIME'] = stat.st_mtime\n data['MD5'] = hashlib.md5(filename.open('rb').read()).hexdigest()\n\n # if 'HDUCLAS2' in header:\n # data['HDUCLASS'] = header['HDUCLAS2']\n # else:\n # data['HDUCLASS'] = 'EVENTS'\n\n # if its the events-file, use a second dict for the gti-hdu\n if filetype == 'events':\n data_gti = dict()\n data_gti['OBS_ID'] = obs.obs_id\n data_gti['HDU_TYPE'] = 'gti'\n data_gti['HDU_CLASS'] = 'gti'\n data_gti['FILE_DIR'] = data['FILE_DIR']\n data_gti['FILE_NAME'] = data['FILE_NAME']\n data_gti['HDU_NAME'] = hdu_list[2].name\n data_gti['SIZE'] = data['SIZE']\n data_gti['MTIME'] = data['MTIME']\n data_gti['MD5'] = data['MD5']\n\n rows.append(data_gti)\n\n rows.append(data)\n hdu_list.close()\n\n else:\n print('File not found: {}'.format(filename))\n\n names = [\n 'OBS_ID', 'HDU_TYPE', 'HDU_CLASS',\n 'FILE_DIR', 'FILE_NAME', 'HDU_NAME',\n 'SIZE', 'MTIME', 'MD5'\n 
]\n table = Table(rows=rows, names=names)\n\n print('Writing {}'.format(outfile))\n table.write(str(outfile), overwrite=True)\n # add hdu name\n hdulist = fits.open(str(outfile), mode='update')\n hdulist[1].name = 'HDU_INDEX'\n hdulist.close()", "def dms_plots_perf_data(perffile, action, parameters, destination_folder, graph_or_rdf = \"Graph\"):\n\n all_data = pd.read_csv(perffile, index_col = False)\n list_of_tables = []\n all_parameters = \"cycles,instructions,cache-references,cache-misses,bus-cycles,L1-dcache-loads,L1-dcache-load-misses,L1-dcache-stores,dTLB-loads,dTLB-load-misses,LLC-loads,LLC-load-misses,LLC-stores,branches,branch-misses,context-switches,cpu-migrations,page-faults\".split(\",\")\n\n for each in parameters:\n for each_action in actions:\n list_of_tables.append(perf_specific_parameter(all_data, each_action, each, graph_or_rdf, only_table = False))\n \n only_tables = list(set(all_parameters) - set(parameters))\n for each in only_tables:\n for each_action in actions:\n list_of_tables.append(perf_specific_parameter(all_data, each_action, each, graph_or_rdf, only_table = True))\n\n\n if not os.path.exists(destination_folder + \"/tables/\"):\n os.mkdir(destination_folder + \"/tables/\")\n #save_tables(destination_folder + \"/tables/\", list_of_tables, graph_or_rdf)", "def make_F792_plots(data_obj, title_pages=False):\n\n print(\"Generating plots...\")\n\n # Create color maps\n cmap = plt.get_cmap('jet')\n cmap = plt.get_cmap('gray')\n\n # Call the\n plot_front_title(data_obj)\n\n # -----------------------------------------------------------------------#\n # Initialize the position variables for the text and graphs on the pdf. #\n # -----------------------------------------------------------------------#\n y0 = 0.9\n dy = [0.03, 0.025]\n\n ha = 'left'\n va = 'center'\n fs = 10\n dfs = 2\n\n # metric name value unc min\n xpos = [0.0, 0.4, 0.5, 0.75]\n yi = y0 - 0.1 # The position of the text on the y access, which is constantly updated as more text is added\n\n # -----------------------------------------------------------------------------------#\n # Plot the 'summary' page listing all the tests and the overall results - TEXT ONLY #\n # -----------------------------------------------------------------------------------#\n\n # Create the title of the page\n plot_overall_text(data_obj, yi, xpos, ha, va, fs)\n\n #Plot the overall results text of the first test, Steel Differentiation\n\n\n # Plot the overall results text of the second test, Penetration\n yi = yi - dy[0]\n plot_pen_text(data_obj, 2, yi, xpos, ha, va, fs, dfs)\n\n # Plot the overall results text of the third test, Organic Material Detection\n yi = yi - dy[0]\n plot_BSNR_text(data_obj, 3, yi, xpos, ha, va, fs, dfs)\n\n # Plot the overall results text of the fourth test, Spatial Resolution\n yi = yi - dy[0]\n plot_spatial_text(data_obj, 4, yi, yi - dy[1], xpos, ha, va, fs, dfs)\n yi = yi - dy[1] # Make sure the local yi is updated\n\n # Plot the overall results text of the fifth test, Dynamic Range\n yi = yi - dy[0]\n plot_dyn_text(data_obj, 5, yi, xpos, ha, va, fs, dfs)\n\n # Plot the overall results text of the sixth test, Noise\n yi = yi - dy[0]\n plot_noise_text(data_obj, 6, yi, dy, xpos, ha, va, fs, dfs)\n yi = yi - (dy[1] * 2) # Make sure to update yi, as it was only locally changed in 'plot_noise_text()'\n\n # --------------------------------------------------#\n # Plot the footnotes for the overall results page. 
#\n # --------------------------------------------------#\n plot_overall_footnotes(xpos, ha, va, fs, dfs, standard=\"ASTM F792\")\n\n\n #---------------------------------------------------------#\n # Plot the cropped and rotated images from the processing #\n #---------------------------------------------------------#\n plot_images(data_obj, fs) # Plot the images to the pdf\n\n plot_image_footnotes(data_obj, xpos, ha, va, fs, dfs) # Add in the footnotes to the pdf\n\n # NOTE: Above image plotting the same, with the same footnotes, for F792???\n\n #-----------------------------#\n # Steel differentiation plots #\n #-----------------------------#\n if title_pages:\n new_title_page(data_obj, \"Test 1: Steel Differentiation\")\n\n #Call the function to plot the Steel Differentiation results to the pdf\n\n\n #-------------------#\n # Penetration plots #\n #-------------------#\n if title_pages:\n new_title_page(data_obj, \"Test 2: Penetration\")\n\n # Call the function to plot the Steel Penetration results to the pdf\n #plot_steel_pen(data_obj, 2)\n\n #------------#\n # BSNR plots #\n #------------#\n if title_pages:\n new_title_page(data_obj, \"Test 3: Organic Material Detection\")\n\n # Call the function to plot the Organic Material Detection results to the pdf\n plot_BSNR(data_obj, 3, cmap)\n\n #--------------------#\n # Spatial Resolution #\n #--------------------#\n if title_pages:\n new_title_page(data_obj, \"Test 4: Spatial Resolution\")\n\n # Call the function to plot the Spatial Resolution results to the pdf\n plot_spatial_res(data_obj, 4)\n\n #---------------#\n # Dynamic Range #\n #---------------#\n if title_pages:\n new_title_page(data_obj, \"Test 5: Dynamic Range\")\n\n # Call the function to plot the Dynamic Range results to the pdf\n plot_dynamic_range(data_obj, 5)\n\n #-------#\n # Noise #\n #-------#\n if title_pages:\n new_title_page(data_obj, \"Test 6: Noise (NEQ)\")\n\n # Call the function to plot the Noise (NEQ) results to the pdf\n plot_noise(data_obj, 6)\n\n fig = new_pdf_page(data_obj.pdf_obj, open_fig=False)", "def main():\n \n data_base = '/local/duman/SIMULATIONS/many_polymers_5/'\n save_base = '/usr/users/iff_th2/duman/RolfData/many_polymers_5'\n \n \n ## Index the data\n# density = [0.08, 0.2, 0.4]\n# xi_L = [0.05, 0.1, 0.2, 1.0, 2.5, 5.0, 8.0, 16.0]\n# Pe = [3.0, 7.0, 15.0, 50.0, 150.0, 500.0, 750.0, 1500.0, 4375.0, 8000.0, 10000.0]\n# kappa = [1.25, 2.5, 5.0, 25.0, 62.5, 125.0, 200.0, 400.0]\n# fp = [0.0, 0.0048, 0.0112, 0.024, 0.08, 0.24, 0.8, 1.2, 2.4, 7.0]\n \n density = [0.2]\n kappa = [1.25, 2.5, 5.0, 25.0, 62.5, 125.0, 200.0, 400.0]\n xi_L = [0.05, 0.1, 0.2, 1.0, 2.5, 5.0, 8.0, 16.0]\n fp = [0.0048, 0.0112, 0.024, 0.08, 0.24, 0.8, 1.2, 2.4, 7.0]\n Pe = [3.0, 7.0, 15.0, 50.0, 150.0, 500.0, 750.0, 1500.0, 4375.0]\n \n ## Create points\n points = []\n for i, x in enumerate(xi_L):\n for j, p in enumerate(Pe):\n points.append( Phase(x, p, kappa[i], fp[j], 'short') ) \n \n for point in points:\n point.analyse_type()\n point.set_plot_props()\n \n long_xil = [0.05, 0.2, 1.0, 2.5, 16.0]\n long_pe = [3.0, 150.0, 750.0, 8000.0, 10000.0]\n long_kappa = [5.0, 20.0, 100.0, 250.0, 1600.0]\n long_fp = [0.0003, 0.015, 0.075, 0.0, 0.0]\n long_points = []\n for i, x in enumerate(long_xil):\n for j, p in enumerate(long_pe):\n long_points.append( Phase(x, p, long_kappa[i], long_fp[j], 'long') ) \n \n for point in long_points:\n point.determine_type()\n point.set_plot_props()\n \n plot_data(points, long_points, save_base, xi_L, Pe)", "def do_data_plots(cat, subdir):\n 
dla_data.noterdaeme_12_data()\n (l_N, cddf, cddf68, cddf95) = cat.plot_cddf(zmax=5,color=\"blue\")\n np.savetxt(path.join(subdir,\"cddf_all.txt\"), (l_N, cddf, cddf68[:,0], cddf68[:,1], cddf95[:,0],cddf95[:,1]))\n plt.xlim(1e20, 1e23)\n plt.ylim(1e-28, 5e-21)\n plt.legend(loc=0)\n save_figure(path.join(subdir, \"cddf_gp\"))\n plt.clf()\n\n (l_N, cddf, cddf68, cddf95) = cat.plot_cddf(zmax=5,color=\"blue\", moment=True)\n plt.xlim(1e20, 1e23)\n plt.legend(loc=0)\n save_figure(path.join(subdir, \"cddf_moment_gp\"))\n plt.clf()\n\n #Evolution with redshift\n (l_N, cddf, cddf68, cddf95) = cat.plot_cddf(4,5, label=\"4-5\", color=\"brown\")\n np.savetxt(path.join(subdir,\"cddf_z45.txt\"), (l_N, cddf, cddf68[:,0], cddf68[:,1], cddf95[:,0],cddf95[:,1]))\n (l_N, cddf, cddf68, cddf95) = cat.plot_cddf(3,4, label=\"3-4\", color=\"black\")\n np.savetxt(path.join(subdir,\"cddf_z34.txt\"), (l_N, cddf, cddf68[:,0], cddf68[:,1], cddf95[:,0],cddf95[:,1]))\n (l_N, cddf, cddf68, cddf95) = cat.plot_cddf(2.5,3, label=\"2.5-3\", color=\"green\")\n np.savetxt(path.join(subdir,\"cddf_z253.txt\"), (l_N, cddf, cddf68[:,0], cddf68[:,1], cddf95[:,0],cddf95[:,1]))\n (l_N, cddf, cddf68, cddf95) = cat.plot_cddf(2,2.5, label=\"2-2.5\", color=\"blue\")\n np.savetxt(path.join(subdir,\"cddf_z225.txt\"), (l_N, cddf, cddf68[:,0], cddf68[:,1], cddf95[:,0],cddf95[:,1]))\n plt.xlim(1e20, 1e23)\n plt.ylim(1e-28, 5e-21)\n plt.legend(loc=0)\n save_figure(path.join(subdir,\"cddf_zz_gp\"))\n plt.clf()\n\n #dNdX\n dla_data.dndx_not()\n dla_data.dndx_pro()\n (z_cent, dNdX, dndx68, dndx95) = cat.plot_line_density(zmax=5)\n np.savetxt(path.join(subdir,\"dndx_all.txt\"), (z_cent, dNdX, dndx68[:,0],dndx68[:,1], dndx95[:,0],dndx95[:,1]) )\n plt.legend(loc=0)\n plt.ylim(0,0.16)\n save_figure(path.join(subdir,\"dndx_gp\"))\n plt.clf()\n\n #Omega_DLA\n dla_data.omegahi_not()\n dla_data.omegahi_pro()\n dla_data.crighton_omega()\n (z_cent, omega_dla, omega_dla_68, omega_dla_95) = cat.plot_omega_dla(zmax=5)\n# cat.tophat_prior = True\n# cat.plot_omega_dla(zmax=5, label=\"Tophat Prior\", twosigma=False)\n# cat.tophat_prior = False\n np.savetxt(path.join(subdir,\"omega_dla_all.txt\"), (z_cent, omega_dla, omega_dla_68[:,0],omega_dla_68[:,1], omega_dla_95[:,0], omega_dla_95[:,1]))\n plt.legend(loc=0)\n plt.xlim(2,5)\n plt.ylim(0,2.5)\n save_figure(path.join(subdir,\"omega_gp\"))\n plt.clf()", "def table(self):\n\n param=self.x_param\n\n device=self.device\n\n base_params=device.get_params()\n\n data_tot=DataFrame()\n\n for i in range(len(param)):\n\n print_index=1\n\n for name in param.names:\n\n device._set_params(param(i))\n\n device.draw()\n\n df=device.export_all()\n\n if self.labels_bottom is not None:\n\n index=self.labels_bottom[i]\n\n else:\n\n index=str(i)\n\n print(\"Generating table, item {} of {}\\r\".format(print_index,len(param)),end=\"\")\n\n data_tot=data_tot.append(Series(df,name=index))\n\n device._set_params(base_params)\n\n return data_tot", "def main(x_axis, y_axis, filtered, unfiltered, name, histogram, total, true_max):\n axes = [x_axis, y_axis, 'description']\n uf_dict, f_dict, min_x, max_x, min_y, max_y = data_from_sc_file(axes, filtered, unfiltered, true_max)\n gen_plots(uf_dict, f_dict, min_x, max_x, min_y, max_y, axes, name, histogram, total)", "def save_plots(plot_dict, filename) :\n outfile = ROOT.TFile(filename, \"RECREATE\")\n\n for key in sorted(plot_dict) :\n if type( plot_dict[key] ) is types.DictType :\n directory = outfile.mkdir( key )\n directory.cd()\n save_plot( plot_dict[key], directory )\n outfile.cd()\n elif 
plot_dict[key] is not None :\n plot_dict[key].Write()\n \n outfile.Close()", "def setup(self):\n self.table = prettytable.PrettyTable()\n self.table.field_names = self.titles\n if self.convert_columns:\n self.rows = self.convert_columns_to_rows(self.rows)\n if self.colour:\n self.colour = self.convert_columns_to_rows(self.colour)", "def _create_ts_plots(ts_agent_list, output_directory):\n\n # create traces for plots\n makespans_traces, makespans_layout, \\\n nh_sizes_traces, nh_sizes_layout, \\\n tl_sizes_traces, tl_sizes_layout = _make_ts_traces(ts_agent_list)\n\n # create plots\n plot(dict(data=makespans_traces, layout=makespans_layout),\n filename=str(output_directory / 'ts_makespans.html'),\n auto_open=False)\n plot(dict(data=nh_sizes_traces, layout=nh_sizes_layout),\n filename=str(output_directory / 'neighborhood_sizes.html'),\n auto_open=False)\n plot(dict(data=tl_sizes_traces, layout=tl_sizes_layout),\n filename=str(output_directory / 'tabu_list_sizes.html'),\n auto_open=False)\n\n # create schedule\n best_solution = min([ts_agent.best_solution for ts_agent in ts_agent_list])\n best_solution.create_schedule_xlsx_file(str(output_directory / 'ts_schedule'), continuous=True)\n best_solution.create_gantt_chart_html_file(str(output_directory / 'ts_gantt_chart.html'), continuous=True)", "def _create_layout(root_dir, subsets):\n _create_folder(os.path.join(root_dir, \"images\"))\n _create_folder(os.path.join(root_dir, \"labels\"))\n\n for subset in subsets:\n _create_folder(os.path.join(root_dir, \"images\", subset))\n _create_folder(os.path.join(root_dir, \"labels\", subset))", "def do_htt_plots(tree, output_dir, cut=''):\n for logz in [True, False]:\n make_2d_plot(tree, 'httRef', HTT_REF_STR, NB_HTT, HTT_MIN, HTT_MAX, 'httL1', HTT_L1_STR, NB_HTT, HTT_MIN, HTT_MAX,\n os.path.join(output_dir, 'httRef_httL1.pdf'), logz=logz, normx=False,\n cut=cut, title=TITLE, diagonal_line=True)\n for normx in [True, False]:\n make_2d_plot(tree, 'httL1', HTT_L1_STR, NB_HTT, HTT_MIN, HTT_MAX, 'httL1/httRef', HTT_RATIO_STR, NB_HTT_RATIO, HTT_RATIO_MIN, HTT_RATIO_MAX,\n os.path.join(output_dir, 'httRatio_httL1.pdf'), logz=logz, normx=normx,\n cut=cut, title=TITLE, horizontal_line=True)\n make_2d_plot(tree, 'httRef', HTT_REF_STR, NB_HTT, HTT_MIN, HTT_MAX, 'httL1/httRef', HTT_RATIO_STR, NB_HTT_RATIO, HTT_RATIO_MIN, HTT_RATIO_MAX,\n os.path.join(output_dir, 'httRatio_httRef.pdf'), logz=logz, normx=normx,\n cut=cut, title=TITLE, horizontal_line=True)\n\n make_2d_plot(tree, 'httL1', HTT_L1_STR, NB_HTT, HTT_MIN, HTT_MAX, 'httL1-httRef', HTT_DIFF_STR, NB_HTT_DIFF, HTT_DIFF_MIN, HTT_DIFF_MAX,\n os.path.join(output_dir, 'httDiff_httL1.pdf'), logz=logz, normx=normx,\n cut=cut, title=TITLE, horizontal_line=True)\n make_2d_plot(tree, 'httRef', HTT_REF_STR, NB_HTT, HTT_MIN, HTT_MAX, 'httL1-httRef', HTT_DIFF_STR, NB_HTT_DIFF, HTT_DIFF_MIN, HTT_DIFF_MAX,\n os.path.join(output_dir, 'httDiff_httRef.pdf'), logz=logz, normx=normx,\n cut=cut, title=TITLE, horizontal_line=True)\n make_2d_plot(tree, 'httL1/httRef', HTT_RATIO_STR, NB_HTT_RATIO, HTT_RATIO_MIN, HTT_RATIO_MAX,\n 'httL1-httRef', HTT_DIFF_STR, NB_HTT_DIFF, HTT_DIFF_MIN, HTT_DIFF_MAX,\n os.path.join(output_dir, 'httDiff_httRatio.pdf'), logz=logz, normx=normx,\n cut=cut, title=TITLE, horizontal_line=True)", "def make_plots():\n prep = DataPrep(filepath='/home/ubuntu/ca_bills_project/data/extra/topic_intro_data_05-23-17-08-23.csv')\n prep.prepare()\n k = 100\n trained_model_file = \"/home/ubuntu/ca_bills_project/data/extra/intro_model_100_topics_rf_10000trees.pkl\"\n with 
open(trained_model_file) as p:\n model = pickle.load(p)\n print \"loaded model\"\n features = [u'days_since_start', u'session_type', u'party_ALL_DEM', u'party_ALL_REP',\n u'party_BOTH', 'party_COM', u'urgency_No', u'urgency_Yes',\n u'taxlevy_No',\n u'taxlevy_Yes']\n topic_features = [\"topic_\"+str(x) for x in range(k)]\n features += topic_features\n X_train, y_train = prep.subset(features)\n feature_importance(model, features)\n feature_subset_indices = [73, 13]\n gb_file = \"/home/ubuntu/ca_bills_project/data/extra/intro_model_100_topics_gb.pkl\"\n with open(gb_file) as p:\n gb = pickle.load(p)\n make_partial_dependence(gb, X_train, y_train, features, feature_subset_indices)", "def genFluxTable(self, fname=\"1dFEhdfoutput.h5\"):\n h5data = {}\n for g in range(10):\n plotData = np.array([self.nodes[:, 1], self.angleIntFlux[g]])\n plotData = plotData[:, np.argsort(plotData[0])]\n h5data[\"mesh\" + str(g)] = plotData[0]\n h5data[\"groupFlx\" + str(g)] = plotData[1]\n h5d.writeToHdf5(h5data, fname)", "def generate_figures_and_xls_all_strains(outdir, cols_starts, region2data, ext, xls, group2pos, feature_names, samples):\n all_freqs = []\n # concatenate all pos and samples into one dataframe\n dframes = []\n for ri, (ref, pos) in enumerate(region2data.keys()): #regions): #[3]#; print(ref, pos, mt)\n mer, calls = region2data[(ref, pos)]\n for c, s in zip(calls, samples): \n df = pd.DataFrame(c, columns=feature_names)\n df[\"Strain\"] = s\n df[\"chr_pos\"] = \"%s:%s\"%(ref, pos)\n dframes.append(df)\n # read all tsv files\n df = pd.concat(dframes).dropna().reset_index()\n chr_pos, strains = df[\"chr_pos\"].unique(), df[\"Strain\"].unique() \n # compare individual methods\n for clf, method in (\n (KMeans(n_clusters=2), \"KMeans\"), \n (KNeighborsClassifier(), \"KNN\"), \n #(iso_new.iForest(ntrees=100, random_state=0), \"GMM+eIF\"), \n (GaussianMixture(random_state=0, n_components=2), \"GMM\"), \n (AgglomerativeClustering(n_clusters=2), \"AggClust\"), \n #(OneClassSVM(), \"OCSVM\"), \n (IsolationForest(random_state=0), \"IF\"), \n #(iso_new.iForest(ntrees=100, random_state=0), \"eIF\"), \n (RandomForestClassifier(), \"RF\"), \n ):\n fname = method\n for i, cols_start in enumerate(cols_starts, 1):\n results = []\n feat_name = \"_\".join(cols_start)\n fname = \"%s.%s\"%(method, feat_name); print(fname)\n outfn = os.path.join(outdir, \"%s.%s\"%(fname, ext))\n # narrow down the features to only signal intensity & trace\n cols = list(filter(lambda n: n.startswith(cols_start), feature_names))#; print(cols) #, \"DT\"\n # compare all samples to 0%\n s0 = samples[0]\n for s in samples[3:]: \n with np.errstate(under='ignore'):\n if \"+\" in method:\n clf2_name = method.split(\"+\")[-1]\n results += get_mod_freq_two_step(df, cols, chr_pos, [s0, s], feat_name, \n OFFSET=0.5, clf2_name=clf2_name, clf2=clf)\n elif method in (\"KNN\", \"RF\"):\n results += get_mod_freq_clf_train_test(df, cols, chr_pos, [s0, s], samples[1:3], clf, feat_name)\n else:\n results += get_mod_freq_clf(df, cols, chr_pos, [s0, s], clf, feat_name)\n \n # and store mod_freq predicted by various methods\n freqs = pd.DataFrame(results, columns=[\"chr_pos\", \"features\", \"mod_freq wt\", \"mod_freq strain\", \"strain\"])\n freqs[\"diff\"] = freqs.max(axis=1)-freqs.min(axis=1); freqs\n for name, pos in group2pos.items(): #((\"negative\", negatives), (\"pU\", pU_pos), (\"Nm\", Nm_pos)):\n freqs.loc[freqs[\"chr_pos\"].isin(pos), \"group\"] = name\n #freqs.to_csv(outfn, sep=\"\\t\"); freqs.head()\n freqs.to_excel(xls, fname, index=False)\n # 
plot differences between methods\n for group, pos in group2pos.items():\n freqs.loc[freqs[\"chr_pos\"].isin(pos), \"modification\"] = group\n #return freqs\n fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(15, 5))#, sharey=\"all\")\n sns.barplot(x=\"chr_pos\", y=\"mod_freq strain\", hue=\"strain\", edgecolor=\"white\", palette=[\"#f8786fff\", \"#7aae02ff\", \"#00bfc2ff\", \"#c67afeff\"], \n data=freqs[(freqs[\"features\"]==feat_name)&(freqs[\"group\"]==\"pU\")], ax=ax1)\n sns.barplot(x=\"chr_pos\", y=\"mod_freq strain\", hue=\"strain\", edgecolor=\"white\", palette=[\"#ed823aff\", \"#1c6ca9ff\", \"#35d1bbff\", \"#c978fdff\"], \n data=freqs[(freqs[\"features\"]==feat_name)&(freqs[\"group\"]==\"Nm\")], ax=ax2)\n ax1.set_ylabel(\"Per-site stoichiometry\"); ax2.set_ylabel(\"\")\n ax1.get_legend().remove(); ax2.get_legend().remove()#ax1.legend([]); ax2.legend([])\n ax1.set_ylim(0, 1); ax2.set_ylim(0, 1); #ax2.set(aspect=1.7)\n ax1.set_title(\"pU modifications\"); ax2.set_title(\"Nm modifications\")\n fig.suptitle(fname)\n fig.savefig(outfn)\n plt.close() # clear axis\n freqs[\"name\"] = fname\n all_freqs.append(freqs)\n return all_freqs", "def setUp(self):\r\n self.output_dir = '/tmp/'\r\n\r\n otu_table_vals = array([[0, 0], [1, 5]])\r\n\r\n self.otu_table = table_factory(otu_table_vals,\r\n ['Sample1', 'Sample2'],\r\n ['OTU1', 'OTU2'],\r\n [None, None],\r\n [{\"taxonomy\": [\"Bacteria\"]},\r\n {\"taxonomy\": [\"Archaea\"]}])\r\n\r\n filt_otu_table_vals = array([[1, 5]])\r\n\r\n self.filt_otu_table = table_factory(filt_otu_table_vals,\r\n ['Sample1', 'Sample2'],\r\n ['OTU2'],\r\n [None, None],\r\n [{\"taxonomy\": [\"Archaea\"]}])\r\n\r\n self.num_otu_hits = 5\r\n self._folders_to_cleanup = []", "def format_gnuplot(tds):\n if tds['type'] == '%':\n fname = \"Occupancy-By-Tile\"\n xlabel = \"% Occupied\"\n ylabel = \"% Pass Filter\"\n xrange = yrange = 100.0\n plot = [\"pct_occup\", \"pct_pf\"]\n else:\n fname = \"Cluster-Density-By-Tile\"\n xlabel = \"Cluster Density (kclusters/mm^2)\"\n ylabel = \"PF Density (kclusters/mm^2)\"\n xrange = yrange = tds['density_max']\n plot = [\"density_k\", \"pf_density_k\"]\n\n\n gp_lines = [ \"# generated by {}\".format(' '.join(sys.argv)),\n \"set terminal pngcairo enhanced font 'sans,10'\",\n \"set output '{}.png'\".format(fname),\n \"set title '{}'\".format(tds['fcid']),\n \"set xrange [0 : {} ]\".format(xrange),\n \"set yrange [0 : {} ]\".format(yrange),\n \"set xlabel '{}'\".format(xlabel),\n \"set ylabel '{}'\".format(ylabel),\n \"set boxwidth 0.3\",\n \"set key outside right top\",\n \"set grid\",\n \"set datafile separator ','\",\n ]\n\n # Add a line to show x=y, because no point should ever be above this line\n gp_lines.extend([\"set style arrow 2 nohead linewidth 1.0 dashtype 3\",\n \"set arrow from 0,0 rto {},{} as 2 lc 'grey'\".format(xrange, yrange)])\n\n # Add lines (using headless arrows) to show the means. And make them dotted (supported by pngcairo)\n # Taking means of percentages is OK here as all the tiles are the same size.\n gp_lines.extend([\"set style arrow 1 nohead linewidth 1.2 dashtype 2\"])\n for n, l in enumerate(tds['lanes']):\n mean_x = mean([atile[plot[0]] for atile in tds[l]])\n mean_y = mean([atile[plot[1]] for atile in tds[l]])\n\n gp_lines.append(\"set arrow from {},0 rto 0,{} as 1 lc {} front\".format(mean_x, yrange, n+1))\n gp_lines.append(\"set arrow from 0,{} rto {},0 as 1 lc {} front\".format(mean_y, xrange, n+1))\n\n # Now plot the actual datas. 
Start by declaring all tiles to plot on one line.\n gp_lines.extend([\"set style fill transparent solid 0.1 border\",\n \"set style circle radius graph 0.01 noclip\"])\n plot_cmds = [ \"'-' title '{}' with circles\".format(l)\n for l in tds['lanes'] ]\n gp_lines.append(\"plot\" + (\" ,\".join(plot_cmds)))\n\n # Now the points, one tile per line, each lane terminated with an 'e'\n for l in tds['lanes']:\n laneres = tds[l]\n\n for atile in laneres:\n gp_lines.append(\"{},{}\".format(*[atile[p] for p in plot]))\n gp_lines.append('e')\n\n return gp_lines", "def make_table_file(lines, labels, dir_path, filename):\r\n lines.sort()\r\n lines.insert(0, '\\t'.join(labels))\r\n\r\n output = open(os.path.join(dir_path, filename), 'w')\r\n output.write('\\n'.join(lines))\r\n output.close()", "def _generate_all_charts(spec, input_data):\n\n def _generate_chart(_, data_q, graph):\n \"\"\"Generates the chart.\n \"\"\"\n\n logs = list()\n\n logging.info(\" Generating the chart '{0}' ...\".\n format(graph.get(\"title\", \"\")))\n logs.append((\"INFO\", \" Generating the chart '{0}' ...\".\n format(graph.get(\"title\", \"\"))))\n\n job_name = graph[\"data\"].keys()[0]\n\n csv_tbl = list()\n res = list()\n\n # Transform the data\n logs.append((\"INFO\", \" Creating the data set for the {0} '{1}'.\".\n format(graph.get(\"type\", \"\"), graph.get(\"title\", \"\"))))\n data = input_data.filter_data(graph, continue_on_error=True)\n if data is None:\n logging.error(\"No data.\")\n return\n\n chart_data = dict()\n chart_tags = dict()\n for job, job_data in data.iteritems():\n if job != job_name:\n continue\n for index, bld in job_data.items():\n for test_name, test in bld.items():\n if chart_data.get(test_name, None) is None:\n chart_data[test_name] = OrderedDict()\n try:\n chart_data[test_name][int(index)] = \\\n test[\"result\"][\"receive-rate\"]\n chart_tags[test_name] = test.get(\"tags\", None)\n except (KeyError, TypeError):\n pass\n\n # Add items to the csv table:\n for tst_name, tst_data in chart_data.items():\n tst_lst = list()\n for bld in builds_dict[job_name]:\n itm = tst_data.get(int(bld), '')\n if not isinstance(itm, str):\n itm = itm.avg\n tst_lst.append(str(itm))\n csv_tbl.append(\"{0},\".format(tst_name) + \",\".join(tst_lst) + '\\n')\n\n # Generate traces:\n traces = list()\n index = 0\n groups = graph.get(\"groups\", None)\n visibility = list()\n\n if groups:\n for group in groups:\n visible = list()\n for tag in group:\n for test_name, test_data in chart_data.items():\n if not test_data:\n logs.append((\"WARNING\",\n \"No data for the test '{0}'\".\n format(test_name)))\n continue\n if tag in chart_tags[test_name]:\n message = \"index: {index}, test: {test}\".format(\n index=index, test=test_name)\n test_name = test_name.split('.')[-1]\n try:\n trace, rslt = _generate_trending_traces(\n test_data,\n job_name=job_name,\n build_info=build_info,\n name='-'.join(test_name.split('-')[2:-1]),\n color=COLORS[index])\n except IndexError:\n message = \"Out of colors: {}\".format(message)\n logs.append((\"ERROR\", message))\n logging.error(message)\n index += 1\n continue\n traces.extend(trace)\n visible.extend([True for _ in range(len(trace))])\n res.append(rslt)\n index += 1\n break\n visibility.append(visible)\n else:\n for test_name, test_data in chart_data.items():\n if not test_data:\n logs.append((\"WARNING\", \"No data for the test '{0}'\".\n format(test_name)))\n continue\n message = \"index: {index}, test: {test}\".format(\n index=index, test=test_name)\n test_name = test_name.split('.')[-1]\n 
try:\n trace, rslt = _generate_trending_traces(\n test_data,\n job_name=job_name,\n build_info=build_info,\n name='-'.join(test_name.split('-')[2:-1]),\n color=COLORS[index])\n except IndexError:\n message = \"Out of colors: {}\".format(message)\n logs.append((\"ERROR\", message))\n logging.error(message)\n index += 1\n continue\n traces.extend(trace)\n res.append(rslt)\n index += 1\n\n if traces:\n # Generate the chart:\n try:\n layout = deepcopy(graph[\"layout\"])\n except KeyError as err:\n logging.error(\"Finished with error: No layout defined\")\n logging.error(repr(err))\n return\n if groups:\n show = list()\n for i in range(len(visibility)):\n visible = list()\n for r in range(len(visibility)):\n for _ in range(len(visibility[r])):\n visible.append(i == r)\n show.append(visible)\n\n buttons = list()\n buttons.append(dict(\n label=\"All\",\n method=\"update\",\n args=[{\"visible\": [True for _ in range(len(show[0]))]}, ]\n ))\n for i in range(len(groups)):\n try:\n label = graph[\"group-names\"][i]\n except (IndexError, KeyError):\n label = \"Group {num}\".format(num=i + 1)\n buttons.append(dict(\n label=label,\n method=\"update\",\n args=[{\"visible\": show[i]}, ]\n ))\n\n layout['updatemenus'] = list([\n dict(\n active=0,\n type=\"dropdown\",\n direction=\"down\",\n xanchor=\"left\",\n yanchor=\"bottom\",\n x=-0.12,\n y=1.0,\n buttons=buttons\n )\n ])\n\n name_file = \"{0}-{1}{2}\".format(spec.cpta[\"output-file\"],\n graph[\"output-file-name\"],\n spec.cpta[\"output-file-type\"])\n\n logs.append((\"INFO\", \" Writing the file '{0}' ...\".\n format(name_file)))\n plpl = plgo.Figure(data=traces, layout=layout)\n try:\n ploff.plot(plpl, show_link=False, auto_open=False,\n filename=name_file)\n except plerr.PlotlyEmptyDataError:\n logs.append((\"WARNING\", \"No data for the plot. 
Skipped.\"))\n\n data_out = {\n \"job_name\": job_name,\n \"csv_table\": csv_tbl,\n \"results\": res,\n \"logs\": logs\n }\n data_q.put(data_out)\n\n builds_dict = dict()\n for job in spec.input[\"builds\"].keys():\n if builds_dict.get(job, None) is None:\n builds_dict[job] = list()\n for build in spec.input[\"builds\"][job]:\n status = build[\"status\"]\n if status != \"failed\" and status != \"not found\" and \\\n status != \"removed\":\n builds_dict[job].append(str(build[\"build\"]))\n\n # Create \"build ID\": \"date\" dict:\n build_info = dict()\n tb_tbl = spec.environment.get(\"testbeds\", None)\n for job_name, job_data in builds_dict.items():\n if build_info.get(job_name, None) is None:\n build_info[job_name] = OrderedDict()\n for build in job_data:\n testbed = \"\"\n tb_ip = input_data.metadata(job_name, build).get(\"testbed\", \"\")\n if tb_ip and tb_tbl:\n testbed = tb_tbl.get(tb_ip, \"\")\n build_info[job_name][build] = (\n input_data.metadata(job_name, build).get(\"generated\", \"\"),\n input_data.metadata(job_name, build).get(\"version\", \"\"),\n testbed\n )\n\n work_queue = multiprocessing.JoinableQueue()\n manager = multiprocessing.Manager()\n data_queue = manager.Queue()\n cpus = multiprocessing.cpu_count()\n\n workers = list()\n for cpu in range(cpus):\n worker = Worker(work_queue,\n data_queue,\n _generate_chart)\n worker.daemon = True\n worker.start()\n workers.append(worker)\n os.system(\"taskset -p -c {0} {1} > /dev/null 2>&1\".\n format(cpu, worker.pid))\n\n for chart in spec.cpta[\"plots\"]:\n work_queue.put((chart, ))\n work_queue.join()\n\n anomaly_classifications = list()\n\n # Create the header:\n csv_tables = dict()\n for job_name in builds_dict.keys():\n if csv_tables.get(job_name, None) is None:\n csv_tables[job_name] = list()\n header = \"Build Number:,\" + \",\".join(builds_dict[job_name]) + '\\n'\n csv_tables[job_name].append(header)\n build_dates = [x[0] for x in build_info[job_name].values()]\n header = \"Build Date:,\" + \",\".join(build_dates) + '\\n'\n csv_tables[job_name].append(header)\n versions = [x[1] for x in build_info[job_name].values()]\n header = \"Version:,\" + \",\".join(versions) + '\\n'\n csv_tables[job_name].append(header)\n\n while not data_queue.empty():\n result = data_queue.get()\n\n anomaly_classifications.extend(result[\"results\"])\n csv_tables[result[\"job_name\"]].extend(result[\"csv_table\"])\n\n for item in result[\"logs\"]:\n if item[0] == \"INFO\":\n logging.info(item[1])\n elif item[0] == \"ERROR\":\n logging.error(item[1])\n elif item[0] == \"DEBUG\":\n logging.debug(item[1])\n elif item[0] == \"CRITICAL\":\n logging.critical(item[1])\n elif item[0] == \"WARNING\":\n logging.warning(item[1])\n\n del data_queue\n\n # Terminate all workers\n for worker in workers:\n worker.terminate()\n worker.join()\n\n # Write the tables:\n for job_name, csv_table in csv_tables.items():\n file_name = spec.cpta[\"output-file\"] + \"-\" + job_name + \"-trending\"\n with open(\"{0}.csv\".format(file_name), 'w') as file_handler:\n file_handler.writelines(csv_table)\n\n txt_table = None\n with open(\"{0}.csv\".format(file_name), 'rb') as csv_file:\n csv_content = csv.reader(csv_file, delimiter=',', quotechar='\"')\n line_nr = 0\n for row in csv_content:\n if txt_table is None:\n txt_table = prettytable.PrettyTable(row)\n else:\n if line_nr > 1:\n for idx, item in enumerate(row):\n try:\n row[idx] = str(round(float(item) / 1000000, 2))\n except ValueError:\n pass\n try:\n txt_table.add_row(row)\n except Exception as err:\n 
logging.warning(\"Error occurred while generating TXT \"\n \"table:\\n{0}\".format(err))\n line_nr += 1\n txt_table.align[\"Build Number:\"] = \"l\"\n with open(\"{0}.txt\".format(file_name), \"w\") as txt_file:\n txt_file.write(str(txt_table))\n\n # Evaluate result:\n if anomaly_classifications:\n result = \"PASS\"\n for classification in anomaly_classifications:\n if classification == \"regression\" or classification == \"outlier\":\n result = \"FAIL\"\n break\n else:\n result = \"FAIL\"\n\n logging.info(\"Partial results: {0}\".format(anomaly_classifications))\n logging.info(\"Result: {0}\".format(result))\n\n return result", "def generate_2D_N_plots(x, y, labels_dict, file_title, plot_title, no_start):\n\n fig = plt.figure()\n\n for sub_y in y:\n sub_y = sub_y[no_start]\n plt.plot(x, sub_y)\n\n if labels_dict:\n plt.xlabel(labels_dict[\"x\"])\n plt.ylabel(labels_dict[\"y\"])\n if plot_title:\n plt.title(plot_title)\n\n plt.savefig(file_title)", "def multi_plot(self, plottitle, gof_fileroot, indir, outdir,\n legends, num_stations):\n # Pick components and labels\n xtick_loc = XTICK_LOC_0_01\n xtick_label = XTICK_LABEL_0_01\n\n # Initialize data arrays\n periods = [[] for _ in range(len(gof_fileroot))]\n bias = [[] for _ in range(len(gof_fileroot))]\n m90 = [[] for _ in range(len(gof_fileroot))]\n p90 = [[] for _ in range(len(gof_fileroot))]\n sigma = [[] for _ in range(len(gof_fileroot))]\n sigma0 = [[] for _ in range(len(gof_fileroot))]\n\n bias_l = [[] for _ in range(len(gof_fileroot))]\n bias_h = [[] for _ in range(len(gof_fileroot))]\n conf_l = [[] for _ in range(len(gof_fileroot))]\n conf_h = [[] for _ in range(len(gof_fileroot))]\n\n # Read data from all input files\n for compnum in xrange(0, len(gof_fileroot)):\n filenamebase = os.path.join(indir, gof_fileroot[compnum])\n #print(\"Reading component files %s.*\" % (filenamebase))\n periods[compnum], bias[compnum] = self.read_data(\"%s.bias\" %\n (filenamebase),\n 0.01)\n periods[compnum], m90[compnum] = self.read_data(\"%s.m90\" %\n (filenamebase),\n 0.01)\n periods[compnum], p90[compnum] = self.read_data(\"%s.p90\" %\n (filenamebase),\n 0.01)\n periods[compnum], sigma[compnum] = self.read_data(\"%s.sigma\" %\n (filenamebase),\n 0.01)\n periods[compnum], sigma0[compnum] = self.read_data(\"%s.sigma0\" %\n (filenamebase),\n 0.01)\n\n # Compute bias and conf interval lower/upper bounds\n for i in xrange(0, len(bias[compnum])):\n bias_l[compnum].append(bias[compnum][i] - sigma0[compnum][i])\n bias_h[compnum].append(bias[compnum][i] + sigma0[compnum][i])\n conf_l[compnum].append(m90[compnum][i])\n conf_h[compnum].append(p90[compnum][i])\n\n num_periods = len(periods[0])\n for comp in periods:\n if len(comp) != num_periods:\n print(\"Number of data points unequal across components\")\n return\n\n # Construct baseline\n baseline = [0.0 for _ in periods[0]]\n\n # Find max, min values\n min_x = min([min(comp) for comp in periods])\n max_x = max([max(comp) for comp in periods])\n min_y = MIN_Y_AXIS\n max_y = MAX_Y_AXIS\n\n # Start plots\n num_plots = len(gof_fileroot)\n # Make 1 column of \"num_plots\" rows\n fig, axs = pylab.plt.subplots(num_plots, 1)\n # Set plot dims\n fig.set_size_inches(6, 10)\n\n #subplots_adjust(left=0.125)\n #subplots_adjust(right=0.9)\n #subplots_adjust(top=0.85)\n fig.subplots_adjust(hspace=0.4)\n fig.subplots_adjust(wspace=0.5)\n\n # Add subplots in a list\n subfigs = []\n for idx in range(0, num_plots):\n subfigs.append(axs[idx])\n\n # Now walk through each subfig\n for (subfig, subplot_title, cur_period,\n 
cur_bias, cur_bias_h, cur_bias_l,\n cur_conf_h, cur_conf_l) in zip(subfigs,\n legends,\n periods,\n bias,\n bias_h,\n bias_l,\n conf_h,\n conf_l):\n subfig.set_xlim(min_x, max_x)\n subfig.set_ylim(min_y, max_y)\n subfig.set_title(\"%s\" % subplot_title, size=10)\n subfig.plot(cur_period, cur_bias, color='red', label='_nolegend_')\n subfig.fill_between(cur_period, cur_bias_h,\n cur_bias_l, color='cyan',\n label='_nolegend_')\n subfig.fill_between(cur_period, cur_conf_h,\n cur_conf_l, color='yellow',\n label='_nolegend_')\n subfig.plot(cur_period, baseline, color='grey',\n label='_nolegend_')\n # Only put xlabel on bottom plot\n if legends.index(subplot_title) == len(gof_fileroot) - 1:\n subfig.set_xlabel(\"Period (sec)\", size=8)\n subfig.set_ylabel(\"ln (data/model)\", size=8)\n subfig.set_xscale('log')\n # Old way to do it\n # subfig.set_xticks(xtick_loc, xtick_label)\n subfig.set_xticks(xtick_loc)\n subfig.set_xticklabels(xtick_label)\n subfig.tick_params(labelsize=8)\n subfig.minorticks_on()\n\n fig.suptitle('%s\\nNumber of stations: %d' % (plottitle, num_stations),\n size=12)\n # Figure out output filename\n outfile = gof_fileroot[0]\n outfile = outfile[:outfile.rfind(\"-\")]\n outfile = os.path.join(outdir, \"gof-%s.png\" % (outfile))\n print(\"==> Created GoF plot: %s\" % (outfile))\n fig.savefig(outfile, format=\"png\",\n transparent=False, dpi=plot_config.dpi)\n pylab.close()", "def create_tables(self, curs, splat_table, mcl_table):\n\t\tcurs.execute(\"create table %s(\\\n\t\t\t\t\tsplat_id\t\tserial primary key,\\\n\t\t\t\t\tno_of_edges\tinteger,\\\n\t\t\t\t\trecurrence_pattern\tbit varying(200),\\\n\t\t\t\t\trecurrence_array\tfloat[],\\\n\t\t\t\t\tedge_set\tinteger[][],\\\n\t\t\t\t\tconnectivity\tfloat,\\\n\t\t\t\t\tsplat_id_src\tinteger)\"%splat_table)\t#splat_id_src stores the parent splat_id\n\t\t\t\t\t\n\t\tcurs.execute(\"create table %s(\\\n\t\t\tmcl_id\tserial primary key,\\\n\t\t\tsplat_id\tinteger,\\\n\t\t\tvertex_set\tinteger[],\\\n\t\t\tparameter\tvarchar,\\\n\t\t\tconnectivity\tfloat,\\\n\t\t\tp_value_min\tfloat,\\\n\t\t\tgo_no_vector\tinteger[],\\\n\t\t\tunknown_gene_ratio\tfloat,\\\n\t\t\trecurrence_array\tfloat[])\"%mcl_table)", "def __init__(self, tableFilename):\n self.h5file = h5py.File(tableFilename, 'r')\n self.tableShape = numpy.shape(self.h5file['logpress'])\n self.energy_shift = self.h5file['energy_shift'][0]\n #print self.energy_shift\n #Determine the ordering of independent variable axes by identifying with\n # the number of points for that indVar axis\n newOrdering = [None for _ in self.indVars]\n for indVar in self.indVars:\n key = 'points' + indVar.split('log')[-1]\n points = self.h5file[key][0]\n for ithAxis, ithAxesPoints in enumerate(self.tableShape):\n if ithAxesPoints == points:\n newOrdering[ithAxis] = indVar\n break\n self.indVars = tuple(newOrdering)\n self.tableShapeDict = dict([(indVar, self.tableShape[i])\n for i, indVar in enumerate(self.indVars)])", "def init_plot_output(self):\n csvoutfn = Path(self.plot_data_file)\n csvoutfn.unlink(missing_ok=True)\n\n with open(self.plot_data_file, 'a') as f:\n f.write('iter,bestmeangoal,bestallgoal,')\n for i, k in enumerate(list(self.theta0.keys())):\n if i < len(self.theta0) - 1:\n f.write(f'{k},')\n else:\n f.write(f'{k}\\n')", "def defineTABLESECTION(f,layernamelist):\r\n \r\n layercolordict={}\r\n for layername in layernamelist:\r\n t=random.randint(10,17)\r\n layercolordict[layername]=random.randrange(10+t,240+t,10)\r\n \r\n layercolordict[\"Outline\"]=1\r\n layercolordict[\"Mark\"]=5\r\n 
layercolordict[\"Cutline\"]=2\r\n \r\n f.write(\"0\\nSECTION\\n2\\nTABLES\\n0\\nTABLE\\n2\\nLAYER\\n70\\n2\\n\") \r\n for layername in layernamelist:\r\n f.write(\"0\\nLAYER\\n2\\n\"+layername+\"\\n70\\n0\\n62\\n\"+str(layercolordict[layername])+\"\\n6\\nCONTINUOUS\\n\")\r\n f.write(\"0\\nENDTAB\\n0\\nENDSEC\\n\")", "def __init__(self, plot_factory, df, title=\"Graph\"):\n super().__init__(title=title)\n self.Table = Table(plot_factory, df, \"Show Table\")\n self.totalButtons = 10\n self.plot_factory = plot_factory\n self.df = df\n self.IdTitlePair = [\"id\", \"title\"]", "def generate_plots(type_, data, name, plots_location):\n plt.cla()\n plt.clf()\n plot_type = getattr(sns, type_)\n plot_ = plot_type(data)\n fig = plot_.get_figure()\n fig.savefig('{}/{}_{}.png'.format(plots_location, name, type_))", "def build(self):\r\n self.dirty = 0\r\n \r\n # Files first\r\n for output in self.files.keys():\r\n params = self.files[output]\r\n if (params[1] != -1):\r\n filename = params[0]\r\n freq = params[1]\r\n if (output == 'energies'):\r\n self.myOutputs.append(OutputEnergies.OutputEnergies(filename, freq, 1,0,1.0,0))\r\n elif (output == 'dcdtrajpos'):\r\n if (os.path.exists(filename)): # Continue\r\n self.myOutputs.append(OutputDCDTrajectory.OutputDCDTrajectory(filename, freq, 1, 1))\r\n else: # Overwrite\r\n self.myOutputs.append(OutputDCDTrajectory.OutputDCDTrajectory(filename, freq, 1, 0))\r\n elif (output == 'dcdtrajvel'):\r\n if (os.path.exists(filename)):\r\n self.myOutputs.append(OutputDCDTrajectoryVel.OutputDCDTrajectoryVel(filename, freq, 1, 1))\r\n else:\r\n self.myOutputs.append(OutputDCDTrajectoryVel.OutputDCDTrajectoryVel(filename, freq, 1, 0))\r\n elif (output == 'xyztrajforce'):\r\n self.myOutputs.append(OutputXYZTrajectoryForce.OutputXYZTrajectoryForce(filename, freq))\r\n elif (output == 'xyztrajpos'):\r\n self.myOutputs.append(OutputXYZTrajectoryPos.OutputXYZTrajectoryPos(filename, freq, 1))\r\n elif (output == 'xyztrajvel'):\r\n self.myOutputs.append(OutputXYZTrajectoryVel.OutputXYZTrajectoryVel(filename, freq))\r\n elif (output == 'gui'):\r\n self.myOutputs.append(OutputFAHGUI.OutputFAHGUI(filename, freq, 52753, 1, \"MDL_3.0\", 0.0, 0))\r\n\r\n if (self.screen != -1):\r\n self.myOutputs.append(OutputScreen.OutputScreen(self.screen))\r\n\r\n\r\n # Now plots\r\n for plot in self.plots.keys():\r\n freq = self.plots[plot]\r\n if (freq != -1):\r\n\r\n # Initialize a plot\r\n if (not self.doMPL): # Gnuplot\r\n self.xyData[plot] = []\r\n self.graphs[plot] = Gnuplot(debug=0)\r\n else: # Matplotlib\r\n self.xData[plot] = []\r\n self.yData[plot] = []\r\n self.figures[plot] = 0\r\n\r\n # Add the function to plot the data,\r\n # and the frequency at which to execute it\r\n self.myPlots.append([self.plotFunctions[plot], freq])", "def __init__(self, plot_factory, df, title=\"Table\"):\n super().__init__(title=title)\n self.plot_factory = plot_factory\n self.df = df", "def create_all_tables(self):\n pass", "def generateHtml(self):\n # only the master processor needs to do this\n if not self.master: return\n\n for page in self.layout.pages:\n \n # build the metric dictionary\n metrics = {}\n page.models = []\n for fname in glob.glob(os.path.join(self.output_path,\"*.nc\")):\n with Dataset(fname) as dataset:\n mname = dataset.getncattr(\"name\")\n if mname != \"Benchmark\": page.models.append(mname)\n if not dataset.groups.has_key(page.name): continue\n group = dataset.groups[page.name]\n\n # if the dataset opens, we need to add the model (table row)\n metrics[mname] = {}\n \n # each model 
will need to have all regions\n for region in self.regions: metrics[mname][region] = {}\n \n # columns in the table will be in the scalars group\n if not group.groups.has_key(\"scalars\"): continue\n \n # we add scalars to the model/region based on the region\n # name being in the variable name. If no region is found,\n # we assume it is the global region.\n grp = group.groups[\"scalars\"]\n for vname in grp.variables.keys():\n found = False\n for region in self.regions:\n if region in vname: \n found = True\n var = grp.variables[vname]\n name = vname.replace(region,\"\")\n metrics[mname][region][name] = Variable(name = name,\n unit = var.units,\n data = var[...])\n if not found:\n var = grp.variables[vname]\n metrics[mname][\"global\"][vname] = Variable(name = vname,\n unit = var.units,\n data = var[...])\n page.setMetrics(metrics)\n \n # write the HTML page\n f = file(os.path.join(self.output_path,\"%s.html\" % (self.name)),\"w\")\n f.write(str(self.layout))\n f.close()", "def generate_plots(path):\n videos = glob(path + '/*.mkv')\n print(path, len(videos), videos)\n\n if len(videos) == 0:\n return\n else:\n videos = videos[0]\n\n metadata_list = glob(path + '/metadata.txt')\n #print(path, len(metadata_list), metadata_list)\n\n if len(metadata_list) == 0:\n return \n\n P = Preprocessor()\n P.import_video(str(videos))\n P.read_metadata(path)\n P.preprocess()\n Im = P.frames_processed\n if len(Im) == 0:\n print(len(Im))\n return\n\n z_start = P.z_start\n z_end = P.z_end\n\n mean, cov = analyze_image(Im)\n\n window_size = 10\n mean_smoothed = smoothing.mean_moving_average(mean, window_size)\n cov_smoothed = smoothing.cov_moving_average(cov, window_size)\n\n c = CubicFitRotated()\n c.fit(mean=mean_smoothed, cov=cov_smoothed, z_start=z_start, z_end=z_end)\n\n try:\n os.mkdir(path + '/analysis')\n path += '/analysis'\n except OSError:\n pass\n\n\n plots.plot_mean(mean, z_start, z_end).savefig(path + '/beam_center.png')\n plots.plot_beta(cov, z_start, z_end).savefig(path + '/sigma_squared.png')\n\n export.export_mean(mean = mean, filename = path + '/center.csv', z_start = z_start, z_end = z_end)\n export.export_cov(cov = cov, filename = path + '/cov.csv', z_start = z_start, z_end = z_end)\n\n plt.close('all')", "def make_dataframe(self):\n logging.info('*** Creating the dataframes from the source files ' )\n \n for k in self.datasets_keys:\n #for k in ['igra2' , 'ncar']:\n \n logging.info('*** Creating the dataframe for the dataset: %s ' , k ) \n \n p_levels = self.data[k]['df']['observations_table']['z_coordinate'][:]\n logging.debug(' Loaded the z_coordinate')\n \n z_type = self.data[k]['df']['observations_table']['z_coordinate_type'][:]\n logging.debug(' Loaded the z_coordinate_type')\n \n obs_variable = self.data[k]['df']['observations_table']['observed_variable'][:]\n logging.debug(' Loaded the observed_variable')\n \n obs_values = self.data[k]['df']['observations_table']['observation_value'][:]\n logging.debug(' Loaded the observation_value')\n \n observation_id = self.data[k]['df']['observations_table']['observation_id'][:]\n logging.debug(' Loaded the observation_id')\n \n units = self.data[k]['df']['observations_table']['units'][:].astype(int)\n logging.debug(' Loaded the units') \n \n report_id = self.data[k]['df']['observations_table']['report_id'][:] \n logging.debug(' Loaded the report_id')\n \n date_time = self.data[k]['df']['observations_table']['date_time'][:]\n logging.debug(' Loaded the date_time (deltas)')\n \n lat , lon = 
self.data[k]['df']['observations_table']['latitude'][:] , self.data[k]['df']['observations_table']['longitude'][:]\n logging.debug(' Loaded the lat,lon ')\n \n \n self.obs_table_columns = list(self.data[k]['df']['observations_table'].keys() )\n \n self.data[k]['df'].close()\n \n \"\"\" Creating a dataframe \"\"\"\n columns = ['date_time', 'z_coordinate' , 'z_coordinate_type', 'observed_variable' , 'observation_value' , 'report_id' , 'observation_id' , 'latitude' , 'longitude', 'units']\n logging.info(' Loaded the data, creating dataframe ')\n \n df = pd.DataFrame( list(zip( date_time, p_levels, z_type, obs_variable , obs_values, report_id, observation_id , lat , lon, units ) ) , columns = columns ) \n \n \n \"\"\" Storing the dataframe \"\"\" ### try using xarrays ??? \n logging.debug('Storing the DF ' ) \n self.data[k]['dataframe'] = df\n \n logging.debug(' PD dataframe created !!! ')", "def make_plots(srcdir, outdir, include_subdir=False, level=3):\n srcfiles = get_ordered_srcfiles(srcdir, include_subdir)\n nodelist = get_f_nodelist(srcfiles)\n for n in nodelist:\n print(os.path.basename(n.name))\n for m in n.dependencies:\n print(' ' + os.path.basename(m.name))\n print('')\n\n if not os.path.isdir(outdir):\n raise Exception('output directory does not exist')\n\n for n in nodelist:\n filename = os.path.join(outdir, os.path.basename(n.name) + '.png')\n print('Creating ' + filename)\n graph = pydot.Dot(graph_type='digraph')\n node_dict = {}\n ilev = 0\n add_pydot_nodes(graph, node_dict, n, ilev, level)\n edge_set = set()\n ilev = 1\n add_pydot_edges(graph, node_dict, edge_set, n, ilev, level)\n graph.write_png(filename)\n\n return", "def create_tables():\n\tlog_msg4(\"No hay tablas para el año \" + txt_year + \". Creando\")\n\n\tcreate_table('visited')\n\tcreate_table('saved')\n\tcreate_table('actions')\n\n\tglobal new_tables_created\n\tnew_tables_created = True\n\n\tlog_msg_ok4()", "def makeOverviewPage(orbit_list, mtpConstants, paths, occultationObservationDict, nadirObservationDict):\n mtpNumber = mtpConstants[\"mtpNumber\"]\n obsTypeNames = {\"ingress\":\"irIngressLow\", \"egress\":\"irEgressLow\"}\n\n \n #loop through once to find list of all orders measured\n ordersAll = []\n for orbit in orbit_list:\n occultationObsTypes = [occultationType for occultationType in orbit[\"allowedObservationTypes\"][:] if occultationType in [\"ingress\", \"egress\"]] \n for occultationObsType in occultationObsTypes:\n if occultationObsType in orbit.keys():\n obsTypeName = obsTypeNames[occultationObsType]\n \n orders = orbit[\"finalOrbitPlan\"][obsTypeName+\"Orders\"]\n if 0 in orders: #remove darks\n orders.remove(0)\n if \"COP#\" in \"%s\" %orders[0]: #remove manual COP selection\n orders = []\n ordersAll.extend(orders)\n uniqueOccultationOrders = sorted(list(set(ordersAll)))\n \n #loop through again to plot each order on a single graph\n for chosenOrder in uniqueOccultationOrders:\n title = \"Solar occultations for diffraction order %s\" %(chosenOrder)\n fig = plt.figure(figsize=(FIG_X, FIG_Y))\n ax = fig.add_subplot(111, projection=\"mollweide\")\n ax.grid(True)\n plt.title(title)\n \n lonsAll = [] #pre-make list of all observing points of this order, otherwise colourbar scale will be incorrect\n latsAll = []\n altsAll = []\n for orbit in orbit_list:\n occultationObsTypes = [occultationType for occultationType in orbit[\"allowedObservationTypes\"][:] if occultationType in [\"ingress\", \"egress\"]] \n for occultationObsType in occultationObsTypes:\n if occultationObsType in orbit.keys():\n 
obsTypeName = obsTypeNames[occultationObsType]\n \n orders = orbit[\"finalOrbitPlan\"][obsTypeName+\"Orders\"]\n if chosenOrder in orders:\n occultation = orbit[occultationObsType]\n \n #if lats/lons/alts not yet in orbitList, find and write to list\n if \"alts\" not in occultation.keys():\n #just plot the half of the occultation closest to the surface, not the high altitude bits\n #ignore merged or grazing occs at this point\n if occultationObsType == \"ingress\":\n ets = np.arange(occultation[\"etMidpoint\"], occultation[\"etEnd\"], OCCULTATION_SEARCH_STEP_SIZE)\n elif occultationObsType == \"egress\":\n ets = np.arange(occultation[\"etStart\"], occultation[\"etMidpoint\"], OCCULTATION_SEARCH_STEP_SIZE)\n lonsLatsLsts = np.asfarray([getLonLatLst(et) for et in ets])\n occultation[\"lons\"] = lonsLatsLsts[:, 0]\n occultation[\"lats\"] = lonsLatsLsts[:, 1]\n occultation[\"alts\"] = np.asfarray([getTangentAltitude(et) for et in ets])\n \n #else take lats/lons/alts from orbitList if already exists\n lonsAll.extend(occultation[\"lons\"])\n latsAll.extend(occultation[\"lats\"])\n altsAll.extend(occultation[\"alts\"])\n \n plot1 = ax.scatter(np.asfarray(lonsAll)/sp.dpr(), np.asfarray(latsAll)/sp.dpr(), \\\n c=np.asfarray(altsAll), cmap=plt.cm.jet, marker='o', linewidth=0)\n \n cbar = fig.colorbar(plot1, fraction=0.046, pad=0.04)\n cbar.set_label(\"Tangent Point Altitude (km)\", rotation=270, labelpad=20)\n fig.tight_layout()\n plt.savefig(os.path.join(paths[\"IMG_MTP_PATH\"], \"occultations_mtp%03d_order%i_altitude.png\" %(mtpNumber, chosenOrder)))\n plt.close()\n \n \n \n \"\"\"plot nadir orders\"\"\"\n #find all orders measured\n ordersAll = []\n for orbit in orbit_list:\n if \"dayside\" in orbit[\"irMeasuredObsTypes\"]:\n orders = orbit[\"finalOrbitPlan\"][\"irDaysideOrders\"]\n if 0 in orders: #remove darks\n orders.remove(0)\n if \"COP#\" in \"%s\" %orders[0]: #remove manual COP selection\n orders = []\n ordersAll.extend(orders)\n uniqueNadirOrders = sorted(list(set(ordersAll)))\n \n #plot each order\n for chosenOrder in uniqueNadirOrders:\n title = \"Dayside nadirs for diffraction order %s\" %(chosenOrder)\n fig = plt.figure(figsize=(FIG_X, FIG_Y))\n ax = fig.add_subplot(111, projection=\"mollweide\")\n ax.grid(True)\n plt.title(title)\n \n lonsAll = [] #pre-make list of all observing points of this order, otherwise colourbar scale will be incorrect\n latsAll = []\n anglesAll = []\n for orbit in orbit_list:\n if \"dayside\" in orbit[\"irMeasuredObsTypes\"]:\n orders = orbit[\"finalOrbitPlan\"][\"irDaysideOrders\"]\n if chosenOrder in orders:\n nadir = orbit[\"dayside\"]\n \n #if lats/lons/incidence angles not yet in orbitList, find and write to list\n if \"incidences\" not in nadir.keys():\n# print(orbit[\"orbitNumber\"])\n #nadir start/end times have been modified to fit thermal room\n realStartTime = nadir[\"obsStart\"] + PRECOOLING_TIME + INITIALISATION_TIME\n realEndTime = nadir[\"obsEnd\"]\n ets = np.arange(realStartTime, realEndTime, NADIR_SEARCH_STEP_SIZE)\n lonsLatsIncidencesLsts = np.asfarray([getLonLatIncidenceLst(et) for et in ets])\n nadir[\"lons\"] = lonsLatsIncidencesLsts[:, 0]\n nadir[\"lats\"] = lonsLatsIncidencesLsts[:, 1]\n nadir[\"incidences\"] = lonsLatsIncidencesLsts[:, 2]\n #else take lats/lons/incidence angles from orbitList if already exists\n lonsAll.extend(nadir[\"lons\"])\n latsAll.extend(nadir[\"lats\"])\n anglesAll.extend(nadir[\"incidences\"])\n \n plot1 = ax.scatter(np.asfarray(lonsAll)/sp.dpr(), np.asfarray(latsAll)/sp.dpr(), \\\n c=np.asfarray(anglesAll), 
cmap=plt.cm.jet, marker='o', linewidth=0)\n \n cbar = fig.colorbar(plot1, fraction=0.046, pad=0.04)\n cbar.set_label(\"Incidence Angle (degrees)\", rotation=270, labelpad=20)\n fig.tight_layout()\n plt.savefig(os.path.join(paths[\"IMG_MTP_PATH\"], \"dayside_nadirs_mtp%03d_order%i_incidence_angle.png\" %(mtpNumber, chosenOrder)))\n plt.close()\n\n \"\"\"write mtp overview page\"\"\"\n h = r\"\"\n h += r\"<h1>MTP%03d Overview</h1>\" %(mtpNumber)\n h += r\"<h2>Geometry</h2>\"+\"\\n\"\n \n imagename = \"mtp%03d_occultation_duration.png\" %(mtpNumber)\n h += r\"<img src='%s'>\" %imagename\n imagename = \"mtp%03d_occultation_lat.png\" %(mtpNumber)\n h += r\"<img src='%s'>\" %imagename\n imagename = \"mtp%03d_nadir_minimum_incidence_angle.png\" %(mtpNumber)\n h += r\"<img src='%s'>\" %imagename\n \n h += r\"<p>UVIS typically operates on all dayside nadirs and all occultations</p>\"+\"\\n\"\n \n h += r\"<h2>Solar Occultations</h2>\"+\"\\n\"\n \n h += r\"Solar occultation diffraction orders measured this MTP: \"+\"\\n\"\n for chosenOrder in sorted(uniqueOccultationOrders):\n h += \"%i, \" %chosenOrder\n h += r\"<br>\"+\"\\n\"\n \n for chosenOrder in sorted(uniqueOccultationOrders):\n h += \"<h3>Solar occultations for diffraction order %i</h3>\" %chosenOrder\n imagename = \"img/occultations_mtp%03d_order%i_altitude.png\" %(mtpNumber, chosenOrder)\n h += r\"<img src='%s'>\" %imagename\n \n h += r\"<br>\"+\"\\n\"\n h += r\"<br>\"+\"\\n\"\n h += r\"<br>\"+\"\\n\"\n h += r\"<br>\"+\"\\n\"\n h += r\"<h2>Dayside Nadirs</h2>\"+\"\\n\"\n \n h += r\"Dayside nadir diffraction orders measured this MTP: \"+\"\\n\"\n for chosenOrder in sorted(uniqueNadirOrders):\n h += \"%i, \" %chosenOrder\n h += r\"<br>\"+\"\\n\"\n \n for chosenOrder in sorted(uniqueNadirOrders):\n h += \"<h3>Dayside nadirs for diffraction order %i</h3>\" %chosenOrder\n imagename = \"img/dayside_nadirs_mtp%03d_order%i_incidence_angle.png\" %(mtpNumber, chosenOrder)\n h += r\"<img src='%s'>\" %imagename\n \n \n \n h += r\"<br>\"+\"\\n\"\n h += r\"<br>\"+\"\\n\"\n# h += r\"<h2>SO/LNO Observation Plan</h2>\"+\"\\n\"\n \n \n h += r\"<br>\"+\"\\n\"\n h += r\"<br>\"+\"\\n\"\n h += r\"<h2>SO/LNO Observation Dictionaries</h2>\"+\"\\n\"\n h += r\"<h3>Solar Occultation</h3>\"+\"\\n\"\n headers = [\"Name\", \"Diffraction Order 1\", \"Diffraction Order 2\", \"Diffraction Order 3\", \"Diffraction Order 4\", \"Diffraction Order 5\", \"Diffraction Order 6\", \"Integration Time\", \"Rhythm\", \"Detector Height\"]\n h += r\"<table border=1>\"+\"\\n\"\n h += r\"<tr>\"+\"\\n\"\n for header in headers:\n h += r\"<th>%s</th>\" %header\n h += r\"</tr>\"+\"\\n\"\n for key in sorted(occultationObservationDict.keys()):\n orders, integrationTime, rhythm, detectorRows, channelCode = getObsParameters(key, occultationObservationDict)\n \n h += r\"<tr>\"+\"\\n\"\n h += r\"<td>%s</td>\" %(key)\n if \"COP\" in orders:\n h += r\"<td>%s (manual mode)</td>\" %(orders)\n for order in range(5):\n h += r\"<td>-</td>\"+\"\\n\"\n else: \n for order in orders:\n h += r\"<td>%s</td>\" %(order)\n for order in range(6-len(orders)):\n h += r\"<td>-</td>\"+\"\\n\"\n \n h += r\"<td>%i</td>\" %(integrationTime)\n h += r\"<td>%i</td>\" %(rhythm)\n h += r\"<td>%i</td>\" %(detectorRows)\n h += r\"</tr>\"+\"\\n\"\n h += r\"</table>\"+\"\\n\"\n \n \n h += r\"<h3>Nadir/Limb</h3>\"+\"\\n\"\n headers = [\"Name\", \"Diffraction Order 1\", \"Diffraction Order 2\", \"Diffraction Order 3\", \"Diffraction Order 4\", \"Diffraction Order 5\", \"Diffraction Order 6\", \"Integration Time\", \"Rhythm\", 
\"Detector Height\"]\n h += r\"<table border=1>\"+\"\\n\"\n h += r\"<tr>\"+\"\\n\"\n for header in headers:\n h += r\"<th>%s</th>\" %header\n h += r\"</tr>\"\n for key in sorted(nadirObservationDict.keys()):\n orders, integrationTime, rhythm, detectorRows, channelCode = getObsParameters(key, nadirObservationDict)\n \n h += r\"<tr>\"+\"\\n\"\n h += r\"<td>%s</td>\" %(key)\n if \"COP\" in orders:\n h += r\"<td>%s (manual mode)</td>\" %(orders)\n for order in range(5):\n h += r\"<td>-</td>\"+\"\\n\"\n else: \n for order in orders:\n h += r\"<td>%s</td>\" %(order)\n for order in range(6-len(orders)):\n h += r\"<td>-</td>\"+\"\\n\"\n \n h += r\"<td>%i</td>\" %(integrationTime)\n h += r\"<td>%i</td>\" %(rhythm)\n h += r\"<td>%i</td>\" %(detectorRows)\n h += r\"</tr>\"+\"\\n\"\n h += r\"</table>\"+\"\\n\"\n \n \n \n \n h += r\"<br>\"+\"\\n\"\n h += r\"<br>\"+\"\\n\"\n h += r\"<p>Page last modified: %s</p>\" %(datetime.now().strftime('%a, %d %b %Y %H:%M:%S')) +\"\\n\"\n \n with open(os.path.join(paths[\"HTML_MTP_PATH\"], \"nomad_mtp%03d_overview.html\" %(mtpNumber)), 'w') as f:\n f.write(h)", "def __init__(self):\n\n self.write_title = TitleWriter() # TITLE project title\n self.write_options = GeneralWriter() # OPTIONS analysis options\n self.write_report = ReportWriter() # REPORT output reporting instructions\n self.write_files = SectionWriter() # FILES interface file options\n self.write_files.SECTION_NAME = \"[FILES]\"\n self.write_files.section_type = Files\n self.write_backdrop = BackdropOptionsWriter() # BACKDROP bounding rectangle and file name of backdrop image\n self.write_map = MapOptionsWriter() # MAP map's bounding rectangle and units\n self.write_raingages = SectionWriterAsList(\"[RAINGAGES]\", RainGageWriter,\n \";;Name \\tFormat \\tInterval\\tSCF \\tSource \\n\"\n \";;--------------\\t---------\\t--------\\t--------\\t----------\")\n\n self.write_hydrographs = SectionWriterAsList(\"[HYDROGRAPHS]\", UnitHydrographWriter,\n \";;Hydrograph \\tRain Gage/Month \\tResponse\\tR \\tT \\tK \\tDmax \\tDrecov \\tDinit \\n\"\n \";;--------------\\t----------------\\t--------\\t--------\\t--------\\t--------\\t--------\\t--------\\t--------\")\n # unit hydrograph data used to construct RDII inflows\n\n self.write_evaporation = EvaporationWriter() # EVAPORATION evaporation data\n self.write_temperature = TemperatureWriter() # TEMPERATURE air temperature and snow melt data\n self.write_adjustments = AdjustmentsWriter() # ADJUSTMENTS monthly climate adjustments\n self.write_subcatchments = SectionWriterAsList(\"[SUBCATCHMENTS]\", SubcatchmentWriter,\n \";;Name \\tRain Gage \\tOutlet \\tArea \\t%Imperv \\tWidth \\t%Slope \\tCurbLen \\tSnowPack \\n\"\n \";;--------------\\t----------------\\t----------------\\t--------\\t--------\\t--------\\t--------\\t--------\\t----------------\")\n # basic subcatchment information\n\n self.write_subareas = SectionWriterAsList(\"[SUBAREAS]\", SubareaWriter,\n \";;Subcatchment \\tN-Imperv \\tN-Perv \\tS-Imperv \\tS-Perv \\tPctZero \\tRouteTo \\tPctRouted\\n\"\n \";;--------------\\t----------\\t------------\\t--------\\t----------\\t----------\\t----------\\t---------\")\n # subcatchment impervious/pervious sub-area data\n\n #self.write_infiltration = SectionWriterAsListOf(\"[INFILTRATION]\", SectionWriter, None)\n # write_infiltration is set in as_text based on the kind of infiltration being used in the project.\n\n self.write_lid_controls = SectionWriterAsList(\"[LID_CONTROLS]\", LIDControlWriter,\n \";;Name \\tType/Layer\\tParameters\\n\"\n 
\";;--------------\\t----------\\t----------\")\n # low impact development control information\n\n self.write_lid_usage = SectionWriterAsList(\"[LID_USAGE]\", LIDUsageWriter,\n \";;Subcatchment \\tLID Process \\tNumber \\tArea \\tWidth \\tInitSat \\tFromImp \\tToPerv \\tRptFile \\tDrainTo\\n\"\n \";;--------------\\t----------------\\t-------\\t----------\\t----------\\t----------\\t----------\\t----------\\t------------------------\\t----------------\")\n # assignment of LID controls to subcatchments\n\n self.write_aquifers = SectionWriterAsList(\"[AQUIFERS]\", AquiferWriter,\n \";;Aquifer \\tPhi \\tWP \\tFC \\tHydCon\\tKslope\\tTslope\\tUEF \\tLED \\tLGLR \\tBEL \\tWTEL \\tUZM \\tUEF Pat\\n\"\n \";;--------------\\t------\\t------\\t------\\t------\\t------\\t------\\t------\\t------\\t------\\t------\\t------\\t------\\t-------\")\n # groundwater aquifer parameters\n\n self.write_groundwater = SectionWriterAsList(\"[GROUNDWATER]\", GroundwaterWriter,\n \";;Subcatchment \\tAquifer \\tNode \\tEsurf \\tA1 \\tB1 \\tA2 \\tB2 \\tA3 \\tDsw \\tEgwt \\tEbot \\tWgr \\tUmc \\n\"\n \";;--------------\\t----------------\\t----------------\\t------\\t------\\t------\\t------\\t------\\t------\\t------\\t------\\t------\\t------\\t------\")\n # subcatchment groundwater parameters\n\n self.write_gwf = SectionWriterAsList(\"[GWF]\", GWFWriter,\n \";;Subcatchment \\tFlow \\tEquation\\n\"\n \";;-------------- \\t------- \\t--------\")\n # custom groundwater flow equations\n\n self.write_snowpacks = SectionWriterAsList(\"[SNOWPACKS]\", SnowPackWriter,\n \";;Name \\tSurface \\tParameters\\n\"\n \";;--------------\\t----------\\t----------\")\n # subcatchment snow pack parameters\n\n self.write_junctions = SectionWriterAsList(\"[JUNCTIONS]\", JunctionWriter,\n \";;Name \\tElevation \\tMaxDepth \\tInitDepth \\tSurDepth \\tAponded\\n\"\n \";;--------------\\t----------\\t----------\\t----------\\t----------\\t----------\")\n # junction node information\n\n self.write_outfalls = SectionWriterAsList(\"[OUTFALLS]\", OutfallWriter,\n \";;Name \\tElevation \\tType \\tStage Data \\tGated \\tRoute To\\n\"\n \";;--------------\\t----------\\t----------\\t----------------\\t--------\\t----------------\")\n # outfall node information\n\n self.write_dividers = SectionWriterAsList(\"[DIVIDERS]\", DividerWriter,\n \";;Name \\tElevation \\tDiverted Link \\tType \\tParameters\\n\"\n \";;--------------\\t----------\\t----------------\\t----------\\t----------\")\n # flow divider node information\n\n self.write_storage = SectionWriterAsList(\"[STORAGE]\", StorageWriter,\n \";;Name \\tElev. 
\\tMaxDepth \\tInitDepth \\tShape \\tCurve Name/Params \\tN/A-Pond\\tFevap \\tPsi \\tKsat \\tIMD\\n\"\n \";;--------------\\t--------\\t----------\\t-----------\\t----------\\t----------------------------\\t--------\\t--------\\t--------\\t--------\\t--------\")\n # storage node information\n\n self.write_conduits = SectionWriterAsList(\"[CONDUITS]\", ConduitWriter,\n \";;Name \\tFrom Node \\tTo Node \\tLength \\tRoughness \\tInOffset \\tOutOffset \\tInitFlow \\tMaxFlow\\n\"\n \";;--------------\\t----------------\\t----------------\\t----------\\t----------\\t----------\\t----------\\t----------\\t----------\")\n # conduit link information\n\n self.write_pumps = SectionWriterAsList(\"[PUMPS]\", PumpWriter,\n \";;Name \\tFrom Node \\tTo Node \\tPump Curve \\tStatus \\tStartup \\tShutoff\\n\"\n \";;--------------\\t----------------\\t----------------\\t----------------\\t--------\\t--------\\t--------\")\n # pump link information\n\n self.write_orifices = SectionWriterAsList(\"[ORIFICES]\", OrificeWriter,\n \";;Name \\tFrom Node \\tTo Node \\tType \\tOffset \\tQcoeff \\tGated \\tCloseTime\\n\"\n \";;--------------\\t----------------\\t----------------\\t------------\\t----------\\t----------\\t--------\\t----------\")\n # orifice link information\n\n self.write_weirs = SectionWriterAsList(\"[WEIRS]\", WeirWriter,\n \";;Name \\tFrom Node \\tTo Node \\tType \\tCrestHt \\tQcoeff \\tGated \\tEndCon \\tEndCoeff \\tSurcharge \\tRoadWidth \\tRoadSurf \\tCoeff. Curve\\n\"\n \";;--------------\\t----------------\\t----------------\\t------------\\t----------\\t----------\\t--------\\t--------\\t----------\\t----------\\t----------\\t----------\\t----------\")\n # weir link information\n\n self.write_outlets = SectionWriterAsList(\"[OUTLETS]\", OutletWriter,\n \";;Name \\tFrom Node \\tTo Node \\tOffset \\tType \\tQTable/Qcoeff \\tQexpon \\tGated\\n\"\n \";;--------------\\t----------------\\t----------------\\t----------\\t---------------\\t----------------\\t----------\\t--------\")\n # outlet link information\n\n self.write_xsections = SectionWriterAsList(\"[XSECTIONS]\", CrossSectionWriter,\n \";;Link \\tShape \\tGeom1 \\tGeom2 \\tGeom3 \\tGeom4 \\tBarrels \\tCulvert \\n\"\n \";;--------------\\t------------\\t----------------\\t----------\\t----------\\t----------\\t----------\\t----------\")\n # conduit, orifice, and weir cross-section geometry\n\n self.write_transects = TransectsWriter() # transect geometry for conduits with irregular cross-sections\n\n self.write_losses = SectionWriterAsList(\"[LOSSES]\", LossWriter,\n \";;Link \\tKentry \\tKexit \\tKavg \\tFlap Gate \\tSeepage \\n\"\n \";;--------------\\t----------\\t----------\\t----------\\t----------\\t----------\")\n # conduit entrance/exit losses and flap valves\n\n self.write_controls = ControlWriter()\n # rules that control pump and regulator operation\n\n self.write_events = SectionWriterAsList(\"[EVENTS]\", EventsWriter,\n \";;Start Date \\tEnd Date\\n\")\n # events\n\n self.write_landuses = SectionWriterAsList(\"[LANDUSES]\", LanduseWriter,\n \";; \\tSweeping \\tFraction \\tLast\\n\"\n \";;Name \\tInterval \\tAvailable \\tSwept\\n\"\n \";;--------------\\t----------\\t----------\\t----------\")\n # land use categories\n\n self.write_buildup = SectionWriterAsList(\"[BUILDUP]\", BuildupWriter,\n \";;Land Use \\tPollutant \\tFunction \\tCoeff1 \\tCoeff2 \\tCoeff3 \\tPer Unit\\n\"\n \";;--------------\\t----------------\\t----------\\t----------\\t----------\\t----------\\t----------\")\n # buildup functions for pollutants and land 
uses\n\n self.write_washoff = SectionWriterAsList(\"[WASHOFF]\", WashoffWriter,\n \";;Land Use \\tPollutant \\tFunction \\tCoeff1 \\tCoeff2 \\tSweepRmvl \\tBmpRmvl\\n\"\n \";;--------------\\t----------------\\t----------\\t----------\\t----------\\t----------\\t----------\")\n # washoff functions for pollutants and land uses\n\n self.write_pollutants = SectionWriterAsList(\"[POLLUTANTS]\", PollutantWriter,\n \";;Name \\tUnits \\tCrain \\tCgw \\tCrdii \\tKdecay \\tSnowOnly \\tCo-Pollutant \\tCo-Frac \\tCdwf \\tCinit\\n\"\n \";;--------------\\t------\\t----------\\t----------\\t----------\\t----------\\t----------\\t----------------\\t----------\\t----------\\t----------\")\n # pollutant information\n\n self.write_coverages = CoveragesWriter() # COVERAGES # assignment of land uses to subcatchments\n self.write_treatment = SectionWriterAsList(\"[TREATMENT]\", TreatmentWriter,\n \";;Node \\tPollutant \\tFunction\\n\"\n \";;--------------\\t----------------\\t--------\")\n # pollutant removal functions at conveyance system nodes\n\n self.write_inflows = SectionWriterAsList(\"[INFLOWS]\", DirectInflowWriter,\n \";;Node \\tConstituent \\tTime Series \\tType \\tMfactor \\tSfactor \\tBaseline\\tPattern\\n\"\n \";;--------------\\t----------------\\t----------------\\t--------\\t--------\\t--------\\t--------\\t--------\")\n # INFLOWS # external hydrograph/pollutograph inflow at nodes\n\n self.write_dwf = SectionWriterAsList(\"[DWF]\", DryWeatherInflowWriter,\n \";;Node \\tConstituent \\tBaseline \\tPatterns \\n\"\n \";;--------------\\t----------------\\t----------\\t----------\")\n # baseline dry weather sanitary inflow at nodes\n\n self.write_patterns = SectionWriterAsList(\"[PATTERNS]\", PatternWriter,\n \";;Name \\tType \\tMultipliers\\n\"\n \";;--------------\\t----------\\t-----------\")\n # PATTERNS periodic variation in dry weather inflow\n\n self.write_rdii = SectionWriterAsList(\"[RDII]\", RDIInflowWriter,\n \";;Node \\tUnit Hydrograph \\tSewer Area\\n\"\n \";;--------------\\t----------------\\t----------\")\n # rainfall-dependent I/I information at nodes\n\n self.write_loadings = InitialLoadingsWriter()\n # initial pollutant loads on subcatchments\n\n self.write_curves = SectionWriterAsList(\"[CURVES]\", CurveWriter,\n \";;Name \\tType \\tX-Value \\tY-Value \\n\"\n \";;--------------\\t----------\\t----------\\t----------\")\n # CURVES x-y tabular data referenced in other sections\n\n self.write_timeseries = SectionWriterAsList(\"[TIMESERIES]\", TimeSeriesWriter,\n \";;Name \\tDate \\tTime \\tValue\\n\"\n \";;--------------\\t----------\\t----------\\t----------\")\n # time series data referenced in other sections\n\n self.write_labels = SectionWriterAsList(\"[LABELS]\", LabelWriter,\n \";;X-Coord \\tY-Coord \\tLabel\")\n # X, Y coordinates, text, and font details of labels", "def table_maker():\r\n try:\r\n off_copy = off.copy()\r\n man_copy = man.copy()\r\n exe_copy = exe.copy()\r\n ceo_copy = ceo.copy()\r\n list_of_lists = [off_copy, man_copy, exe_copy, ceo_copy]\r\n\r\n for i in list_of_lists:\r\n for j in i:\r\n if type(j) == str:\r\n continue\r\n else:\r\n raise ValueError('All elements must be strings')\r\n\r\n row_num = max(len(off_copy), len(man_copy),\r\n len(exe_copy), len(ceo_copy))\r\n for i in list_of_lists:\r\n if len(i) != row_num:\r\n diff = row_num - len(i)\r\n for j in range(diff):\r\n i.append('')\r\n\r\n t = PrettyTable(\r\n ['Office Workers', 'Managers', 'Executives', 'CEO'])\r\n for i in range(row_num):\r\n t.add_row([off_copy[i], man_copy[i], exe_copy[i], 
ceo_copy[i]])\r\n\r\n with open('Employee Table.txt', 'w') as f:\r\n f.write(str(t))\r\n\r\n except FileNotFoundError:\r\n print(\"Error: No file entered\")" ]
[ "0.6867469", "0.6389346", "0.63584435", "0.63152426", "0.6292773", "0.6279732", "0.62315065", "0.6176681", "0.6134976", "0.6134058", "0.6121338", "0.6118336", "0.6118336", "0.60832316", "0.6075302", "0.60436225", "0.6041428", "0.6038445", "0.59721166", "0.5969328", "0.5969005", "0.5937278", "0.59241134", "0.5916537", "0.59145856", "0.58961636", "0.5894004", "0.5893478", "0.5880765", "0.58541864", "0.584085", "0.5826711", "0.58230233", "0.58229405", "0.580875", "0.58084476", "0.57900304", "0.57864285", "0.57861096", "0.5783856", "0.5767163", "0.5757048", "0.5753905", "0.5728226", "0.57260627", "0.5716757", "0.57062304", "0.56936693", "0.5682708", "0.5672679", "0.56719255", "0.5669499", "0.56684846", "0.5660687", "0.5658742", "0.5655935", "0.5653929", "0.5644702", "0.56413585", "0.5640109", "0.56385344", "0.56378996", "0.56323904", "0.56281775", "0.56228834", "0.56123304", "0.5602476", "0.5598022", "0.55968994", "0.5593906", "0.5590268", "0.5587004", "0.5577218", "0.55713207", "0.5560852", "0.5560784", "0.55530876", "0.55427366", "0.5540022", "0.55356765", "0.5532378", "0.55316216", "0.5530862", "0.552782", "0.5522993", "0.55228966", "0.55214196", "0.55080706", "0.5507482", "0.5506832", "0.55000013", "0.5492104", "0.54889333", "0.54881436", "0.5487568", "0.5486483", "0.54841816", "0.5480075", "0.5478333", "0.5472638", "0.54717565" ]
0.0
-1
Initialization function which will be called to create the base frame upon which the animation takes place. This is used for blitting to create smoother animations.
def func_init(self):
    self.points.set_data([], [])
    for line in self.lines:
        line.set_data([], [])
    self.annotation.set_text('')
    return tuple(self.lines) + (self.points, self.annotation)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _init_frame(self : \"animation\",\n init_frame : \"matplotlib.figure.Figure\",\n init_ax : \"matplotlib.axes._subplots.AxesSubplot\"\n ):\n self._cframe = init_frame.canvas.copy_from_bbox(init_ax.bbox)", "def _init_anim(self):\n pass", "def setupFrame(self, frame_width, frame_height):\n x, y = 0.0, 0.4\n self.x0 = int(frame_width*x)\n self.y0 = int(frame_height*y)\n self.width = 260\n self.height = 260", "def __init__(self, frame):\n super().__init__(frame)\n self.frames = None\n self.delay = None", "def __init__(self, img, width, height, animations=None, frame=0, speed=0.125, start_animation=E_ANIM):\n super().__init__(img, 0, 0, width, height)\n self.img = img\n\n self.current_animation = start_animation\n self.frame = frame\n self.speed = speed\n self.timer = 0\n self.direction = (0,1)\n\n if animations:\n self.anims = animations\n else:\n self.anims = { E_ANIM: (0,1) }", "def __init__(self):\n super().__init__()\n self._active = False\n # Counter, used in the animation\n self._time = 0\n # Store the current image id, initially it's 'default'\n self._image = 'default'", "def __init__(\n self : \"animation\",\n filename : \"str\",\n size : \"Tuple[int,int]\" = None,\n pbar : \"bool\" = False,\n mbs : \"int\" = 16,\n dpi : \"int\" = 150,\n init_frame : \"matplotlib.figure.Figure\" = None,\n init_ax : \"matplotlib.axes._subplots.AxesSubplot\" = None,\n fps : \"int\" = 5,\n interactive : \"bool\" = False,\n autoSmooth : \"bool\" = False,\n smoothingFrames : \"int\" = 5,\n saveFinalFrame : \"int\" = False,\n smoothingTime : float = None,\n smoothingFunction : \"Callable\" = None\n ):\n self.filename = filename\n self.size = size\n self._mbs = mbs\n self._writer = imageio.get_writer(\n self.filename,\n mode='I',\n macro_block_size=self._mbs,\n fps=fps\n )\n self.fps = fps\n self.pbar = pbar\n self._frame_number = 0\n self._closed = False\n self.dpi = dpi\n self._cframe = None\n if init_frame and init_ax:\n self._init_frame(init_frame, init_ax)\n\n self._init_interactive = matplotlib.is_interactive()\n if self._init_interactive and not interactive:\n matplotlib.interactive(False)\n else:\n matplotlib.interactive(interactive)\n if autoSmooth:\n assert smoothingFrames > 0\n\n self._autosmooth = autoSmooth\n self._prevFrame = None\n\n\n # Set up smoothing\n if smoothingTime is None:\n self._smoothingFrames = smoothingFrames\n else:\n self._smoothingFrames = int(smoothingTime*fps)\n\n if smoothingFunction is None:\n self._smoothingFunction = self._linear_interpolation\n else:\n self._smoothingFunction = smoothingFunction\n\n self._saveFinalFrame = saveFinalFrame", "def __init__(self):\n\n #create initial tile array and animation dictionary for walkonto animations \n self.array = []\n self.animations = {}", "def __init__(self, frame):\n self.frame = frame", "def __init__(self, parent):\n super(Demo5, self).__init__(parent)\n self.angle = 0.0\n self.replication = 1.0\n self.offset = 0.0\n self.deltaRep = 1\n self.revolution = 0\n self.stepsPer90 = 180\n self.stepsLeft = self.stepsPer90\n self.deltaAng = 90.0\n self.deltaOff = 0.15\n self.spin = True\n self.x2yAspect = 1.0\n self.texture = None", "def __init__(self, frames):\n self._frames = frames", "def __init__(self, frames):\n self._frames = frames", "def __init__(self, frames):\n self._frames = frames", "def __init__(self, frames):\n self._frames = frames", "def __init__(self, x: int, y: int, width: int, height: int):\n pygame.sprite.Sprite.__init__(self)\n self.x = x\n self.y = y\n self.width = width\n self.height = height\n 
self.curr_frame = 1", "def init_graphics(self):\n if type(self.image_ref) is Surface:\n # This is the case for the special visual effect\n self.image = self.image_ref\n else:\n image = GLOBAL.img(self.image_ref)\n if type(image) is tuple:\n # for decode purpose\n self.image = Surface(TILESIZE_SCREEN)\n self.image.fill(image)\n elif type(image) is list or type(image) is dict:\n self.animated = True\n self.current_frame = 0\n self.last_update = 0\n if type(image) is list:\n self.list_image = image\n self.image = self.list_image[self.current_frame]\n else:\n self.last_direction = (1, 0)\n self.dict_image = image\n self.image = self.dict_image['E'][self.current_frame]\n else:\n self.image = image\n self._reposition_rect()", "def __init__(self, frame):\n self.frame = frame\n self._configure()", "def __init__(self):\n # Attributes from GUI or caller\n self.file_path = None\n self.override = None\n self.start_frame = None\n self.render_length = None\n self.timecode_in = None\n self.timecode_out = None\n\n # Attributes from Pico file in memory\n self.file_buffer = None\n\n self.header = None\n self.base_timecode = None\n\n self.channels = None # Not entirely sure we need this - Scratch that, we do need it.\n self.jam_timecode = None\n self.raw_fps = None\n self.frame_in = None\n self.frame_out = None\n self.frame_zero = None\n self.frame_offset = None # This one eventually turns into the frame index\n self.frame_start = None # This one is not used anymore as the value comes from the GUI\n self.frame_padding = None\n self.total_frames = None\n\n # Attributes for render action\n self.output_name = None\n self.render_fps = None\n self.ref_timecode = None", "def __init__(self):\n\n self.frameCount = 0\n self._initScreen()\n self._initObjects()\n self._initControls()\n self._initLevel()\n self._start()\n print \"DEBUG: Initializing Game\"\n pass", "def __init__(self, shape):\n self.eyes = [(), ()]\n self.shape = shape\n self.state = 0\n self.new_frame()", "def __init__(self, frame=1):\n self._frame = frame\n self._ticks = []", "def __init__(self, image, scr, view_point, cols=1, rows=1):\r\n self.z = 1\r\n super().__init__(image, scr, view_point, cols, rows)\r\n self.frame = randint(0, self.get_max_frame())", "def __init__(self, canvas_dimensions):\n self.__color_palette = ColorPalette()\n self.frame_number = 0\n\n self.canvas_width, self.canvas_height = canvas_dimensions\n self.shadow_offset = 8\n\n # Import resources\n self.__title_font = pygame.font.Font(str(PurePath(\"res/Fonts/04B_19.TTF\")), 70)\n self.__prompt_font = pygame.font.Font(str(PurePath(\"res/Fonts/04B_19.TTF\")), 30)\n\n # Initialize title text\n self.title_surface = self.__title_font.render(\"Flappy Bird\", False, self.__color_palette.medium_sea_green)\n self.title_surface_shadow = self.__title_font.render(\"Flappy Bird\", False, self.__color_palette.white_smoke)\n\n # Initialize prompt text\n self.prompt_surface = self.__prompt_font.render(\"Press anything to play\", False, self.__color_palette.dark_golden_rod)\n self.prompt_surface_shadow = self.__prompt_font.render(\"Press anything to play\", False, self.__color_palette.white_smoke)", "def __init__(self, camera_func, width, height):\n self.camera_func = camera_func\n self.state = pygame.Rect(0, 0, width, height)", "def __init__(self,b=5,alpha=1.0):\n wanimation.__init__(self,lambda x:float(b*x),self.render_function,alpha)", "def __init__(self, image, frameImage, x=0, y=0, w=0, h=0):\n self.x = x \n self.y = y\n self.image = image\n self.frameImage = frameImage\n self.dial = 
pygame.Surface(self.frameImage.get_rect()[2:4])\n self.dial.fill((255,255,0))\n if(w==0):\n w = self.frameImage.get_rect()[2]\n if(h==0):\n h = self.frameImage.get_rect()[3]\n self.w = w\n self.h = h\n self.pos = self.dial.get_rect()\n self.pos = self.pos.move(x, y)", "def __init__(self):\n super().__init__()\n self.texture = arcade.load_texture(\":resources:/images/enemies/slimeBlue.png\")\n\n # Reset the viewport, necessary if we have a scrolling game and we need\n # to reset the viewport back to the start so we can see what we draw.\n arcade.set_viewport(0, constants.SCREEN_WIDTH - 1, 0, constants.SCREEN_HEIGHT - 1)", "def __init__(self, dim):\n self.surface = pygame.Surface(dim)\n self.p_array = pygame.PixelArray(self.surface)\n self.p_array[0, 0] = (255, 255, 255)\n print(self.p_array.shape)\n # set some values\n self.width = self.surface.get_width()\n self.height = self.surface.get_height()\n # start in center of surface\n self.center = (self.width // 2, self.height // 2)\n self.fibonacci = get_fibonacci()\n self.framecount = 0\n self.initialize()", "def __init__(self):\n self._start = None\n self._end = None\n self._num_frames = 0", "def __init__(self, x=0, y=0, w=0, h=0, resource='resources/battery2.png'):\n self.icon = pygame.image.load('resources/battery2.png').convert()\n Generic.__init__(self, x, y, w, h)\n self.frameImage = pygame.image.load(resource).convert()", "def _animation_init(self):\n\n self.animation_ax.set_xlim(self.plant.workspace_range[0][0],\n self.plant.workspace_range[0][1])\n self.animation_ax.set_ylim(self.plant.workspace_range[1][0],\n self.plant.workspace_range[1][1])\n self.animation_ax.set_xlabel(\"x position [m]\")\n self.animation_ax.set_ylabel(\"y position [m]\")\n for ap in self.animation_plots[:-1]:\n ap.set_data([], [])\n self.animation_plots[-1].set_text(\"t = 0.000\")\n\n self.tau_arrowarcs = []\n self.tau_arrowheads = []\n for link in range(self.plant.n_links):\n arc, head = get_arrow(radius=0.001,\n centX=0,\n centY=0,\n angle_=110,\n theta2_=320,\n color_=\"red\")\n self.tau_arrowarcs.append(arc)\n self.tau_arrowheads.append(head)\n self.animation_ax.add_patch(arc)\n self.animation_ax.add_patch(head)\n\n return self.animation_plots + self.tau_arrowarcs + self.tau_arrowheads", "def __init___(self, x, y, width, height):\n super(GRect, self).__init__()\n frameWidth = width\n frameHeight = height\n setLocation(x, y)", "def __init__(self):\n self.colorPalette = ColorPalette()\n self.frame_number = 0\n\n # initialization of Pygame components\n pygame.init()\n self.__icon = self.__make_icon()\n self.screen_size = (600, 800) # width x height\n self.canvas = pygame.display.set_mode(self.screen_size, 0, 32)\n pygame.display.set_caption(\"Flappy Bird\")\n pygame.display.set_icon(self.__icon)\n self.clock = pygame.time.Clock()\n\n # Initialization of game models\n self.ground = Ground(self.screen_size)\n self.background = Backdrop(self.ground.offset)\n self.bird = Bird(ground_offset=self.ground.offset, y_coord=self.screen_size[1] // 2)\n self.pipes = [PipeSet()]\n self.menu_pipes = [PipeSet()]\n\n self.main_menu_screen = Menu(canvas_dimensions=self.screen_size)\n\n self.player_points = 0\n self.scoreboard = Scoreboard(canvas_dimensions=self.screen_size)\n self.game_over_screen = GameOverMenu(canvas_dimensions=self.screen_size)\n\n # Game control flow manipulation variables\n self.__play_game = False\n self.__just_launched = True\n self.player_dead = False\n self.scroll_speed = 2", "def __init__(self):\n\n # Call the parent class (sprite) constructor\n 
super().__init__()\n # Create image of block and fill with color.\n self.image = pygame.Surface([20, 20])\n self.image.fill(BLACK)\n\n # Fetch rectangle object that has dimensions of image. Update position of object by setting values\n # of rect.x and rect.y\n self.rect = self.image.get_rect()", "def __init__(self, x=0, y=0, w=0, h=0):\n self.image = pygame.image.load('resources/AirSpeedNeedle.png').convert()\n self.frameImage = pygame.image.load('resources/Indicator_Background.png').convert()\n Dial.__init__(self, self.image, self.frameImage, x, y, w, h)", "def __init__(self):\n # Screen settings\n self.screen_width = 2400\n self.screen_height = 1600\n self.bg_color = (0, 0, 0)\n\n # Raindrop settings\n self.r_y_speed = 10", "def __init__(self):\n\n # Call the parent class (Sprite) constructor\n super().__init__()\n\n # Create an image of the block, and fill it with a color.\n # This could also be an image loaded from the disk.\n self.image = pygame.image.load('assets/' + 'singleLaser.png')\n\n # Fetch the rectangle object that has the dimensions of the image\n # image.\n # Update the position of this object by setting the values\n # of rect.x and rect.y\n self.rect = self.image.get_rect()\n self.rect.center = (settings.SCREEN_WIDTH / 2, settings.SCREEN_HEIGHT / 2)", "def __init__(self):\n Frame.__init__(self, spec.FRAME_HEARTBEAT, 0)", "def __init__(self, x_0, y_0, initX, initY,h=5):\n self.x_0=x_0\n self.y_0=y_0\n self.x_init=initX\n self.y_init=initY\n self.step=h", "def __init__(self):\n self.size = 16\n self.color = COLOR\n self.pos = self.spawn()", "def start(self):\n self.frame = 0\n self._init_level(1)\n self.reward = 0\n self.pcontinue = 1\n self.ghost_speed = self.ghost_speed_init\n return self._make_image(), self.reward, self.pcontinue", "def init():\n uanim.set_data([],[])\n return uanim,", "def __init__(self):\n\n # Screen's settings\n self.screen_width = 1200\n self.screen_height = 800\n self.bg_color = (230, 230, 230)\n\n # Bluebee Settings\n self.bb_speed = 1.0\n\n # Moving test.\n self.counter = 0\n self.max_left = 400\n self.max_up = 300\n self.max_right = 400\n self.max_down = 300", "def __init__(self,master,width,height):\n\n #Save inherited arguments\n self.master = master\n self.width = width\n self.height = height\n\n #Call parent's constructor method\n super(Main_Frame,self).__init__(master,\n width = width,\n height = height)\n\n #List of images\n self.__pics = self.pics_dict\n\n #Tuple of acceptable frame names\n self.__frame_names = (\"image_frame\",\n \"audio_frame\",\n \"doc_frame\",\n \"video_frame\")\n\n #Add the UI elements to the frame\n self.build_UI()", "def __init__(self, screen_width:int, screen_height:int, screen, sensitivity:int, max_fps:int, debug:bool):\n\n #Call the superclass init method\n super().__init__(screen_width, screen_height, screen, State(102), sensitivity, max_fps, 0.1, debug)\n\n #Commander brief image\n self.bg = ImageObject(300, 285, 600, 570, StoryTemplate.sprites['commander_brief'], debug)\n\n #Image of figure head (To be replaced with the actual image)\n self.marco = ImageObject(300, 215, 217, 217, StoryTemplate.sprites['yuckerberg'], debug)\n self.marco.scale(217,217)\n\n #Image of the commander\n self.commander = ImageObject(300, 210, 217, 217, StoryTemplate.sprites['silloette_commander'], debug)\n self.commander.scale(217,217)\n\n #Textbox\n self.tb = ImageObject(300, 685, 600, 230, StoryTemplate.sprites['textbox'], debug)", "def __init__(self):\n pygame.init()\n self.rain_settings = RSettings()\n\n self.screen = 
pygame.display.set_mode((0, 0), pygame.FULLSCREEN)\n self.rain_settings.screen_width = self.screen.get_rect().width\n self.rain_settings.screen_height = self.screen.get_rect().height\n pygame.display.set_caption(\"Raindrops\")\n\n self.rain = pygame.sprite.Group()\n\n self._create_rain()", "def __init__(self, *args, **kwargs):\n super(Ball, self).__init__(*args, **kwargs)\n self.speed = kwargs.get('speed', 5)\n self.ball_image = pyglet.image.load(os.path.join(config.ASSETS_DIR, 'ball.png'))\n self.width = self.ball_image.width\n self.height = self.ball_image.height\n self.ball_sprite = pyglet.sprite.Sprite(self.ball_image, self.x, self.y)\n self.x_direction = 1\n self.y_direction = 1\n\n print('Ball Created')", "def initialize(self):\n # self.drawsurface.fill((0, 0, 0))\n self.array2d = pygame.surfarray.array2d(self.drawsurface)\n self.fire = numpy.zeros((self.width, self.height))\n # generate palette\n self.palette = numpy.zeros(255)\n # aplette should be something from black to yellow red\n self.palette[0] = pygame.Color(0, 0, 0, 255)\n for index in range(1, 255):\n color = pygame.Color(0, 0, 0, 255)\n # original C Comments\n # Hue goes from 0 to 85: red to yellow\n # Saturation is always the maximum: 255\n # Lightness is 0..100 for x=0..128, and 255 for x=128..255\n # color = HSLtoRGB(ColorHSL(x / 3, 255, std::min(255, x * 2)));\n color.hsla = (index, 100, index / 2.55, 10)\n self.palette[index] = color", "def __init__(self, y: int):\n self.y = y\n self.x = 0\n self.first_x = 0\n self.second_x = Base.Width\n self.rect = pygame.Rect(self.x, self.y, Base.Width, Base.Height)", "def __init__(self, frames):\n self._frames = frames\n self._out = None", "def __init__(self, frames):\n self._frames = frames\n self._out = None", "def __init__(self, frames):\n self._frames = frames\n self._out = None", "def __init__(self, frames):\n self._frames = frames\n self._out = None", "def __init__(self, frames):\n self._frames = frames\n self._out = None", "def __init__(self, frames):\n self._frames = frames\n self._out = None", "def new_frame(self):\n self.eyes_frame = numpy.zeros(self.shape, numpy.uint8)", "def __init__(self, straight):\n BaseFigureCanvas.__init__(self)\n self.straight = straight\n self.fill1 = None\n self.fill2 = None\n self.ax = self.fig_setup()\n self.beams = self.data_setup()\n self.anim = animation.FuncAnimation(self.figure, self.animate,\n init_func=self.init_data, frames=1000, interval=20)", "def __init__(self):\n # Screen settings\n self.screen_width = 400\n self.screen_height = 300\n self.bg_color = (230, 230, 230)\n\n self.rocket_speed_factor= 1.5", "def __init__(self, x, y, msec_to_climb, images):\n super(Bird, self).__init__()\n self.x, self.y = x, y\n self.msec_to_climb = msec_to_climb\n self._img_wingup, self._img_wingdown = images\n self._mask_wingup = pygame.mask.from_surface(self._img_wingup)\n self._mask_wingdown = pygame.mask.from_surface(self._img_wingdown)\n self.dead = False", "def __init__(self, initial_x:int, initial_y:int, width:int, height:int, power_type:str, time_to_live:int, debug:bool = False):\n\n #Call the superclass contructor\n super().__init__(initial_x, initial_y, width, height, PowerUp.sprites[power_type], debug)\n\n #Store variables\n self.power_type = power_type\n self.ttl = time_to_live\n\n #Scale the image\n self.scale(30,30)", "def __init__(self, color, width, height, speed=3, direction=random.choice([-1, 1])):\n\n # Call the parent class (Sprite) constructor\n super().__init__()\n\n # Create an image of the block, and fill it with a color.\n # 
This could also be an image loaded from the disk.\n self.speed = speed\n self.image = pygame.Surface([width, height])\n self.image.fill(color)\n self.width = width\n self.height = height\n self.direction = direction\n\n # Fetch the rectangle object that has the dimensions of the image\n # image.\n # Update the position of this object by setting the values\n # of rect.x and rect.y\n self.rect = self.image.get_rect()", "def __init__(self, x, y, width, height):\n # Call the parent's constructor\n super().__init__()\n \n # Make a blue wall, of the size specified in the parameters\n self.image = pygame.Surface([width, height])\n self.image.fill([255, 123, 123])\n \n # Make our top-left corner the passed-in location.\n self.rect = self.image.get_rect()\n self.rect.y = y\n self.rect.x = x", "def __init__(self, x, y, width, height, move_speed):\n super().__init__()\n self.image = self.create_image(os.path.join(\"assets\", \"bullet.png\"), width, height)\n self.rect = self.image.get_rect()\n self.rect.x = x\n self.rect.y = y\n self.move_speed = move_speed", "def __init__(self, frames=[], loop = 0):\n\t\t\n\t\tif isinstance(frames, (list, tuple)):\n\t\t\tself.frames = frames\n\t\telse:\n\t\t\traise TypeError\n\t\t\t\n\t\tif not loop:\n\t\t\tself.loop = 0\n\t\telse:\n\t\t\tself.loop = 1\n\t\t\t\n\t\tself.present_frame = None", "def __init__(self):\n\n self.width = 10\n self.height = 10\n self.new_game()", "def __init__ (self, game):\r\n\r\n pygame.sprite.Sprite.__init__(self)\r\n self.game_ref = game\r\n self.initialise()", "def __init__(self, color, x, y):\r\n\r\n # Call the parent class (Sprite) constructor\r\n super().__init__()\r\n\r\n # Create the image of the block of appropriate size\r\n # The width and height are sent as a list for the first parameter.\r\n self.image = pygame.Surface([block_width, block_height])\r\n\r\n # Fill the image with the appropriate color\r\n self.image.fill(color)\r\n\r\n # Fetch the rectangle object that has the dimensions of the image\r\n self.rect = self.image.get_rect()\r\n\r\n # Move the top left of the rectangle to x,y.\r\n # This is where our block will appear..\r\n self.rect.x = x\r\n self.rect.y = y", "def initialize(self) -> None:\n self.simulation = self.initialize_simulation()\n width, height = get_window_resolution()\n display_dim = ((0, width), (0, height))\n self.coord_mapper = CoordinateMapper2D(*self.simulation.dim, *display_dim)\n self.simple_pygame.all_sprites.empty()\n self.initialize_visualization()", "def __init__(self):\n self.size = width, height = pygame.display.Info().current_w, pygame.display.Info().current_h\n self.screen = pygame.display.set_mode(self.size)\n self.x = int((width - 910) / 2)\n self.y = int((height - 675) / 2)", "def create_frame(self, x: int, y: int):\n if self.mouse_on:\n x, y, w, h = self.rect\n self.frame = pygame.Rect(x - 5, y - 5, w + 10, h + 10)\n else:\n self.frame = None", "def setUp(self):\r\n pos =[0,0]\r\n vel = [0,0]\r\n ang = 0.0\r\n ang_vel= [0,0]\r\n image = None\r\n\r\n center = [1,1]\r\n size = 1\r\n info = ImageInfo(center, size)\r\n\r\n self.sprite = Sprite(pos, vel, ang, ang_vel, image, info)", "def setUp(self):\r\n pos =[0,0]\r\n vel = [0,0]\r\n ang = 0.0\r\n ang_vel= [0,0]\r\n image = None\r\n\r\n center = [1,1]\r\n size = 1\r\n info = ImageInfo(center, size)\r\n\r\n self.sprite = Sprite(pos, vel, ang, ang_vel, image, info)", "def setUp(self):\r\n self.caption = \"mirra extending classes\" # window name\r\n self.size = 640, 480 #window size\r\n self.pos = 100,100 # window top left location\r\n 
self.fullScreen = 0 # if fullScreen is on it will overwrite your pos and size to match the display's resolution\r\n self.frameRate = 15 # set refresh framerate\r", "def init(self):\n self._frame_idx = 0\n if self.initialization is not None:\n del self.initialization\n self.initialization = None\n\n if self.config.initialization is not None:\n self.initialization = INITIALIZATION.load(self.config.initialization, **self.__kwargs)\n self.initialization.init()\n\n if self.preprocessing is not None:\n del self.preprocessing\n self.preprocessing = None\n\n if self.config.preprocessing is not None:\n self.preprocessing = Preprocessing(self.config.preprocessing, **self.__kwargs)\n\n if self.odometry is None:\n assert self.config.odometry is not None\n self.odometry = ODOMETRY.load(self.config.odometry, **self.__kwargs)\n\n assert self.odometry is not None\n self.odometry.init()\n if self.loop_closure is None and self.config.loop_closure is not None:\n self.loop_closure = LOOP_CLOSURE.load(self.config.loop_closure, **self.__kwargs)\n if self.loop_closure is not None:\n self.loop_closure.init()\n if self.config.backend is not None:\n self.backend = BACKEND.load(self.config.backend, **self.__kwargs)\n if self.backend is not None:\n self.backend.init()\n else:\n logging.warning(\"[SLAMAlgorithm]Defined a Loop Closure Algorithm Without a Backend\")", "def __init__(self, *args, **kwargs):\n super().__init__(**kwargs)\n self.speed = kwargs.get('speed', 5)\n\n # Loading image file\n self.ball_image = image.load(config.resources_path + 'ball.png')\n self.width = self.ball_image.width\n self.height = self.ball_image.height\n self.ball_sprite = sprite.Sprite(self.ball_image, self.x, self.y)\n\n self.ball_image.rotation = randint(0, 360) # Rotates the sprite\n self.ball_image.scale = uniform(0.5, 2)\n\n self.x_direction = 1 # 1 for + axis direction\n self.y_direction = 1", "def __init__(self, static_image_library:ResourceLibrary, animation_library:ResourceLibrary, num_tiles_x:int, num_tiles_y:int, tile_width:int, tile_height:int):\n self.width = num_tiles_x\n self.height = num_tiles_y\n self.graphicsLibrary = static_image_library\n self.animations_library = animation_library\n self.scaleWidth = tile_width\n self.scaleHeight = tile_height\n\n self.active_spawns = 0\n self.spawn_points = [(2,1),(self.width-3, self.height-2),(self.width-3,1),(2, self.height-2)]\n self.spawn_buffer = 3\n self.spawn_buffers = list()\n for spawn in self.spawn_points:\n for i in range(self.spawn_buffer):\n for j in range(self.spawn_buffer):\n self.spawn_buffers.append((spawn[0]+i, spawn[1]+j))\n self.spawn_buffers.append((spawn[0]-i, spawn[1]+j))\n self.spawn_buffers.append((spawn[0]+i, spawn[1]-j))\n self.spawn_buffers.append((spawn[0]-i, spawn[1]-j))\n\n self.reset()", "def __init__(self, pygame, master):\n \n self._pygame = pygame\n self._master = master\n self._display = self._pygame.display\n self._interface = None\n self._state = None\n self._cycle_colour = (200, 0, 0)\n self._white = (255, 255, 255)", "def __init__(self, color, x, y):\n \n # Call the parent class (Sprite) constructor\n super().__init__()\n \n # Create the image of the block of appropriate size\n # The width and height are sent as a list for the first parameter.\n self.image = pygame.Surface([block_width, block_height])\n \n # Fill the image with the appropriate color\n self.image.fill(color)\n \n # Fetch the rectangle object that has the dimensions of the image\n self.rect = self.image.get_rect()\n \n # Move the top left of the rectangle to x,y.\n # This is 
where our block will appear..\n self.rect.x = x\n self.rect.y = y", "def __init__(self, x = 140, y = 140):\r\n super(Ball, self).__init__(image = Ball.image,\r\n x = 600, y = 240,\r\n dx = -3, dy = 1)", "def initialize(self):\n result = pygame.init()\n pygame.font.init()\n pygame.display.set_caption('gomoku TDD')\n self.screen = pygame.display.set_mode((WINDOWWIDTH, WINDOWHEIGHT))\n self.clock = pygame.time.Clock()\n self.smallfont = pygame.font.Font(None, 40)\n self.isinitialized = True", "def __init__(self):\n self.t_height = 291\n self.t_left = 65\n self.t_right = 144\n self.points = Pix()", "def init():\n \n # General parameters\n exp_path = '/home/laura/Documents/stacks tif/1705_regMovie.tif' # experimental tif stack (grayscale)\n bin_path = '/home/laura/Documents/stacks tif/1705/1705_binarizedMovie.tif' # binarized tif stack\n vect_path = '/home/laura/Documents/STAGE3/1705_NET/' # gpickle directory\n dest_path = '/home/laura/Documents/STAGE3/1705_NET/superposition' # output directory\n verbose = True\n debug = True\n invert = True \n main_params = [exp_path, bin_path, vect_path, dest_path, verbose, debug, invert]\n \n # Output options\n doImg = -1 # image index\n doStack = False \n doVideo = False \n compress = 3 # advice: no more than 5\n output_params = [doImg, doStack, doVideo, compress]\n \n # Drawing options (colors as BGR)\n line = True # edges drawing\n line_color = (0, 255, 0) # green \n line_size = 1 \n apex_color = (0, 0, 255) # red\n apex_size = 5\n node_color = (255, 0, 0) # blue\n node_size = 5\n body_color = (0, 255, 0) # green\n body_size = 3\n drawing_params = [line, line_color, line_size, apex_color, apex_size,\n node_color, node_size, body_color, body_size]\n \n return main_params, output_params, drawing_params", "def __init__(self):\n self.hmd = None\n self.vr_render_models = None\n self.render_width = 0\n self.render_height = 0", "def __init__(self, x, y, reload_time, spritegroup, toast_size, toaster, screen):\r\n self.spritegroup = spritegroup\r\n self.x = x\r\n self.y = y\r\n self.reload_time = reload_time\r\n self.active_time = reload_time\r\n self.toast = None\r\n self.toast_size = toast_size\r\n self.toaster = toaster\r\n self.screen = screen", "def _initialize_(self):\n x_max = self.cond_cal[\"x_max\"]\n dx = self.cond_cal[\"dx\"]\n self.fld_name = datetime.now().strftime(\"%Y_%m%d_%H%M%S\") # folder name which contain animation and figure of calculation result\n self.img_list = [] # define the list in wihch images of plot figure are stacked\n self.t_history = np.array([])\n self.Pc_history = np.array([])\n self.r_history = np.empty([0, int(round((x_max+dx)/dx,0))])\n self.rdot_history = np.empty([0, int(round((x_max+dx)/dx,0))])\n self.rdotn_history = np.empty([0, int(round((x_max+dx)/dx,0))])\n self.Vf_history = np.array([])\n self.Vox_history = np.array([])\n self.mf_history = np.array([])\n self.mox_history = np.array([])\n self.cstr_history = np.array([])\n self.of_history = np.array([])", "def __init__(self, center, waypoints, firepoints):\n super().__init__(center, MallFighter.MALL_FIGHTER_SPEED, MallFighter.ANIMATION_DELAY, *MallFighter.FILE_PATH)\n self.fire_idx = 0\n self.way_idx = 0\n self.waypoints = waypoints\n self.firepoints = firepoints", "def __init__(self, x, y):\n # Call the parent's constructor\n super().__init__()\n width = 10\n height = 10\n \n # Make a blue wall, of the size specified in the parameters\n self.image = pygame.Surface([width, height])\n self.image.fill(BLUE)\n \n # Make our top-left corner the passed-in location.\n 
self.rect = self.image.get_rect()\n self.rect.y = y\n self.rect.x = x", "def __init__(self):\r\n self.position=(0,0)\r\n self.direction = 0.\r\n self.speed = 0\r\n self.state = 0\r\n pass", "def __init__(self, env, n_frames):\n gym.Wrapper.__init__(self, env)\n self.n_frames = n_frames\n self.frames = deque([], maxlen=n_frames)\n shp = env.observation_space.shape\n self.observation_space = gym.spaces.Box(low=0, high=255, shape=(shp[0], shp[1], shp[2] * n_frames),\n dtype=env.observation_space.dtype)", "def __init__(self, angle, x, y):\n pygame.sprite.Sprite.__init__(self)\n self.image = pygame.image.load(\"Image/Trafico.png\")\n self.rect = self.image.get_rect()\n self.image_orig = self.image\n self.speed = 2\n self.direction = angle\n self.steering = 90\n self.x = x\n self.y = y", "def start_animation(self) -> None:\n increment_values = {0: 1, self.original_height: -1}\n self.increment = increment_values.get(self.current_height, 0) # Compressed if", "def __init__(self, x, y, dx, dy):\r\n super(pong2, self).__init__(image=pong2.bball, x=x, y=y, dx=dx, dy=dy)\r\n self.dx1=self.dx\r\n self.dy1=self.dy\r\n self.dx, self.dy=0, 0\r\n self.count=1\r\n self.picture=0", "def __init__(self, width, height, img):\n super().__init__()\n \n self.walking_frames_l = []\n self.walking_frames_r = []\n\n self.direction = \"R\"\n \n # Load all the right facing images into a list\n image = pygame.image.load(img)\n self.walking_frames_r.append(image)\n\n #left facing\n image = pygame.image.load(img)\n image = pygame.transform.flip(image, True, False)\n self.walking_frames_l.append(image)\n\n # Set the image the player starts with\n self.image = self.walking_frames_r[0]\n \n \n self.rect = self.image.get_rect()", "def __init__(self, x, y, largo, alto):\n # Llama al constructor padre\n pygame.sprite.Sprite.__init__(self)\n \n # Construye una pared azul con las dimensiones especificadas por los parametros\n self.image = pygame.Surface([21, 21])\n self.image.fill(VERDE)\n \n # Establece como origen la esquina superior izquierda.\n self.rect = self.image.get_rect()\n self.rect.y = y\n self.rect.x = x", "def __init__(self,imagename,delay,n):\n\t\tself.imagename = imagename\n\t\tself.nframes = n\n\t\tself.delay = delay\n\t\tself.frame = 0\n\t\tself.pause = 0\n\t\tself.looped = False\n\t\tself.initialize()", "def __init__(self):\n self.monsters_images = pg.sprite.Group()\n self.font_23 = pg.font.Font(prepare.FONTS['Timeless-Bold'], 23)\n self.font_20 = pg.font.Font(prepare.FONTS['Timeless'], 20)\n self.font_18 = pg.font.Font(prepare.FONTS['Timeless'], 18)\n self.bold_font = pg.font.Font(prepare.FONTS['Timeless-Bold'], 17)\n self.font_15 = pg.font.Font(prepare.FONTS['Timeless'], 15)\n\n self.init_left_zone()\n self.init_middle_zone()\n self.init_right_zone()", "def __init__(self, color, width, height):\n \n # Call the parent class (Sprite) constructor\n super().__init__()\n \n # Create an image of the block, and fill it with a color.\n # This could also be an image loaded from the disk.\n self.image = pygame.Surface([width, height])\n self.image.fill(color)\n \n # Fetch the rectangle object that has the dimensions of the image\n # image.\n # Update the position of this object by setting the values\n # of rect.x and rect.y\n self.rect = self.image.get_rect()", "def __init__(self, color, width, height):\n # Call the parent class (Sprite) constructor\n super().__init__()\n \n # Create an image of the block, and fill it with a color.\n # This could also be an image loaded from the disk.\n self.image = 
pygame.Surface([width, height])\n self.image.fill(color)\n \n # Fetch the rectangle object that has the dimensions of the image\n # image.\n # Update the position of this object by setting the values\n # of rect.x and rect.y\n self.rect = self.image.get_rect()", "def __init__(self):\n # Initialisation de la bibliothèque Pygame\n pg.init()\n\n # Création de l'écran principal de taille 640px x 480px\n self.screen = pg.display.set_mode((settings.WIDTH, settings.HEIGHT))\n\n # Chargement et collage du fond\n self.background = pg.image.load(settings.BACKGROUND).convert()\n self.screen.blit(self.background, (0, 0))\n\n # Création d'une variable indiquant si le jeu est en cours\n self.running = False\n\n # On termine cette méthode d'initialisation par une mise à jour de \n # l'écran principal\n pg.display.update()", "def __init__(self, ai_settings, screen, sprites):\n super(Saucer, self).__init__()\n self.screen = screen\n self.ai_settings = ai_settings\n self.max_frames = 80.0\n self.cur_frame = 0.0\n self.move_toggle1 = True\n self.toggle_death = False\n self.dead = False\n\n sprite_info = sprites.sprite_infos[\"invader4_1.png\"]\n self.image = sprites.sprite_sheet.subsurface(\n pygame.Rect(sprite_info.x, sprite_info.y, sprite_info.w, sprite_info.h))\n\n sprite_info = sprites.sprite_infos[\"invader4_2.png\"]\n self.image2 = sprites.sprite_sheet.subsurface(\n pygame.Rect(sprite_info.x, sprite_info.y, sprite_info.w, sprite_info.h))\n\n self.image1 = self.image\n self.rect = self.image.get_rect()\n\n # Start each new alien near the top left of the screen.\n self.rect.x = self.rect.width\n self.rect.y = self.rect.height\n\n # Store the alien's exact position.\n self.x = float(self.rect.x)\n\n # The saucer song.\n self.saucer_song = pygame.mixer.music.load(\"audio/saucer.wav\")\n pygame.mixer.music.play(-1)\n\n self.text_color = (230, 230, 230)\n self.font = pygame.font.Font(\"fonts/BPdotsPlusBold.otf\", 48)\n\n self.score_image = 0\n self.score_rect = 0\n\n self.cur_frame_score = 0.0\n self.max_frame_score = 40.0", "def init_paramters(self):\r\n carb_bg_ratio = 5.0\r\n time_to_breakdown = 45.0\r\n insulin_bg_ratio = 50.0\r\n time_to_peak = 45.0\r\n basal_rate = 0.0\r\n digestion_speed = 1.0\r\n activation_speed = 1.0\r\n\r\n # set state to initial\r\n self.S = [self.carb_bg_ratio, self.time_to_breakdown,\r\n self.insulin_bg_ratio, self.time_to_peak,\r\n self.basal_rate, self.digestion_speed,\r\n self.activation_speed]" ]
[ "0.723336", "0.72250724", "0.70384264", "0.70372236", "0.6962778", "0.69174445", "0.6851278", "0.6720123", "0.6609182", "0.6556792", "0.65386903", "0.65386903", "0.65386903", "0.65386903", "0.6523569", "0.64849484", "0.64721674", "0.6449021", "0.64271384", "0.64042795", "0.6402971", "0.64007986", "0.6365245", "0.6338629", "0.6322399", "0.6321787", "0.63214874", "0.6305337", "0.62689435", "0.62682825", "0.6267794", "0.6256791", "0.6199836", "0.61908567", "0.61892456", "0.6186347", "0.6172444", "0.61688083", "0.6165867", "0.61608803", "0.6117423", "0.6113829", "0.609366", "0.6081203", "0.6071763", "0.60680234", "0.60632735", "0.60587746", "0.6035314", "0.6034172", "0.6034172", "0.6034172", "0.6034172", "0.6034172", "0.6034172", "0.6029439", "0.60239416", "0.6002043", "0.59928006", "0.59888905", "0.597247", "0.5959463", "0.59548753", "0.59457475", "0.59367776", "0.59355843", "0.5923495", "0.5917917", "0.5917336", "0.59129006", "0.59116834", "0.59116834", "0.59099144", "0.59023213", "0.5901452", "0.58958274", "0.58956116", "0.58939916", "0.5884367", "0.5870786", "0.58664954", "0.58501583", "0.58461475", "0.584529", "0.58447343", "0.5844572", "0.584409", "0.5840322", "0.5838486", "0.5832895", "0.5829023", "0.5828308", "0.58268106", "0.58225673", "0.5819086", "0.5817969", "0.58169067", "0.5812752", "0.58111554", "0.5802831", "0.5801933" ]
0.0
-1
Function that is used by matplotlib.animation.FuncAnimation to iteratively plot given a frame
def plot_person(self, frame, plottables, image_h, image_w, zoom=True, pad=3): for person in plottables[frame].keys(): plot_coords = plottables[frame][person] plot_coords[:, 1] = plot_coords[:, 1] + image_h coord_dict = {key: value for key, value in dict(enumerate(plot_coords[:, :2])).items() if not (value == 0).any()} present_keypoints = set(coord_dict.keys()) present_connections = [connection for connection in self.connections if len(present_keypoints & set(connection)) == 2] plot_lines = [np.transpose([coord_dict[a], coord_dict[b]]) for a, b in present_connections] for coords, line in zip_longest(plot_lines, self.lines): if isinstance(coords, np.ndarray): line.set_data(coords[0],coords[1]) else: line.set_data([],[]) plot_coords = plot_coords[~(plot_coords == 0).any(axis=1)] self.points.set_data(plot_coords[:, 0], plot_coords[:, 1]) self.annotation.set_text('Frame: {}'.format(frame)) self.ax.set_xlabel('X coordinate') self.ax.set_ylabel('Y coordinate') if zoom: aspect = image_w / image_h xlow, xhigh = plot_coords[:, 0].min(), plot_coords[:, 0].max() # get x higher and lower limit xdiff = xhigh - xlow # calculate the total range of x xpad = ((self.ydiff * aspect) - xdiff) / 2 # calculate how much the xlimits should be padded on either side to set aspect ratio correctly self.ax.set_xlim(xlow - xpad, xhigh + xpad) # set new limits break return tuple(self.lines) + (self.points, self.annotation)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def animate(i):\r\n plot_x.set_data(history_samples[i][:, 0], history_samples[i][:, 1])", "def run_animation(self):\n\n def _get_frame(frame_index, plots):\n \"\"\" Should be called by run_animations only. \"\"\"\n\n # TODO Using the indices of the self.frames, plot in correct location.\n # Okay right now there is a problem where it's unknown whether the set of coordinates\n # is a line or a dot -- that info got lost up there\n\n for amb_index in range(len(self.frames[frame_index])):\n xs = self.frames[frame_index][amb_index][0]\n ys = self.frames[frame_index][amb_index][1]\n\n # if len(xs) > 1:\n # if xs[0] == xs[1]:\n # plots[amb_index][1].set_data([xs[0]], [ys[0]])\n # if xs[-2] == xs[-1]:\n # plots[amb_index][1].set_data([xs[-1]], [ys[-1]])\n\n plots[amb_index][0].set_data(xs, ys)\n\n print(plots[len(self.ambulance_locations)])\n\n return plots,\n\n fig = plt.figure(figsize=(14, 8))\n\n # TODO need [number of ambulances] x [number of states]\n\n plots = []\n for i in range(len(self.ambulance_locations)):\n new_color = self.ambulance_colors[i]\n\n line_plot, = plt.plot([], [],\n marker='+',\n linestyle='',\n markerfacecolor=new_color,\n markeredgecolor=new_color,\n label=\"Ambulance {} Path\".format(i + 1))\n\n # dot_plot, = plt.plot([], [],\n # marker='o',\n # linestyle='',\n # markerfacecolor=new_color,\n # markeredgecolor=new_color)\n\n # plots.append([line_plot, dot_plot])\n\n plots.append([line_plot])\n\n base_plot = plt.scatter([base.longitude for base in self.bases],\n [base.latitude for base in self.bases],\n marker=\"D\", color=\"black\", label=\"Bases\")\n hospital_plot = plt.scatter([hospital.longitude for hospital in self.hospitals],\n [hospital.latitude for hospital in self.hospitals],\n marker=\"P\", color=\"r\", label=\"Hospitals\")\n\n plots.append(base_plot)\n plots.append(hospital_plot)\n\n # TODO Make boundaries parameters\n\n img = plt.imread(\"./visuals/simple.png\")\n plt.imshow(img, extent=[-117.017637, -117.167672, 32.710484, 32.823033])\n plt.legend(loc=\"upper right\")\n print(\"draw the animation\")\n ani = animation.FuncAnimation(fig, _get_frame, len(self.frames),\n fargs=(plots,), interval=50)\n\n plt.show()\n\n # fps = 15\n # print('save the animation')\n # print(\"it may take up to {}\".format(len(self.frames)/fps))\n # ani.save('regional_vis6.mp4', fps=fps, dpi=150)", "def _nextAnimFrame(step=0):\n lfp_frame.set_data(timestamps[step:step+frame_size], lfp[step:step+frame_size])\n r_raw_frame.set_data(timestamps[step:step+frame_size], raw_ripple[step:step+frame_size])\n r_pow_frame.set_data(timestamps[step:step+frame_size], ripple_power[step:step+frame_size])\n lfp_measure.set_text(txt_template % timestamps[step])\n # Updating the limits is needed still so that the correct range of data\n # is displayed! 
It doesn't update the axis labels though - That's a\n # different ballgame!\n plot_axes.set_xlim(timestamps[step], timestamps[step+frame_size])\n return lfp_frame, r_raw_frame, r_pow_frame, lfp_measure", "def time_history_animation(self, frame_step=1, magnification=1):\n import matplotlib.pyplot as plt\n import matplotlib.animation as ani\n\n \"\"\"Retrieve maximum displacement for axis limits\"\"\"\n max_list = [max(map(abs, item)) * magnification for item in self.displacement]\n\n \"\"\"Start figure for animation\"\"\"\n fig = plt.figure()\n ax = fig.add_subplot(111)\n\n \"\"\"Define the rectangles that represent the DOFs\"\"\"\n rectangle = []\n for i in range(len(self.coordinates)):\n rectangle.append(plt.Rectangle((self.coordinates[i][0],\n self.coordinates[i][1]),\n self.size[i][0], self.size[i][1], alpha=0.5))\n\n \"\"\"Init function for animation draws the frame, so that blip can be used and the animation runs faster\"\"\"\n\n def init():\n for i in range(len(self.coordinates)):\n ax.add_patch(rectangle[i])\n plt.axis('auto')\n plt.xlim([-max(max_list) + min(self.coordinates[:][0]),\n max(max_list) + max([item[0] for item in self.coordinates]) + max(self.size[:][0])])\n return rectangle\n\n \"\"\"Animation function: only the coordinates of the rectangles are updated here\"\"\"\n\n def motion(t_step):\n for i in range(len(self.coordinates)):\n rectangle[i].set_xy((float(self.coordinates[i][0]\n + self.displacement[i][t_step * frame_step] * magnification),\n float(self.coordinates[i][1])))\n return rectangle\n\n \"\"\"Animation function: inter gives the time delay between frames in milli seconds\"\"\"\n inter = int(1000 * self.dt * frame_step)\n self.anim = ani.FuncAnimation(fig,\n motion,\n init_func=init,\n interval=inter,\n blit=True)\n\n motion(int(len(self.displacement) / frame_step))\n plt.show()", "def animate1Dframes(x, data):\n plt.ion() # Set the plot to animated. 
\n ax1 = plt.axes()\n line, = plt.plot(data[-1], x , '-*k')\n\n for u in data:\n line.set_xdata(u)\n plt.draw()\n #tm.sleep(0.25)", "def animate(self,frame,im = None):\n # With matplotlib, it's much, much faster to just update the properties\n # of a display object than it is to create a new one, so we'll just update\n # the data and position of the same objects throughout this animation...\n\n # Since we're making an animation with matplotlib, we need \n # ion() instead of show()...\n fig = plt.gcf()\n ax = plt.axes([.25, .55, .6, .4], facecolor='y')\n plt.axis('off')\n\n # Make an image based on the first frame that we'll update later\n # (The first frame is never actually displayed)\n if im is None:\n plt.imshow(frame,cmap='brg')\n else:\n plt.imshow(im)\n plt.title('Image Space')\n\n # Make 4 rectangles that we can later move to the position of each paw\n rects = [Rectangle((0,0), 1,1, fc='none', ec='red') for i in range(4)]\n [ax.add_patch(rect) for rect in rects]\n\n\n # Process and display each frame\n\n paw_slices = self.find_paws(frame)\n\n # Hide any rectangles that might be visible\n [rect.set_visible(False) for rect in rects]\n\n # Set the position and size of a rectangle for each paw and display it\n for slice, rect in zip(paw_slices, rects):\n dy, dx = slice\n rect.set_xy((dx.start, dy.start))\n rect.set_width(dx.stop - dx.start + 1)\n rect.set_height(dy.stop - dy.start + 1)\n rect.set_visible(True)", "def anim_produce_frame(up_to_line, *fargs):\n #unpack *fargs\n axes,running_reward_exists,running_loss_exists,actions_exists,\\\n running_reward_file,running_loss_file,actions_file,actions_to_plot, \\\n actions_per_log,is_tri,actions_ylim = fargs\n #produce the plots for the current frame\n axis_ind = 0\n if running_reward_exists:\n axes[axis_ind].clear()\n plot_running_reward_on_axis(running_reward_file, axes[axis_ind], up_to_line)\n axis_ind += 1\n if running_loss_exists:\n axes[axis_ind].clear()\n axes[axis_ind+1].clear()\n plot_running_loss_on_axis(running_loss_file, axes[axis_ind],axes[axis_ind+1], up_to_line)\n axis_ind += 2\n if actions_exists:\n axes[axis_ind].clear()\n plot_actions_on_axis(actions_file,axes[axis_ind],is_tri,actions_to_plot=actions_to_plot,\n plot_to_file_line=int(up_to_line*actions_per_log),\n actions_ylim=actions_ylim)", "def display_frames_as_gif(frames):\n fig=e.cube.show_layout(frames[0]) \n print(\"Drawn\")\n def animate(i):\n return e.cube.update_plot(frames[i])\n anim = animation.FuncAnimation(fig, animate, frames = len(frames), interval=50,blit=True)", "def animate(self, frames, callback=None):\n if isinstance(frames, numbers.Integral):\n frames = (frames, 30.0)\n\n if isinstance(frames, tuple):\n frame_count, frame_rate = frames\n frames = numpy.linspace(0, frame_count / frame_rate, frame_count + 1, endpoint=True)\n\n for index in range(0, len(frames) - 1):\n frame = AnimationFrame(index, frames[index], frames[index + 1], self._animation)\n if callback:\n callback(frame)\n\n # Record the end-time of the last frame, so backends can calculate frame durations.\n self._animation[frames[-1]]", "def grid_animation_quick(self, frames, iterations=10, fps=0.02, figsize=(6, 6)):\r\n color_map = matplotlib.colors.ListedColormap(['white', 'black'])\r\n\r\n fig, ax = plt.subplots(figsize=figsize)\r\n\r\n for r in np.arange(0, iterations):\r\n ax.cla()\r\n ax.axes.grid(False)\r\n ax.set_axis_off()\r\n im = ax.imshow(frames[0], cmap=color_map, animated=True)\r\n for image, step in zip(frames[1:], np.arange(1, len(frames[1:])+1)):\r\n time.sleep(fps)\r\n 
ax.title.set_text('Rule 942 | Step ' + str(step) + ' | Active ' + str(int(np.sum(image))))\r\n im.set_data(image)\r\n fig.canvas.draw()", "def anim():\n i = 0\n while 1:\n\n for r in Reprs:\n r.draw(i)\n i = i+ 1\n i = i % len(t)\n yield", "def animate(input_filename):\n # With matplotlib, it's much, much faster to just update the properties\n # of a display object than it is to create a new one, so we'll just update\n # the data and position of the same objects throughout this animation...\n\n infile = paw_file(input_filename)\n\n # Since we're making an animation with matplotlib, we need \n # ion() instead of show()...\n plt.ion()\n fig = plt.figure()\n ax = fig.add_subplot(111)\n fig.suptitle(input_filename)\n\n # Make an image based on the first frame that we'll update later\n # (The first frame is never actually displayed)\n im = ax.imshow(infile.next()[1])\n\n # Make 4 rectangles that we can later move to the position of each paw\n rects = [Rectangle((0,0), 1,1, fc='none', ec='red') for i in range(4)]\n [ax.add_patch(rect) for rect in rects]\n\n title = ax.set_title('Time 0.0 ms')\n\n # Process and display each frame\n for time, frame in infile:\n paw_slices = find_paws(frame)\n\n # Hide any rectangles that might be visible\n [rect.set_visible(False) for rect in rects]\n\n # Set the position and size of a rectangle for each paw and display it\n for slice, rect in zip(paw_slices, rects):\n dy, dx = slice\n rect.set_xy((dx.start, dy.start))\n rect.set_width(dx.stop - dx.start + 1)\n rect.set_height(dy.stop - dy.start + 1)\n rect.set_visible(True)\n\n # Update the image data and title of the plot\n title.set_text('Time %0.2f ms' % time)\n im.set_data(frame)\n im.set_clim([frame.min(), frame.max()])\n fig.canvas.draw()", "def plot(particle_array, num_particles, axees, sim_len, plot_step):\n fig = plt.figure()\n ax1 = plt.axes(xlim=(-axees, axees),\n ylim=(-axees, axees))\n _, = ax1.plot([], [], lw=2)\n plt.xlabel('X [m]')\n plt.ylabel('Y [m]')\n\n Writer = animation.writers['ffmpeg']\n writer = Writer(fps=35, metadata=dict(artist='Me'), bitrate=1800)\n\n colors = ['blue','green','red','black'] # 'cyan','magenta','yellow\n lines = []\n for _ in range(num_particles):\n lobj = ax1.plot([],[],lw=2,color=colors[random.randrange(0,len(colors))])[0]\n lines.append(lobj)\n\n def init():\n for line in lines:\n line.set_data([],[])\n return lines\n\n coord_tuples = [([], []) for _ in range(num_particles)]\n\n def animate(i):\n for index in range(0, num_particles):\n coord_tuples[index][0].append(particle_array[2*index, i])\n coord_tuples[index][1].append(particle_array[2*index+1, i])\n\n xlist = [tup[0] for tup in coord_tuples]\n ylist = [tup[1] for tup in coord_tuples]\n\n for lnum, line in enumerate(lines):\n line.set_data(xlist[lnum][-5:], ylist[lnum][-5:])\n\n return lines\n\n # call the animator. 
blit=True means only re-draw the parts that have changed.\n anim = animation.FuncAnimation(fig, animate, init_func=init,\n frames=range(0, sim_len, plot_step),\n interval=10, blit=False)\n start = time.perf_counter()\n print('Creating animation ...\\n')\n anim.save('simulation.mp4', writer=writer)\n calc_time = time.perf_counter() - start\n print(f'animation time: {calc_time} s\\n')", "def animate(frame, line):\n\n # Not strictly neccessary, just so we know we are stealing these from\n # the global scope\n global all_x_data, all_y_data\n\n # We want up-to and _including_ the frame'th element\n current_x_data = all_x_data[: frame + 1]\n current_y_data = all_y_data[: frame + 1]\n\n line.set_xdata(current_x_data)\n line.set_ydata(current_y_data)\n\n # This comma is necessary!\n return (line,)", "def animateLFP(timestamps, lfp, raw_ripple, ripple_power, frame_size, statistic=None):\n\n # Turn interactive plotting off. It messes up animation\n plt.ioff()\n\n # Change this to '3d' if the need every arises for a multi-dimensional plot\n lfp_fig = plt.figure()\n plot_axes = plt.axes(projection=None)\n\n # Start with an empty plot, it can be then updated by animation functions\n # NOTE: The way frame is accessed in animation internals forces us to\n # make this an array if nothing else is being passed in. Having text\n # removes this requirement.\n lfp_frame, = plot_axes.plot([], [], animated=True)\n r_raw_frame, = plot_axes.plot([], [], animated=True)\n r_pow_frame, = plot_axes.plot([], [], animated=True)\n txt_template = 't = %.2fs'\n lfp_measure = plot_axes.text(0.5, 0.09, '', transform=plot_axes.transAxes)\n\n # Local functions for setting up animation frames and cycling through them\n def _nextAnimFrame(step=0):\n \"\"\"\n # Making sure that the step index and data are coming in properly\n print(step)\n print(lfp[step])\n \"\"\"\n lfp_frame.set_data(timestamps[step:step+frame_size], lfp[step:step+frame_size])\n r_raw_frame.set_data(timestamps[step:step+frame_size], raw_ripple[step:step+frame_size])\n r_pow_frame.set_data(timestamps[step:step+frame_size], ripple_power[step:step+frame_size])\n lfp_measure.set_text(txt_template % timestamps[step])\n # Updating the limits is needed still so that the correct range of data\n # is displayed! It doesn't update the axis labels though - That's a\n # different ballgame!\n plot_axes.set_xlim(timestamps[step], timestamps[step+frame_size])\n return lfp_frame, r_raw_frame, r_pow_frame, lfp_measure\n\n def _initAnimFrame():\n # NOTE: Init function called twice! 
I have seen this before but still\n # don't understand why it works this way!\n # print(\"Initializing animation frame...\")\n plot_axes.set_xlabel('Time (s)')\n plot_axes.set_ylabel('EEG (uV)')\n plot_axes.set_ylim(min(lfp), max(lfp))\n plot_axes.set_xlim(timestamps[0], timestamps[frame_size])\n plot_axes.grid(True)\n return _nextAnimFrame()\n\n n_frames = len(timestamps) - frame_size\n lfp_anim = animation.FuncAnimation(lfp_fig, _nextAnimFrame, np.arange(0, n_frames), \\\n init_func=_initAnimFrame, interval=RiD.LFP_ANIMATION_INTERVAL, \\\n blit=True, repeat=False)\n plt.figure(lfp_fig.number)\n\n # Make the filtered ripple thinner\n r_raw_frame.set_linewidth(0.5)\n plt.show(plot_axes)", "def __init__(\n self : \"animation\",\n filename : \"str\",\n size : \"Tuple[int,int]\" = None,\n pbar : \"bool\" = False,\n mbs : \"int\" = 16,\n dpi : \"int\" = 150,\n init_frame : \"matplotlib.figure.Figure\" = None,\n init_ax : \"matplotlib.axes._subplots.AxesSubplot\" = None,\n fps : \"int\" = 5,\n interactive : \"bool\" = False,\n autoSmooth : \"bool\" = False,\n smoothingFrames : \"int\" = 5,\n saveFinalFrame : \"int\" = False,\n smoothingTime : float = None,\n smoothingFunction : \"Callable\" = None\n ):\n self.filename = filename\n self.size = size\n self._mbs = mbs\n self._writer = imageio.get_writer(\n self.filename,\n mode='I',\n macro_block_size=self._mbs,\n fps=fps\n )\n self.fps = fps\n self.pbar = pbar\n self._frame_number = 0\n self._closed = False\n self.dpi = dpi\n self._cframe = None\n if init_frame and init_ax:\n self._init_frame(init_frame, init_ax)\n\n self._init_interactive = matplotlib.is_interactive()\n if self._init_interactive and not interactive:\n matplotlib.interactive(False)\n else:\n matplotlib.interactive(interactive)\n if autoSmooth:\n assert smoothingFrames > 0\n\n self._autosmooth = autoSmooth\n self._prevFrame = None\n\n\n # Set up smoothing\n if smoothingTime is None:\n self._smoothingFrames = smoothingFrames\n else:\n self._smoothingFrames = int(smoothingTime*fps)\n\n if smoothingFunction is None:\n self._smoothingFunction = self._linear_interpolation\n else:\n self._smoothingFunction = smoothingFunction\n\n self._saveFinalFrame = saveFinalFrame", "def animate(frames):\n plt.grid('on')\n ax = plt.gca()\n ax.set_xticks(np.arange(0.5, 10, 1))\n ax.set_yticks(np.arange(0.5, 10, 1))\n ax.set_xticklabels([])\n ax.set_yticklabels([])\n\n for i in range(len(env_list)):\n ax.imshow(env_list[i],cmap='binary')\n plt.pause(0.05)", "def add_frame(\n self : \"animation\",\n frame : \"matplotlib.figure.Figure\",\n facecolor : \"str\" = 'white'\n ):\n self._make_animation_from_raw_list([frame], facecolor=facecolor)", "def animate(i, subplot, xs, ys, cs):\n print(f\" Drawing generation {i}/{len(xs)} \\r\", end=\"\", flush=True)\n subplot.clear()\n subplot.set_title(f\"Gen {i}\")\n subplot.set_ylim(0, 1)\n subplot.set_xlim(0, 1)\n subplot.scatter(xs[i], ys[i], color=cs[i], s=individual_size)", "def run(self):\n if len(self.data_frames) == 0:\n raise RuntimeError(\"No frames data found!\")\n if self._animation is None:\n def infinite_iterator():\n while True: yield 8\n fig = plt.figure(figsize=(6, 8), dpi=160)\n ax = p3.Axes3D(fig)\n ax.axis('off')\n ax.grid(False)\n ax.view_init(-10, 85)\n ax.set_title('Avatar')\n ax.set_zlim3d([100, -60])\n ax.disable_mouse_rotation()\n # create dummy point\n lines = [ax.plot([x], [y], [z], 'k.', animated=True)[0]\n for x, y, z in self._frames.pop()]\n self._animation = animation.FuncAnimation(fig=fig,\n func=lambda num, lines: 
self._update(lines),\n frames=infinite_iterator(),\n fargs=(lines,),\n interval=int(1000. / self._fps),\n repeat=False,\n repeat_delay=None,\n blit=True)\n plt.ioff(); plt.show(block=False)", "def _write_frame(self : \"animation\",\n frame : \"np.ndarray\"\n ):\n self._writer.append_data(frame)\n self._frame_number += 1\n self._prevFrame = frame", "def next_frame(self, framenumber):\r\n \r\n if Animate==True:\r\n self.__text0.set_text(\"f={:4d}\".format(framenumber))\r\n patches = [self.__text0]\r\n timeallball=[]\r\n for ball in self.__ballList:\r\n time1ball=[]\r\n balls=self.__ballList.copy()\r\n balls.append(self.__cont)\r\n balls.remove(ball)\r\n for otherball in balls:\r\n othertime=ball.time_to_collision(otherball)\r\n if isinstance(othertime,int)==True or isinstance(othertime,float)==True:\r\n time1ball.append(othertime)\r\n if othertime==0:\r\n ball.collide(otherball)\r\n break\r\n timeallball.append(min(time1ball))\r\n if min(time1ball)==0:\r\n break\r\n for b in self.__ballList:\r\n b.move(min(timeallball))\r\n patches.append(b.get_patch())\r\n Gas.timepassed+=min(timeallball)\r\n \r\n #check to see if KE/momentum conserved, and see pressure change\r\n \"\"\"\r\n if Gas.timepassed>5 and Gas.timepassed<10 :\r\n print('Kinetic energy is ', self.kinetic_en())\r\n print('Total momentum is ', self.momentum())\r\n print(self.pressure())\r\n \"\"\" \r\n return patches", "def animate(self, frame):\r\n\r\n self.timer.set_text(str(frame.time + 1))\r\n\r\n # determine new and old objects\r\n pos_dict = {p.id: p.position for p in frame}\r\n prev_items = set(self.shapes.keys())\r\n next_items = set(pos_dict.keys())\r\n\r\n # new nodes\r\n for n_id in next_items - prev_items:\r\n self.add_object(n_id, pos_dict[n_id])\r\n\r\n # updated nodes\r\n for n_id in next_items.intersection(prev_items):\r\n self.move_object(n_id, pos_dict[n_id])\r\n\r\n # exited nodes\r\n for n_id in prev_items - next_items:\r\n self.remove_object(n_id)\r\n\r\n return ([patch for e in self.shapes.values() for patch in e])", "def data_play(Y, visualizer, frame_rate=30):\r\n \r\n\r\n for y in Y:\r\n visualizer.modify(y[None, :])\r\n time.sleep(1./float(frame_rate))", "def animate(self, i):\n try:\n self.lastSpectrum = self.spectrometer.getSpectrum()\n if self.darkReference is not None:\n self.lastSpectrum -= self.darkReference\n if self.whiteReference is not None:\n np.seterr(divide='ignore',invalid='ignore')\n if self.darkReference is not None:\n self.lastSpectrum = self.lastSpectrum / (self.whiteReference-self.darkReference)\n else:\n self.lastSpectrum = self.lastSpectrum / self.whiteReference \n\n self.plotSpectrum(spectrum=self.lastSpectrum)\n except usb.core.USBError as err:\n print(\"The spectrometer was disconnected. 
Quitting.\")\n self.quitFlag = True\n\n if self.quitFlag:\n self.animation.event_source.stop()\n self.animation = None\n plt.close()", "def animatePreview(loader, boundaries, step):\r\n import plotly.express as px\r\n fig = px.scatter(loader.data[(loader.data['f'] % 10) == 0], \r\n x=\"x\", y=\"y\", \r\n animation_frame=\"f\", animation_group='p', hover_name=\"p\",\r\n range_x=[boundaries[0], boundaries[1]], range_y=[boundaries[2], boundaries[3]],\r\n template=\"plotly_white\", title=\"Animation Preview\")\r\n fig.show()", "def animate(i): \n ax1.clear()\n font_dict = {'family':'sans-serif',\n 'color':'darkred',\n 'size':8}\n for i in range(len(xt)):\n ax1.text(xt[i], yt[i], tt[i], fontdict=font_dict)\n ax1.plot(xs, ys)\n #ax1.scatter(xt, yt, 'yo')\n\n # This is for plotting the coordinates and the class of the detected object\n animated_plot = plt.plot(xt, yt, 'yo')[0]\n animated_plot.set_xdata(xt)\n animated_plot.set_ydata(yt)\n plt.draw()", "def animate_with_numpy_frame_sequence(self, numpy_frame_sequence, frames_per_second=15):\n\n sleep_time = 1/frames_per_second\n for animation_frame in numpy_frame_sequence:\n tic = time.time()\n self.set_image_from_numpy_array(animation_frame)\n self.update()\n toc = time.time()\n frame_generation_time = toc-tic\n if frame_generation_time < sleep_time:\n new_sleep_time = sleep_time - frame_generation_time\n time.sleep(new_sleep_time)\n else:\n pass", "def update_plot(frame):\n global plotdata\n while True:\n try:\n data = q.get_nowait()\n except queue.Empty:\n break\n shift = len(data)\n plotdata = np.roll(plotdata, -shift, axis=0)\n plotdata[-shift:, :] = data\n for column, line in enumerate(lines):\n line.set_ydata(plotdata[:, column])\n return lines", "def plot_animate(self, A):\n fig = plt.figure()\n fig.set_tight_layout(False)\n ax = plt.axes(projection='3d')\n global cnt\n cnt = ax\n\n global cur_A\n global cur_B\n cur_A = A[0]\n cur_B = self.B[0]\n\n def steps(count=1):\n for i in range(count):\n df_A = pd.Series(data=cur_A[i], index=['x', 'y', 'z'])\n df_B = pd.Series(data=cur_B, index=['x', 'y', 'z'])\n x = [df_A.x, df_B.x]\n y = [df_A.y, df_B.y]\n z = [df_A.z, df_B.z]\n cnt.plot(x, y, z)\n\n def animate(frame):\n steps(1)\n return cnt\n anim = animation.FuncAnimation(fig, animate, frames=100)\n plt.show()", "def anim_func(self, i):\n raise NotImplementedError(\n \"anim_func function not reimplemented from base class\")", "def animate(results, draw_func, *args, interval=None):\n plt.figure()\n try:\n for t, state in results.iterrows():\n draw_func(t, state, *args)\n plt.show()\n if interval:\n sleep(interval)\n clear_output(wait=True)\n draw_func(t, state, *args)\n plt.show()\n except KeyboardInterrupt:\n pass", "def animate(self, idx):\n for a in range(self.art_num):\n if idx < len(self.artists[a][0]):\n xc, yc = self.artists[a][0][idx], self.artists[a][1][idx]\n self.lines[a].set_data(self.artists[a][0][:idx],\n self.artists[a][1][:idx])\n self.points[a].set_data(xc, yc)\n self.time_text.set_text(self.time_template % idx)\n return self.lines + self.points + [self.time_text]", "def animateTraj(loader, frame_start, frame_stop, boundaries, wall=False, cor=False, ai=None, path=\"traj_anim.gif\", save=False, step=1, fps=16, title=\"Trajectory Animation\", useFFMPEG=False):\r\n # prepare data for animation\r\n data = []\r\n person = []\r\n colors = []\r\n\r\n p_ = loader.data['p'].unique()\r\n people_count = int(p_[p_ < 1000].max())\r\n print(people_count)\r\n\r\n # load data in data structure for animation\r\n for i in np.arange(frame_start, 
frame_stop, step):\r\n data.append(loader.frame(i, ret_vel=False, with_id=False)[1])\r\n person.append(loader.frame(i, ret_vel=False, with_id=False)[0])\r\n\r\n #Set the figure for the animation framework\r\n fig = plt.figure(figsize = (10,6))\r\n #creating a subplot \r\n ax1 = fig.add_subplot(1,1,1)\r\n\r\n # figures specds\r\n ax1.set_xlim([boundaries[0], boundaries[1]])\r\n ax1.set_ylim([boundaries[2], boundaries[3]])\r\n\r\n ax1.set_aspect('equal', adjustable='box')\r\n ax1.set_xlabel('x Pos. / cm')\r\n ax1.set_ylabel('y Pos. / cm ')\r\n ax1.set_title(title, loc=\"left\")\r\n\r\n # dataset geometry\r\n if wall:\r\n ax1.vlines(-60, ymin=255, ymax=400, lw=3, color=\"fuchsia\")\r\n ax1.vlines(-60, ymin=-200, ymax=-95, lw=3, color=\"fuchsia\")\r\n\r\n if cor:\r\n # measurement region\r\n ax1.vlines(-200, ymin=0, ymax=180, lw=2, color=\"orange\")\r\n ax1.vlines(200, ymin=0, ymax=180, lw=2, color=\"orange\")\r\n\r\n # Walls\r\n ax1.hlines(0, xmin=-300, xmax=300, lw=2, color=\"fuchsia\")\r\n ax1.hlines(180, xmin=-300, xmax=300, lw=2, color=\"fuchsia\")\r\n\r\n #initialize line objects for plotting\r\n lines = []\r\n vals = []\r\n\r\n if ai is None:\r\n for i in range(people_count):\r\n lobj = ax1.plot([],[], lw=2)[0]\r\n lines.append(lobj)\r\n vals.append([[], []])\r\n else:\r\n for i in range(people_count):\r\n if (i+1001) in ai:\r\n lobj = ax1.plot([],[], color=\"black\", lw=2)[0]\r\n else:\r\n lobj = ax1.plot([],[], color=\"red\", lw=2)[0]\r\n lines.append(lobj)\r\n vals.append([[], []])\r\n\r\n def init():\r\n for line in lines:\r\n line.set_data([],[])\r\n return lines\r\n\r\n #Animation function that is called for each frame\r\n def animate(i):\r\n \r\n #update data for plotting\r\n for (per, dat) in zip(person[i], data[i]):\r\n\r\n if per > 1000:\r\n per -= 1000\r\n\r\n vals[int(per-1)][0].append(dat[0])\r\n vals[int(per-1)][1].append(dat[1])\r\n \r\n #write new data to line objects\r\n for lnum, line in enumerate(lines):\r\n line.set_data(vals[lnum][0], vals[lnum][1])\r\n return lines\r\n\r\n frames = int(np.floor((frame_stop - frame_start)/step))\r\n ani = animation.FuncAnimation(fig = fig, func = animate, frames = frames, interval = int(step*1000/fps), blit=True) \r\n plt.close(fig)\r\n \r\n # save animation by writing frames to .mp4 or .gif via writer\r\n if save:\r\n if useFFMPEG:\r\n writer = animation.FFMpegWriter(fps=fps/step, extra_args=['-vcodec', 'libx264'])\r\n else:\r\n writer = animation.PillowWriter(fps=fps/step, extra_args=['-vcodec', 'libx264'])\r\n ani.save(path, writer=writer)\r\n return ani", "def _animation_step(self, par_dict):\n\n t0 = time.time()\n dt = par_dict[\"dt\"]\n controller = par_dict[\"controller\"]\n integrator = par_dict[\"integrator\"]\n if controller is not None:\n _, _, tau = controller.get_control_output(\n meas_pos=self.x[:self.plant.dof],\n meas_vel=self.x[self.plant.dof:],\n meas_tau=np.zeros(self.plant.dof),\n meas_time=self.t)\n else:\n tau = np.zeros(self.plant.n_actuators)\n self.step(tau, dt, integrator=integrator)\n ee_pos = self.plant.forward_kinematics(self.x[:self.plant.dof])\n ee_pos.insert(0, self.plant.base)\n ani_plot_counter = 0\n for link in range(self.plant.n_links):\n self.animation_plots[ani_plot_counter].set_data(\n [ee_pos[link][0], ee_pos[link+1][0]],\n [ee_pos[link][1], ee_pos[link+1][1]])\n ani_plot_counter += 1\n self.animation_plots[ani_plot_counter].set_data(ee_pos[link+1][0],\n ee_pos[link+1][1])\n ani_plot_counter += 1\n\n set_arrow_properties(self.tau_arrowarcs[link],\n self.tau_arrowheads[link],\n 
float(np.squeeze(tau)),\n ee_pos[link][0],\n ee_pos[link][1])\n t = float(self.animation_plots[ani_plot_counter].get_text()[4:])\n t = round(t+dt, 3)\n self.animation_plots[ani_plot_counter].set_text(f\"t = {t}\")\n\n # if the animation runs slower than real time\n # the time display will be red\n if time.time() - t0 > dt:\n self.animation_plots[ani_plot_counter].set_color(\"red\")\n else:\n self.animation_plots[ani_plot_counter].set_color(\"black\")\n return self.animation_plots + self.tau_arrowarcs + self.tau_arrowheads", "def animate_1d(x, ys, animate_times, pause, labels=None):\n fig = plt.figure()\n min_y = min(min(np.amin(vals.real) for vals in y) for y in ys)\n max_y = max(max(np.amax(vals.real) for vals in y) for y in ys)\n ax = plt.axes(xlim=(min(x), max(x)), ylim=(min_y, max_y))\n if labels is None:\n labels = []\n labels.extend([\"Plot {}\".format(i) for i in range(len(labels), len(ys))])\n lines = [ax.plot([], [], lw=2, color=color, label=label)[0]\n for _, color, label in zip(range(len(ys)),\n cycle(['r', 'b', 'g', 'k', 'm', 'c', 'y']),\n labels)]\n time_text = ax.text(0.02, 0.95, '', transform=ax.transAxes)\n\n # initialization function: plot the background of each frame\n def init():\n for line in lines:\n line.set_data([], [])\n time_text.set_text('')\n return (*lines), time_text\n\n # animation function. This is called sequentially\n def animate(i):\n for line, y in zip(lines, ys):\n line.set_data(x, y[i].real)\n time_text.set_text(\"Solution at time={:.4}\".format(animate_times[i]))\n return (*lines), time_text\n\n # call the animator. blit=True means only re-draw the parts that have changed.\n\n # because else the object will not get creates and nothing will show! ...\n # noinspection PyUnusedLocal\n anim = animation.FuncAnimation(fig, animate, init_func=init,\n frames=len(animate_times), interval=pause, blit=True)\n plt.legend()\n plt.show()", "def show_animation():\n fig = plt.figure()\n ax = plt.axes(xlim=(can.C.x-can.R, can.R), ylim=(can.C.y-can.R, can.R))\n plt.title(\"Canister with %d particles\\nProducing force of %s\" % (len(can.listOfParticles), can.calP()))\n line, = ax.plot([], [], 'bo', lw=2)\n\n print \"Starting new animation\"\n print \"Canister with radius: \", can.R\n print \"Amount of particles: \", len(can.listOfParticles)\n print \"Force from particles: \", can.calP()\n\n # Here starts the actual animation\n def init():\n circle = plt.Circle((can.C.x, can.C.y), can.R, color='grey')\n fig.gca().add_artist(circle)\n return line,\n\n # Each new frame will plot the current system, then update it for the next frame\n def animate(i):\n x = []\n y = []\n for p in can.listOfParticles:\n plt.plot(p.P.x, p.P.y, '*', color='black')\n x.append(p.P.x)\n y.append(p.P.y)\n line.set_data(x, y)\n can.update(1)\n return line,\n\n anim = animation.FuncAnimation(fig, animate, init_func=init,\n interval=100, blit=True)\n plt.show()", "def render(self, pause=0.05, frame=\"bishop\", save_frames=False):\n if self.fig == None:\n self.init_render()\n\n points1, points2 = self.get_points_on_arc(num_points=100)\n\n while self.ax.lines:\n self.ax.lines.pop() # delete previous plots\n self.ax.plot(points1[:,0], points1[:,1], points1[:,2], label=\"Segment 1\", c=\"black\", linewidth=3)\n self.ax.plot(points2[:,0], points2[:,1], points2[:,2], label=\"Segment 2\", c=\"grey\", linewidth=2)\n self.ax.plot([self.goal[0]], [self.goal[1]], [self.goal[2]], label=\"Goal\", c=\"lime\", marker=\"*\", markersize=15)\n self.ax.legend() # display legend\n\n if frame == \"bishop\":\n tangent_vec1 
= self.tangent_vec_bishop1\n normal_vec1 = self.normal_vec_bishop1\n binormal_vec1 = self.binormal_vec_bishop1\n tangent_vec2 = self.tangent_vec_bishop2\n normal_vec2 = self.normal_vec_bishop2\n binormal_vec2 = self.binormal_vec_bishop2\n elif frame == \"frenet\":\n tangent_vec1 = self.tangent_vec_frenet1\n normal_vec1 = self.normal_vec_frenet1\n binormal_vec1 = self.binormal_vec_frenet1\n tangent_vec2 = self.tangent_vec_frenet2\n normal_vec2 = self.normal_vec_frenet2\n binormal_vec2 = self.binormal_vec_frenet2\n\n # add dynamic coordinate frenet frame of segment 1 tip\n while len(self.ax.artists) > 3:\n self.ax.artists.pop() # delete previous arrows, except base frame\n atangent1 = Arrow3D([self.tip_vec1[0], self.tip_vec1[0]+self.arrow_len*tangent_vec1[0]],\n [self.tip_vec1[1], self.tip_vec1[1]+self.arrow_len*tangent_vec1[1]],\n [self.tip_vec1[2], self.tip_vec1[2]+self.arrow_len*tangent_vec1[2]],\n arrowstyle=\"-|>\", lw=1, mutation_scale=10, color=\"b\")\n anormal1 = Arrow3D([self.tip_vec1[0], self.tip_vec1[0]+self.arrow_len*normal_vec1[0]],\n [self.tip_vec1[1], self.tip_vec1[1]+self.arrow_len*normal_vec1[1]],\n [self.tip_vec1[2], self.tip_vec1[2]+self.arrow_len*normal_vec1[2]],\n arrowstyle=\"-|>\", lw=1, mutation_scale=10, color=\"r\")\n abinormal1 = Arrow3D([self.tip_vec1[0], self.tip_vec1[0]+self.arrow_len*binormal_vec1[0]],\n [self.tip_vec1[1], self.tip_vec1[1]+self.arrow_len*binormal_vec1[1]],\n [self.tip_vec1[2], self.tip_vec1[2]+self.arrow_len*binormal_vec1[2]],\n arrowstyle=\"-|>\", lw=1, mutation_scale=10, color=\"g\")\n self.ax.add_artist(atangent1)\n self.ax.add_artist(anormal1)\n self.ax.add_artist(abinormal1)\n # add dynamic coordinate frenet frame of segment 2 tip\n atangent2 = Arrow3D([self.tip_vec2[0], self.tip_vec2[0]+self.arrow_len*tangent_vec2[0]],\n [self.tip_vec2[1], self.tip_vec2[1]+self.arrow_len*tangent_vec2[1]],\n [self.tip_vec2[2], self.tip_vec2[2]+self.arrow_len*tangent_vec2[2]],\n arrowstyle=\"-|>\", lw=1, mutation_scale=10, color=\"b\")\n anormal2 = Arrow3D([self.tip_vec2[0], self.tip_vec2[0]+self.arrow_len*normal_vec2[0]],\n [self.tip_vec2[1], self.tip_vec2[1]+self.arrow_len*normal_vec2[1]],\n [self.tip_vec2[2], self.tip_vec2[2]+self.arrow_len*normal_vec2[2]],\n arrowstyle=\"-|>\", lw=1, mutation_scale=10, color=\"r\")\n abinormal2 = Arrow3D([self.tip_vec2[0], self.tip_vec2[0]+self.arrow_len*binormal_vec2[0]],\n [self.tip_vec2[1], self.tip_vec2[1]+self.arrow_len*binormal_vec2[1]],\n [self.tip_vec2[2], self.tip_vec2[2]+self.arrow_len*binormal_vec2[2]],\n arrowstyle=\"-|>\", lw=1, mutation_scale=10, color=\"g\")\n self.ax.add_artist(atangent2)\n self.ax.add_artist(anormal2)\n self.ax.add_artist(abinormal2)\n# goal_vec = (self.goal-self.tip_vec2)/np.linalg.norm(self.goal-self.tip_vec2)\n# agoal = Arrow3D([self.tip_vec2[0], self.tip_vec2[0]+self.arrow_len*goal_vec[0]],\n# [self.tip_vec2[1], self.tip_vec2[1]+self.arrow_len*goal_vec[1]],\n# [self.tip_vec2[2], self.tip_vec2[2]+self.arrow_len*goal_vec[2]],\n# arrowstyle=\"fancy\", lw=0.5, mutation_scale=15, color=\"magenta\")\n# self.ax.add_artist(agoal)\n mypause(pause) # pause video without losing focus of current window\n # save frames of plot if asked\n if save_frames == True:\n filename = \"figures/frame\"+str(self.frame)[1:]+\".png\"\n self.fig.savefig(filename)\n self.frame += 1", "def animate(i):\n plot_viz_x = []\n plot_viz_y = []\n for node in graph:\n node_x, node_y = node.position\n agent_count = node.agent_count\n\n for _ in range(0, agent_count):\n 
plot_viz_x.append(self._get_visual_position(node_x))\n plot_viz_y.append(self._get_visual_position(node_y))\n\n self.plots[0].set_data(plot_viz_x, plot_viz_y)\n self.plots[1].set_data(leader.visual[0], leader.visual[1])\n return self.plots", "def add_frames(\n self : \"animation\",\n frameList : \"list[matplotlib.figure.Figure]\",\n facecolor : \"str\" = 'white'\n ):\n self._make_animation_from_raw_list(frameList, facecolor=facecolor)", "def animate(self, save_path=None):\n interval = 100 # 1 frame per interval ms\n # frames = int(len(self.map_paths) / self.frame_skip) # times to call updatefig\n frames = int(min(len(self.map_paths) / self.frame_skip, len(self.experienced_states_kancks_paths) / self.frame_skip / 2))\n frames -= 1 # because of my bad logging\n blit = True # acceralate computation\n ani = animation.FuncAnimation(self.fig, self.updateifig, frames=frames,\n interval=interval, blit=blit)\n if save_path is not None:\n ani.save(os.path.join(save_path, self.save_name), writer='ffmpeg')\n else:\n plt.show()", "def animate(self,i): # Animate function is called iteratively\n\n for x in self.list_patient: # Traversing through each patient\n x.Update()\n # updating each\n self.Computational_part()\n # invoking obj's function\n self.boot_induvidual_plot()\n # invoking obj's function", "def grid_animation(self, steps, figure_size=(12, 12), speed=100):\r\n\r\n steps -= 1\r\n x = self.seed\r\n\r\n fig, ax = plt.subplots(figsize=figure_size)\r\n ax.grid(False)\r\n ax.get_xaxis().set_visible(False)\r\n ax.get_yaxis().set_visible(False)\r\n color_map = matplotlib.colors.ListedColormap(['white', 'black'])\r\n im = plt.imshow(x[1:-1:1, 1:-1:1], interpolation='nearest', cmap=color_map, animated=True)\r\n counter = 0\r\n\r\n def update_figure(*args):\r\n nonlocal x, counter, fig\r\n\r\n counter += 1\r\n x, stats = self.update_grid(x)\r\n plt.title(self.title + ' | Step ' + str(counter), fontsize=14)\r\n im.set_array(x[1:-1:1, 1:-1:1])\r\n\r\n return im, # why is this comma necessary?\r\n\r\n ani = animation.FuncAnimation(fig, update_figure, frames=steps,\r\n interval=speed, blit=False, repeat=False)\r\n\r\n return ani", "def drawPathToFrame( oVideo, oPathXY, iFrame=1, iFrameSize=(40,40) ):\n oPathXY_t = oPathXY[:iFrame,:]\n showImage( oVideo[...,iFrame], 'Pot do okvirja %d' % iFrame )\n for i in range(1,oPathXY_t.shape[0]):\n plt.plot(oPathXY_t[i-1:i+1,0],oPathXY_t[i-1:i+1,1],'--r')\n if i==1 or (i%5)==0:\n plt.plot( oPathXY_t[i,0],oPathXY_t[i,1],'xr',markersize=3)\n \n dx = iFrameSize[0]/2; dy = iFrameSize[1]/2\n plt.plot( (oPathXY_t[-1,0]-dx,oPathXY_t[-1,0]+dx),(oPathXY_t[-1,1]+dy,oPathXY_t[-1,1]+dy),'-g') \n plt.plot( (oPathXY_t[-1,0]+dx,oPathXY_t[-1,0]+dx),(oPathXY_t[-1,1]-dy,oPathXY_t[-1,1]+dy),'-g') \n plt.plot( (oPathXY_t[-1,0]-dx,oPathXY_t[-1,0]-dx),(oPathXY_t[-1,1]-dy,oPathXY_t[-1,1]+dy),'-g')\n plt.plot( (oPathXY_t[-1,0]-dx,oPathXY_t[-1,0]+dx),(oPathXY_t[-1,1]-dy,oPathXY_t[-1,1]-dy),'-g')", "def anim_scatter_plot(points_list, values, \n fname=\"anim_scatter.mpg\", fps=2, *args, **kwargs):\n print \"Genrating temp images\"\n for idx, pts in enumerate(points_list):\n print \"\\tPlot %i of %i\" % (idx, len(points_list))\n scatter_plot(pts, values, \"_tmp_%i.png\" % idx, *args, **kwargs)\n print \"Creating animation\" \n os.system(\"mencoder 'mf://_tmp_*.png' -mf type=png:fps=%i -ovc\\\n lavc -lavcopts vcodec=wmv2 -oac copy -o %s\" % (fps, fname))\n print \"Removing temp files\"\n os.system(\"rm -f _tmp_*.png\")", "def display_frames_as_gif(frames, video_name):\n Writer = 
animation.writers['ffmpeg']\n writer = Writer(fps=15, metadata=dict(artist='Me'), bitrate=1800)\n #plt.figure(figsize=(frames[0].shape[1] / 72.0, frames[0].shape[0] / 72.0), dpi = 72)\n patch = plt.imshow(frames[0])\n plt.axis('off')\n\n def animate(i):\n patch.set_data(frames[i])\n\n anim = animation.FuncAnimation(plt.gcf(), animate, frames = len(frames), interval=50)\n# display(display_animation(anim, default_mode='loop'))\n anim.save(result_floder + '/' + video_name, writer=writer)", "def cycle(effect):\n\n def animate(thing):\n frames = (list(frame) for frame in effect(thing))\n yield from cycle(frames)\n return animate", "def runFrame(self):\n self._drawFrame(self._advanceTime())", "def update_anim(frame, self):\n self.step()\n self.im.set_data(self.array)\n self.im2.set_data(self.array2)", "def onTimer(self, evt): \r\n # restore the clean background, saved at the beginning \r\n self.canvas.restore_region(self.bg) \r\n # update the data \r\n self.r1 = self.r1[1:] + [t1.getRatio()] \r\n self.r2 = self.r2[1:] + [t2.getRatio()]\r\n self.v1 = self.v1[1:] + [t1.getSpeed()] \r\n self.v2 = self.v2[1:] + [t2.getSpeed()] \r\n \r\n # update the plot \r\n self.pr1.set_ydata(self.r1) \r\n self.pr2.set_ydata(self.r2)\r\n self.pv1.set_ydata(self.v1) \r\n self.pv2.set_ydata(self.v2) \r\n # just draw the \"animated\" objects \r\n self.ax.draw_artist(self.pr1) \r\n self.ax.draw_artist(self.pr2)\r\n self.ax.draw_artist(self.pv1) \r\n self.ax.draw_artist(self.pv2) \r\n self.canvas.blit(self.ax.bbox)", "def animate(self, i):\n # save the cooridnates of the first particle as local vaiables\n self.time_text.set_text(\"Time = %.1f\" % self.sy.short_timer[i])\n self.KE_text.set_text(\"KE = %.6f\" % self.sy.Total_Energies[\"Kinetic\"][i])\n self.GPE_text.set_text(\n \"GPE = %.6f\" % self.sy.Total_Energies[\"Gravitational\"][i]\n )\n self.energy_text.set_text(\"E = %.6f\" % self.sy.Total_Energies[\"Total\"][i])\n plt.tight_layout() # This supposedly makes stops the label from falling off.\n a = self.sy.coordinate_grid[0, 0, i]\n b = self.sy.coordinate_grid[1, 0, i]\n if self.move_with: # inertial frame\n self.line.set_data(\n self.sy.coordinate_grid[0, :, i], self.sy.coordinate_grid[1, :, i]\n )\n self.galactic_centre.set_data(a, b)\n self.impactor.set_data(\n self.sy.coordinate_grid[0, -1, i], self.sy.coordinate_grid[1, -1, i]\n )\n if self.move_with: # comoving frame\n self.line.set_data(\n self.sy.coordinate_grid[0, :, i] - a,\n self.sy.coordinate_grid[1, :, i] - b,\n )\n self.galactic_centre.set_data(0.0, 0.0)\n self.impactor.set_data(\n self.sy.coordinate_grid[0, -1, i] - a,\n self.sy.coordinate_grid[1, -1, i] - b,\n )\n return (\n self.line,\n self.galactic_centre,\n self.impactor,\n self.time_text,\n self.KE_text,\n self.GPE_text,\n self.energy_text,\n )", "def create_animations(figure, filename=None, sharing=\"public\", auto_open=True):\n # This function is no longer needed since plot now supports figures with\n # frames. Delegate to this implementation for compatibility\n return plot(figure, filename=filename, sharing=sharing, auto_open=auto_open)", "def _plot(\n self, \n frame_idx: int, \n scores: List[float], \n losses: List[float],\n ):\n clear_output(True)\n plt.figure(figsize=(20, 5))\n plt.subplot(131)\n plt.title('frame %s. 
score: %s' % (frame_idx, np.mean(scores[-10:])))\n plt.plot(scores)\n plt.subplot(132)\n plt.title('loss')\n plt.plot(losses)\n plt.show()", "def showVideo( oVideo, oPathXY=np.array([]) ):\n global oVideo_t, iFrame, oPathXY_t\n fig = plt.figure()\n # prikazi prvi okvir\n iFrame = 0\n oPathXY_t = oPathXY\n oVideo_t = oVideo\n print(oVideo.shape)\n im = plt.imshow(oVideo[...,iFrame], cmap=plt.get_cmap('Greys_r'))\n # definiraj funkcijo za osvezevanje prikaza\n def updatefig(*args):\n global oVideo_t, iFrame, oPathXY_t\n iFrame = ( iFrame + 1 ) % oVideo_t.shape[-1]\n im.set_array( oVideo_t[...,iFrame] ) \n if iFrame < oPathXY.shape[0]:\n plt.plot( oPathXY[iFrame,0], oPathXY[iFrame,1], 'xr' ,markersize=3 ) \n return im,\n # prikazi animacijo poti\n ani = animation.FuncAnimation(fig, updatefig, interval=25, blit=True)\n plt.show()", "def animate_scatters(iteration, ax, pos):\n \n # Change viewing angle\n ax.view_init(pos[iteration][0], pos[iteration][1])", "def _update_anim(self):\n if self._skip_frames > 1:\n # Do not render while _skip_frames is > 1\n self._skip_frames -= 1\n else:\n # Render frame\n self._visualization.taskMgr.step()\n # Calculate number of frames that need to be skipped\n self._skip_frames = int(1 / self._fps / self._dt)", "def fdplot(self, imx):\n fig = plt.figure()\n maxval = np.max(imx)\n ims = list(map(lambda im: [plt.imshow(np.fabs(im),norm=colors.Normalize(0.0,maxval))], imx))\n animation = anim.ArtistAnimation(fig,ims,interval=50)\n plt.show()", "def animate(times: np.ndarray, angles: np.ndarray) -> None:\n x = np.sin(angles)\n y = -np.cos(angles)\n\n fig = plt.figure()\n ax = fig.add_subplot(111, autoscale_on=False, xlim=(-2, 2), ylim=(-2, 2))\n ax.grid()\n\n line, = ax.plot([], [], \"o-\", lw=2)\n\n def init():\n line.set_data([], [])\n return (line,)\n\n def animate(i):\n thisx = [0, x[i]]\n thisy = [0, y[i]]\n\n line.set_data(thisx, thisy)\n return (line,)\n\n ani = animation.FuncAnimation(\n fig, animate, np.arange(1, len(y)), interval=25, blit=True, init_func=init\n )\n plt.show()", "def _append_frame(self, observation):\n if self._counter % self._record_every == 0:\n self._frames.append(self._render_frame(observation[self.visual_key]))", "def draw(self, frame):\n for prediction in self.predictions:\n prediction.draw(frame)", "def plot_frames(beads, sim, ti, tf, savebase, save_eps):\n \n ### normalize variables for plotting purposes\n \n lx = sim.lx/sim.bl\n ly = sim.ly/sim.bl\n \n ### set general plot properties\n\n savebase += 'eps_' + str(sim.eps) + '_fp_' + str(sim.fp) + '_areak_' + str(sim.areak) + '/'\n os.system(\"mkdir -p \" + savebase)\n quant_steps = 2056\n norm = mpl.colors.Normalize(vmin=0, vmax=sim.ncells) \n downlim = -2\n uplim = lx+2\n num_ticks = 5\n ax_len = 1.0 # Length of one subplot square box\n ax_b = 0.0 # Beginning/offset of the subplot in the box\n ax_sep = 0.0 # Separation length between two subplots\n total_subplots_in_x = 1 # Total number of subplots \n fig = plt.figure()\n subp = misc_tools.Subplots(fig, ax_len, ax_sep, ax_b, total_subplots_in_x) \n ax0 = subp.addSubplot()\n \n ### plot the frames\n \n for step in range(ti, tf):\n \n ### normalize variables for plotting purposes\n \n beads.xi[step, 0, :] /= sim.bl\n beads.xi[step, 1, :] /= sim.bl\n \n time = step*sim.dt\n print 'Step / Total : ', step, tf\n \n ### plot \n\n subp = misc_tools.Subplots(fig, ax_len, ax_sep, ax_b, total_subplots_in_x) \n ax0 = subp.addSubplot()\n \n line0 = ax0.scatter(beads.xi[step, 0, :], beads.xi[step, 1, :], s=1, c=beads.cid, \\\n 
cmap=plt.cm.get_cmap('jet',quant_steps), \\\n edgecolors='None', alpha=0.7, vmin=0, vmax=sim.ncells, \\\n norm=norm, rasterized=True)\n \n ax0.axis('scaled')\n \n ### title\n \n ax0.set_title(\"$t/\\\\tau_{D}$ = \" + \"{0:.2f}\".format(time/sim.tau_D) + \\\n \", $t/\\\\tau_{A}$ = \" + \"{0:.2f}\".format(time/sim.tau_A), fontsize=30)\n \n ### labels\n \n ax0.set_xlabel(\"$x/r_{0}$\", fontsize=40)\n ax0.set_ylabel(\"$y/r_{0}$\", fontsize=40)\n\n ### limits\n\n ax0.set_xlim((downlim, uplim))\n ax0.set_ylim((downlim, uplim))\n \n ### ticks\n \n ax0.xaxis.set_ticks(np.linspace(0, uplim, num_ticks, endpoint=True))\n ax0.yaxis.set_ticks(np.linspace(0, uplim, num_ticks, endpoint=True))\n ax0.tick_params(axis='both', which='major', labelsize=30)\n \n ### save\n\n savepath1 = savebase + \"frame-\" + \"{0:05d}\".format(int(step)) + \".png\"\n if save_eps:\n savepath2 = savebase + \"frame-\" + \"{0:05d}\".format(int(step)) + \".eps\"\n \n plt.savefig(savepath1, dpi=200, bbox_inches='tight', pad_inches=0.08)\n if save_eps:\n plt.savefig(savepath2, dpi=200, bbox_inches='tight', pad_inches=0.08) \n fig.clf() \n \n return", "def _get_frame(frame_index, plots):\n\n # TODO Using the indices of the self.frames, plot in correct location.\n # Okay right now there is a problem where it's unknown whether the set of coordinates\n # is a line or a dot -- that info got lost up there\n\n for amb_index in range(len(self.frames[frame_index])):\n xs = self.frames[frame_index][amb_index][0]\n ys = self.frames[frame_index][amb_index][1]\n\n # if len(xs) > 1:\n # if xs[0] == xs[1]:\n # plots[amb_index][1].set_data([xs[0]], [ys[0]])\n # if xs[-2] == xs[-1]:\n # plots[amb_index][1].set_data([xs[-1]], [ys[-1]])\n\n plots[amb_index][0].set_data(xs, ys)\n\n print(plots[len(self.ambulance_locations)])\n\n return plots,", "def animation(self, freq=100):\n if (self.current_time - self.timer) > freq:\n if self.index < (len(self.image_list) - 1):\n self.index += 1\n else:\n self.index = 0\n self.timer = self.current_time\n self.image = self.image_list[self.index]", "def random_walk_draw(self,num_plots,animated=False,show=True):\n\t\t\n\t\tt_x_arrays = []\n\t\tt_max = self.n\n\t\tfor _ in range(num_plots):\n\t\t\tcurrent_x = self.x_initial\n\t\t\tx_array = [current_x]\n\t\t\tt_array = range(t_max + 1)\n\t\t\tsteps = self._random_walk_simulation()\n\t\t\tfor s in steps:\n\t\t\t\tcurrent_x += s\n\t\t\t\tx_array.append(current_x)\n\t\t\tt_x_arrays.append( [x_array,t_array] )\n\t\t\n\t\t\n\t\tfig = plt.figure('Random walk simulation')\n\t\tax = fig.add_subplot(1,1,1)\n\t\tax.set_ylim([(round(min(x_array) - np.sqrt(self.n)*3)),round(max(x_array) + np.sqrt(self.n)*3)])\n\t\tax.set_xlim([-(round(np.sqrt(self.n))),self.n+(round(np.sqrt(self.n)))])\n\t\t\n\t\tif animated == True: # THIS CASE CURRENTLY HAS BUG FOR SOME REASON. 
CODE IS IDENTICAL TO 2D ANIMATION?\n\t\t\tfig.suptitle('Simulation of 1D random walk, live')\n\t\t\tself.index = 0\n\t\t\tdef update(i):\n\t\t\t\tax.clear()\n\t\t\t\tax.set_ylim([(round(min(x_array) - np.sqrt(self.n)*3)), round(max(x_array) + np.sqrt(self.n)*3)])\n\t\t\t\tax.set_xlim([-(round(np.sqrt(self.n))), self.n+(round(np.sqrt(self.n)))])\n\t\t\t\tfor i in t_x_arrays:\n\t\t\t\t\tx_vals,t_vals = i \n\t\t\t\t\tax.plot(t_vals[:self.index], x_vals[:self.index])\n\t\t\t\tself.index += 1\n\t\t\ta = anim.FuncAnimation(fig, update, frames=self.n, repeat=False,interval=10)\n\t\telse:\n\t\t\tfig.suptitle('Simulation of 1D random walk, static')\n\t\t\tfor i in t_x_arrays:\n\t\t\t\tx_vals,t_vals = i\n\t\t\t\tax.plot(t_vals, x_vals)\n\t\t\t\n\t\tif show == True:\n\t\t\tplt.show()", "def do_animation(self, save=False):\n\n #fig, self.ax = plt.subplots()\n fig1 = plt.figure(constrained_layout=False)\n spec1 = gridspec.GridSpec(ncols=11, nrows=12, figure=fig1)\n self.ax = fig1.add_subplot(spec1[0:6, :])\n self.ax2 = fig1.add_subplot(spec1[7:, 0:5])\n self.ax3 = fig1.add_subplot(spec1[7:, 6:])\n #self.ax3 = fig1.add_subplot(spec1[3, ])\n for s in ['top','bottom','left','right']:\n self.ax.spines[s].set_linewidth(2)\n self.ax.set_aspect('equal', 'box')\n self.ax.set_xlim(0, self.box_length)\n self.ax.set_ylim(0, self.box_length/2)\n self.ax2.set_xlim(0, 10000)\n self.ax2.set_ylim(0, self.n+10)\n self.ax3.set_xlim(0, 10000)\n self.ax3.set_ylim(0, self.n+10)\n self.ax.xaxis.set_ticks([])\n self.ax.yaxis.set_ticks([])\n self.ax2.xaxis.set_ticks([])\n self.ax2.yaxis.set_ticks([])\n self.ax3.yaxis.set_ticks([50,100,150])\n self.ax3.xaxis.set_ticks([])\n\n #self.ax3.yaxis.set_ticks([50,100,150])\n #followed by '+str(social_dist)+'% people.\n anim = animation.FuncAnimation(fig1, self.animate, init_func=self.init, frames=10000, interval=2, blit=False)\n\n if save:\n Writer = animation.writers['ffmpeg']\n writer = Writer(fps=100, bitrate=1800)\n anim.save('Final_'+str(self.social_dist)+'_social.mp4', writer=writer)\n plt.close()\n else:\n plt.show()", "def animation(self, Y=None, file_name=None, save_file=None, figure_size=5, scale=1, elevation=30, num_frame=700):\n import imageio\n from matplotlib import pyplot as plt, animation\n from mpl_toolkits.mplot3d import Axes3D\n plt.switch_backend(\"agg\") # for compatibility\n\n coordinates = self.solver.coordinates\n dim = coordinates.shape[1]\n if dim != 3:\n raise ValueError(\"Can't animate %dD data\" % dim)\n\n if file_name:\n if not (Y is None):\n raise ValueError(\"Evaluation data and file should not be provided at the same time\")\n with open(file_name, \"r\") as fin:\n Y = [line.strip() for line in fin]\n elif Y is None:\n Y = [\"unknown\"] * self.graph.num_vertex\n Y = np.asarray(Y)\n\n mean = np.mean(coordinates, axis=0)\n std = np.std(coordinates, axis=0)\n inside = np.abs(coordinates - mean) < self.OUTLIER_THRESHOLD * std\n indexes, = np.where(np.all(inside, axis=1))\n # discard outliers\n coordinates = coordinates[indexes]\n Y = Y[indexes]\n\n settings = []\n degrees = np.linspace(0, 360, num_frame, endpoint=False)\n for degree in degrees:\n settings.append((coordinates, Y, degree, figure_size, scale, elevation))\n pool = multiprocessing.Pool(self.solver.num_worker + self.solver.num_sampler)\n frames = pool.map(render_animation, settings)\n logger.warning(\"save animation to `%s`\" % save_file)\n imageio.mimsave(save_file, frames, fps=num_frame / 70, subrectangles=True) # 70 seconds\n\n return {}", "def run_anime(self, inval=10, rep=True, 
blitit=False):\n ani = animation.FuncAnimation(self.fig, self.animate,\n len(self.artists[0][0]), repeat=rep,\n interval=inval, blit=blitit,\n init_func=self.init_anime)\n plt.show()", "def callback(indata, frames, time, status):\n\tif status:\n\t\tprint(status, flush=True)\n\n\tif (plotting):\n\t\ttime = np.linspace(time.inputBufferAdcTime, time.currentTime, 512)\n\t\ttimes.put(time)\n\t\tqueue.put(np.frombuffer(indata, dtype='int32'))\n\n\telse:\n\t\tqueue.put(indata)", "def animate(self, i):\n self.advance_animation(0.01)\n self.get_status()\n self.ax2.clear()\n self.ax3.clear()\n self.ax2.plot(np.ones(len(self.healthy))*self.total_beds, 'k--')\n samples = range(0, len(self.healthy))\n self.ax2.stackplot(samples, self.sick, self.healthy, self.recovered, self.dead, labels=['Sick: ' +str(self.sick[-1]),'Healthy: '+str(self.healthy[-1]),'Recovered: '+str(self.recovered[-1]), 'Dead: '+str(self.dead[-1])], colors=['orangered','forestgreen', 'deepskyblue', 'black'])\n #self.ax2.legend(bbox_to_anchor=(1.04,0), loc=\"lower left\", borderaxespad=0)\n self.ax2.legend(loc='upper center', bbox_to_anchor=(0.5, 1.05), ncol=2, fancybox=True, fontsize=6, shadow=True)\n self.ax2.xaxis.set_ticks([])\n self.ax2.set_ylim(0, self.n+30)\n self.ax3.plot(samples, self.healthy,'forestgreen', label = 'Healthy')\n self.ax3.plot(samples, self.sick, 'orangered', label = 'Sick')\n self.ax3.plot(samples, self.recovered, 'deepskyblue', label = 'Recovered')\n self.ax3.plot(samples, self.dead, 'black', label = 'Dead')\n self.ax3.legend(loc='upper center', bbox_to_anchor=(0.5, 1.05), ncol=2, fancybox=True, fontsize=6, shadow=True)\n self.ax3.set_ylim(0, self.n+30)\n self.ax3.xaxis.set_ticks([])\n self.ax2.yaxis.set_ticks([])\n self.ax2.set_xlabel('Change over time')\n self.ax3.set_xlabel('Change over time')\n self.ax.set_title('Social Distancing Followed By '+str(self.social_dist)+'% People.')\n self.ax2.set_title('Stacked area graph for each category', fontsize = 8)\n self.ax3.set_title('Percentage graph for each category', fontsize = 8)\n self.exit_animate()", "def makeMovie(self, animation, filename=\"brainmovie%07d.png\", offset=0,\n fps=30, size=(1920, 1080), interpolation=\"linear\"):\n # build up two variables: State and Anim.\n # state is a dict of all values being modified at any time\n state = dict()\n # anim is a list of transitions between keyframes\n anim = []\n setfunc = self.ui.set\n for f in sorted(animation, key=lambda x:x['idx']):\n if f['idx'] == 0:\n setfunc(f['state'], f['value'])\n state[f['state']] = dict(idx=f['idx'], val=f['value'])\n else:\n if f['state'] not in state:\n state[f['state']] = dict(idx=0, val=self.getState(f['state'])[0])\n start = dict(idx=state[f['state']]['idx'],\n state=f['state'],\n value=state[f['state']]['val'])\n end = dict(idx=f['idx'], state=f['state'], value=f['value'])\n state[f['state']]['idx'] = f['idx']\n state[f['state']]['val'] = f['value']\n if start['value'] != end['value']:\n anim.append((start, end))\n\n for i, sec in enumerate(np.arange(0, anim[-1][1]['idx']+1./fps, 1./fps)):\n for start, end in anim:\n if start['idx'] < sec <= end['idx']:\n idx = (sec - start['idx']) / float(end['idx'] - start['idx'])\n if start['state'] == 'frame':\n func = mixes['linear']\n else:\n func = mixes[interpolation]\n\n val = func(np.array(start['value']), np.array(end['value']), idx)\n if isinstance(val, np.ndarray):\n setfunc(start['state'], val.ravel().tolist())\n else:\n setfunc(start['state'], val)\n self.getImage(filename%(i+offset), size=size)", "def 
animate(x,y,title=None,step=None,pause=0.001):\n plt.clf()\n\n if step is not None and title is not None:\n plt.title(\"%s (Step %d)\" %(title,step))\n\n plt.plot(x,y)\n plt.draw()\n plt.pause(pause)\n plt.show(block=False)", "def animate(self, save_path=None):\n interval = 100 # 1 frame per interval ms\n frames = int(20 * len(self.experienced_states_kancks_paths) / self.frame_skip) # times to call updatefig\n blit = True # acceralate computation\n ani = animation.FuncAnimation(self.fig, self.updateifig, frames=frames,\n interval=interval, blit=blit)\n if save_path is not None:\n ani.save(os.path.join(save_path, 'anim.mp4'), writer='ffmpeg')\n else:\n plt.show()", "def __init__(self, frame=1):\n self._frame = frame\n self._ticks = []", "def animate_configuration(self, fps=30, **kwargs):\n\n if self.config_plot_update_func is None:\n msg = ('No ploting update function has been assigned to '\n 'config_plot_update_func.')\n raise ValueError(msg)\n\n kwargs.pop('interval', None) # ignore the user's supplied interval\n try:\n sample_rate = int(1.0 / np.diff(self.result.index).mean())\n except AttributeError:\n msg = (\"No trajectory has been computed yet, so the animation \"\n \"can't run. Run one of the response functions.\")\n raise AttributeError(msg)\n\n fps = int(fps)\n if sample_rate != fps:\n trajectories, interval = self._resample_trajectories(fps)\n else:\n trajectories, interval = self.result, 1000 / sample_rate\n\n # TODO : Could be:\n # axes, *objs_to_modify = ..\n # try:\n # fig = axes.figure\n # except AttributeError:\n # fig = axes[0].figure\n try:\n fig, *objs_to_modify = self.plot_configuration()\n except TypeError:\n msg = ('The configuration plot function does not return any objects'\n ' to modify in the update function.')\n raise ValueError(msg)\n\n def gen_frame(row_tuple, pop_list):\n time = row_tuple[0]\n row = row_tuple[1]\n # Don't mutate the orginal list.\n pop_list = pop_list.copy()\n args = []\n for k in getfullargspec(self.config_plot_update_func).args:\n if k == 'time':\n args.append(time)\n elif k == 'time__hist':\n args.append(trajectories[:time].index)\n elif k == 'time__futr':\n args.append(trajectories[time:].index)\n elif k.endswith('__hist'):\n args.append(trajectories[k[:-6]][:time])\n elif k.endswith('__futr'):\n args.append(trajectories[k[:-6]][time:])\n elif k in trajectories: # constants, coordinates, measurements\n args.append(row[k])\n elif k in self.constants:\n args.append(self.constants[k])\n else: # must be matplotlib object\n # TODO : This last part is quite fragile. It requires these\n # remaining args to be in the same order as the returned\n # tuple from the plot function and there is no way to know\n # if these are the correct objects to append if the order\n # isn't correct.\n args.append(pop_list.pop(0))\n self.config_plot_update_func(*args)\n\n # TODO : Run this with the initial conditions so that errors will\n # propogate before the animation is run.\n # NOTE : This is useful to uncomment in debugging because the errors\n # push to the top if in the FuncAnimation.\n #gen_frame((1.0, self.result.iloc[0]), list(objs_to_modify))\n\n # NOTE : If the iterrows() generator is not converted to a list then\n # matplotlib will throw a StopIteration error when the animation\n # reaches the last frame instead of repeating. This causes headaches in\n # the notebook and elsewhere. 
See issue #39 in the resonance repo.\n return animation.FuncAnimation(fig, gen_frame,\n fargs=(objs_to_modify, ),\n frames=list(trajectories.iterrows()),\n interval=interval,\n **kwargs)", "def anime(df,filename,save=False):\n walls = gc[\"walls\"]\n obstacles = gc[\"obstacles\"]\n destinations = gc[\"destinations\"]\n\n step_index = {}\n for step in df.STEP:\n step_index[step] = df.index[df.STEP == step]\n\n fig = plt.figure(figsize=matplotlib.figure.figaspect(1))\n plt.gca().set_aspect('equal', adjustable='box')\n ax = fig.add_subplot(111)\n # contour\n for wall in walls:\n wall = plt.Polygon(wall.vertices,color=\"red\",alpha=0.2)\n ax.add_patch(wall)\n # obstacles\n for obstacle in obstacles:\n obst = plt.Circle(obstacle.center,obstacle.r,color=\"green\",alpha = 0.5)\n ax.add_patch(obst)\n # destinations\n for dest in destinations:\n dest_range = plt.Polygon(dest.dest_range,color=\"black\",alpha=0.5)\n dest = plt.Polygon(dest.vertices,color=\"black\",alpha=0.2)\n ax.add_patch(dest_range)\n ax.add_patch(dest)\n\n ims = []\n colors = [\"red\",\"blue\"]\n for step in range(len(step_index)):\n im = plt.scatter(df.y[step_index[step + 1]],\n df.x[step_index[step + 1]],\n color=[colors[i] for i in df.KIND[step_index[step+1]]],\n s=80,\n alpha=0.5)\n ims.append([im])\n\n ani = animation.ArtistAnimation(fig,ims,interval=10)\n plt.xlim(gc[\"min_xy\"][0],gc[\"max_xy\"][0])\n plt.ylim(gc[\"min_xy\"][1],gc[\"max_xy\"][1])\n # if save:\n # print(\"start saving animation\")\n # ani.save(filename)\n # print(\"finish saving animation\")\n plt.show()", "def set_animated(self,val):\n self.XP_Plotter.set_animated(val)\n # theoretical lines \n for line in self.lines_theory:\n line.set_animated(val)", "def animate_func(self, i):\n if i > 0:\n self.step(iters=100)\n\n self.imu.set_array(self.viewee.array)\n self.imv.set_array(self.viewee.array2)\n return (self.imu, self.imv)", "def animation(self, t):\n self.program['u_clock'] = 2*t\n gloo.clear('black')\n self.program.draw('points')\n return _screenshot((0, 0, self.size[0], self.size[1]))[:,:,:3]", "def _animate(foo):\n simulator.evolve(0.01)\n X = [particle.x for particle in simulator.particles]\n Y = [particle.y for particle in simulator.particles]\n\n line.set_data(X, Y)\n return line,", "def animate_result(vehicles, setup):\n fig = plt.figure()\n lines = []\n t, delta_t, n_steps, t_max = get_time_values_from_setup(setup)\n ax = plt.axes(xlim=(-10, setup.get('lanes').get('lane_length') + 10),\n ylim=(-1, setup.get('lanes').get('n_lanes')))\n\n Writer = animation.writers['ffmpeg']\n writer = Writer(fps=15, metadata=dict(\n artist='Philipp Froehlich'), bitrate=1800)\n\n for i in range(len(vehicles)):\n lobj = ax.plot([], [], linewidth=4)[0]\n lines.append(lobj)\n\n def init():\n for line in lines:\n line.set_data([], [])\n return lines\n\n def animate(frame_number):\n for lnum, line in enumerate(lines):\n act = np.zeros([2, 2])\n # position\n act[:, 0] = vehicles[lnum].position_archive[frame_number, 0]\n # lane\n act[:, 1] = vehicles[lnum].position_archive[frame_number, 1]\n # add saftey_distance\n act[1, 0] += vehicles[lnum].position_archive[frame_number, 2]\n # print(act)\n line.set_data(np.transpose(act))\n return tuple(lines)\n\n plt.xlabel('position [m]')\n\n plt.ylabel(' lane number')\n\n plt.title('animation of the safety bouding box of every car')\n\n anim = animation.FuncAnimation(\n fig, animate, init_func=init, frames=n_steps, interval=50, blit=True)\n\n plt.show()\n print('please wait for plot to save')\n anim.save('result/latest.mp4', 
writer=writer)", "def static(fps, duration):\n\n frames = int(duration * fps)\n def animate(thing):\n thing = list(thing)\n yield from repeat(thing, frames)\n return animate", "def animate_GRN(self, n_frames=100, file_name=None, dir_name=\"plots\"):\n if not os.path.exists(dir_name):\n os.makedirs(dir_name)\n fig = plt.figure()\n ax1 = fig.add_subplot(1, 1, 1)\n\n skip = int((self.x_save.shape[0]) / n_frames)\n E_sample = self.E_save[::skip]\n E_min,E_max = E_sample.min(),E_sample.max()\n\n def animate(i):\n ax1.cla()\n cmap = plt.cm.plasma(self.normalize(E_sample[i],E_min,E_max))\n self.plot_vor_colored(self.x_save[skip * i], ax1,cmap)\n ax1.set(aspect=1, xlim=(0, self.L), ylim=(0, self.L))\n\n Writer = animation.writers['ffmpeg']\n writer = Writer(fps=15, bitrate=1800)\n if file_name is None:\n file_name = \"animation %d\" % time.time()\n an = animation.FuncAnimation(fig, animate, frames=n_frames, interval=200)\n an.save(\"%s/%s.mp4\" % (dir_name, file_name), writer=writer, dpi=264)", "def start(self):\n for i in xrange(self.num_pulses):\n self.fillColor = \"white\"\n for j in xrange(self.num_frames_on):\n self.draw()\n self.win.flip()\n if j == 0:\n # Only store the time of the first occuring on frame.\n self.utc_timestamps.append(datetime.strftime(datetime.utcnow(), '%Y-%m-%dT%H:%M:%S.%fZ'))\n self.fillColor = \"black\"\n for j in xrange(self.num_frames_off):\n self.draw()\n self.win.flip()", "def animate(self,delay=100):\n curves = self.g.element_show()\n for index in range(self.npoints):\n for y in range(self.ncurves):\n self.vector_y[y][index] = random.uniform(0,8)\n # changing vector_y[y] affects the graph directly,\n # but not until the function returns\n # introduce a delay and call update (to force Tk to update\n # pending events, i.e., here the graph)\n self.master.after(delay)\n self.master.update()", "def animate(self, movement, f=None, index_col=0, index_row=0, texture=None, cmap='jet',\n plotter=None, title='', font_size=10, font_color='black', gif_path=None, camera=None):\n\n if not plotter:\n plotter = pv.Plotter()\n\n plotter.subplot(index_column=index_col, index_row=index_row)\n plotter.add_text(title, position=\"upper_edge\", font_size=font_size, color=font_color)\n if camera is not None: # it gives .any() crushes without..\n plotter.set_position(camera[0])\n plotter.set_focus(camera[1])\n plotter.set_viewup(camera[2])\n if not texture:\n plotter.add_mesh(self.pv_mesh, scalars=f, cmap=cmap, texture=texture)\n else:\n if isinstance(texture, np.ndarray):\n tex = pv.numpy_to_texture(texture)\n else:\n tex = pv.read_texture(texture)\n self.pv_mesh.texture_map_to_plane(inplace=True)\n plotter.add_mesh(self.pv_mesh, texture=tex)\n plotter.show(auto_close=False)\n if gif_path:\n plotter.open_gif(gif_path)\n for item in movement:\n plotter.update_coordinates(item, mesh=self.pv_mesh)\n if gif_path:\n plotter.write_frame()\n\n plotter.close()", "def animate_with_pil_frame_sequence(self, pil_frame_sequence, frames_per_second=15):\n\n sleep_time = 1/frames_per_second\n for animation_frame in pil_frame_sequence:\n tic = time.time()\n self._set_image_from_pil_image(animation_frame)\n self.update()\n toc = time.time()\n frame_generation_time = toc-tic\n if frame_generation_time < sleep_time:\n new_sleep_time = sleep_time - frame_generation_time\n time.sleep(new_sleep_time)\n else:\n pass", "def plotOfSlice(self,index=0):\n\t\tj=index;\n\t\t[n,m]=_np.shape(self._data)\n\t\ty=_np.zeros(n);\n\t\tfor i in range(0,n):\n\t\t\ty[i]=self._data[i][j]*1e4\n\t\tp1=_plot.plot(title='t=%.3f ms. 
%s ' % (self.time[j]*1000, self.title),\n\t\t\t\t\t shotno=self.shotno)\n\t\ttheta=_np.linspace(self._theta[0],self._theta[-1],100)\n#\t\tm0Fit=self._x[0,j]\n\t\tm1Fit=self._x[0,j]+self._x[1,j]*_np.sin(theta)+self._x[2,j]*_np.cos(theta)\n\t\tm2Fit=self._x[0,j]+self._x[3,j]*_np.sin(2*theta)+self._x[4,j]*_np.cos(2*theta)\n\t\tm3Fit=self._x[0,j]+self._x[5,j]*_np.sin(3*theta)+self._x[6,j]*_np.cos(3*theta)\n\t\tm4Fit=self._x[0,j]+self._x[7,j]*_np.sin(4*theta)+self._x[8,j]*_np.cos(4*theta)\n\t\tm5Fit=self._x[0,j]+self._x[9,j]*_np.sin(5*theta)+self._x[10,j]*_np.cos(5*theta)\n\t\tfitTotal=(-4.)*self._x[0,j]+m1Fit+m2Fit+m3Fit+m4Fit+m5Fit # the -4 corrects for the 4 extra offsets added from the preview 5 fits\n\t\t\n\t\tp1.addTrace(yData=y,xData=self._theta,\n\t\t\t\t\tlinestyle='',marker='.',yLegendLabel='raw')\n\t\tp1.addTrace(yData=m1Fit,xData=theta,\n\t\t\t\t\tyLegendLabel='m=1')\n\t\tp1.addTrace(yData=m2Fit,xData=theta,\n\t\t\t\t\tyLegendLabel='m=2')\n\t\tp1.addTrace(yData=m3Fit,xData=theta,\n\t\t\t\t\tyLegendLabel='m=3')\n\t\tp1.addTrace(yData=m4Fit,xData=theta,\n\t\t\t\t\tyLegendLabel='m=4')\n\t\tp1.addTrace(yData=m5Fit,xData=theta,\n\t\t\t\t\tyLegendLabel='m=5')\n\t\tp1.addTrace(yData=fitTotal,xData=theta,\n\t\t\t\t\tyLegendLabel='m=1-5')\n\t\treturn p1", "def Start(self): # this is used to start the object\n ani = anim.FuncAnimation(self.f, self.animate, interval=1000)\n # animating object wth 1 sec gap\n self.plt_0.tight_layout()\n self.plt_0.show()\n # showing the plot", "def start_frame(self):\n\n # Check whether we're supposed to make a frame on this iteration:\n if self.frame_count % self.stride != 0:\n return\n\n # Check whether we're already making a frame. \n if self.in_scope:\n print(\"The Gif object for {} has encountered 'start_frame' twice\\\n without an intervening 'end_frame'\".format(self.filename))\n raise SyntaxError\n\n # Construct a new figure\n fig = plt.figure(figsize=(self.width,self.height), **(self.kwargs))\n self.current_frame = fig\n\n # Set the \"in_scope\" member True\n self.in_scope = True\n\n return self.current_frame", "def _init_frame(self : \"animation\",\n init_frame : \"matplotlib.figure.Figure\",\n init_ax : \"matplotlib.axes._subplots.AxesSubplot\"\n ):\n self._cframe = init_frame.canvas.copy_from_bbox(init_ax.bbox)", "def animate(directory,gifname,n_t,step=2,duration=0.2):\n\t# create list of filenames\n\tfnames = dir_fname(directory,\"*\")\n\t# create list of plots\n\timages=[] \n\tfor k in range(0,n_t):\n\t\tk = k*step\n\t\tprint('Mounting Im '+ str(k))\n\t\tFIG_NAME=fnames[k]\n\t\timages.append(imageio.imread(FIG_NAME)) # read\n\t# Now we can assemble the video\n\timageio.mimsave(gifname, images,duration=duration) # create gif\n\tprint('Animation'+gifname+'Ready')\n\treturn True", "def show_next_frame(self):\n if self.frames:\n self.config(image=next(self.frames))\n self.after(self.delay, self.show_next_frame)", "def player_processframe_event(self, frame):\n\t\tpaths = self._panel_path.value.datasets\n\t\tareas = self._panel_area.value.datasets\n\t\tcolors = self._panel_colors.value.datasets\n\t\timages = self._panel_imgs.value.images\n\n\t\t# check if should use the current frame or an image selected to background\n\t\tframe = frame if len(images)!=1 else images[0].image.copy()\n\t\tindex = self._player.video_index-1\n\t\n\t\tfor path in paths:\n\t\t\t# draw the path if the option is selected\n\t\t\tif self._drawpath.value: path.draw_path(frame, None, index)\n\n\t\t\tarea = self.get_object_area(path, areas, index)\n\t\t\tif area is not 
None:\n\t\t\t\tcolor \t = self.get_object_color(path, colors, index)\n\t\t\t\tposition = path.get_position(index)\n\t\t\t\t\n\t\t\t\tif position is not None and area is not None and color is not None:\n\t\t\t\t\tradius = int(round(math.sqrt(area/math.pi)))\t\t\t\t\n\t\t\t\t\tcv2.circle(frame, position, radius, color, -1)\n\n\t\tself.draw_events(index, frame)\n\n\t\treturn frame", "def plot_animation(event):\n    # Check whether the correct plot area was clicked\n    if plt.get_current_fig_manager().toolbar.mode == '' and event.button == 1\\\n        and event.inaxes == hist:\n        count = len(T_plot)\n        # Create the plots for t=0\n        hist_wert, kanten = np.histogram(teilchen[:,0], bins=bins, density=True)\n        breite = np.diff(kanten)\n        hist_wert = hist_wert * norm_werte[0]\n\n        hist.bar(kanten[:-1], hist_wert, breite, color = \"b\")\n        norm_kurve = norm.plot(T_plot[0], norm_werte[0], color=\"b\")\n        erwartung_kurve = erwartung.plot(T_plot[0], erwartung_werte[0],\n                                         color=\"b\")\n        varianz_kurve = varianz.plot(T_plot[0], varianz_werte[0], color=\"b\")\n\n        # Dynamic plot for times t > 0\n        for i in np.arange(1, count):\n            hist.patches = []  # clear the histogram plot area\n            hist.lines = []\n            orte = teilchen[:,i]\n            orte = orte[orte < x_abs]  # determine the non-absorbed particles\n            hist_wert, kanten = np.histogram(orte, bins=bins, density=True)\n            breite = np.diff(kanten)  # create the histogram\n            hist_wert = hist_wert * norm_werte[i]  # and rescale it\n            hist.bar(kanten[:-1], hist_wert, breite, color = \"b\")\n            # Output the theoretical prediction\n            hist.plot(x_th, orte_theorie[i-1,:], color =\"k\", linewidth=2,\n                      label=\"theoretisch ohne abs.\")\n            hist.plot(x_th, orte_theorie_abs[i-1,:], color =\"c\", linewidth=2,\n                      label=\"theoretisch mit abs.\")\n            hist.legend()\n\n            # Output the statistical quantities\n            plt.setp(norm_kurve[0], xdata=T_plot[:i], ydata=norm_werte[:i],\n                     color=\"b\")\n            plt.setp(erwartung_kurve[0], xdata=T_plot[:i],\n                     ydata=erwartung_werte[:i], color=\"b\")\n            plt.setp(varianz_kurve[0], xdata=T_plot[:i],\n                     ydata=varianz_werte[:i], color=\"b\")\n\n            plt.draw()\n            time.sleep(0.2)", "def animate(i):\n    alpha = i/100\n    #Dt - time when the body falls to the ground\n    Dt = (v0*np.sin(alpha) + np.sqrt((v0**2)*(np.sin(alpha))**2 + 2*g*h))/g\n    t = np.linspace(0, Dt, 100)\n    x = v0*t*np.cos(alpha)\n    z = v0*t*np.sin(alpha) - g*t*t/2 + h\n\n    line.set_data(x, y)\n    line.set_3d_properties(z)\n    line.set_color('red')\n    return line,", "def Advance(self):\n\t\t\n\t\tself.present_frame += 1\n\t\t\n\t\tif self.present_frame < len(self.frames):\n\t\t\tself.frames[self.present_frame].Draw()\n\t\t\t\n\t\telif self.loop:\n\t\t\tself.present_frame = 0\n\t\t\tself.frames[self.present_frame].Draw()\n\t\t\n\t\telse:\n\t\t\tself.Stop()", "def animate(self,n_frames = 100,file_name=None, dir_name=\"plots\",an_type=\"periodic\",tri=False):\n        if an_type == \"periodic\":\n            plot_fn = self.plot_vor\n        if an_type == \"boundary\":\n            plot_fn = self.plot_vor_boundary\n\n        if not os.path.exists(dir_name):\n            os.makedirs(dir_name)\n        fig = plt.figure()\n        ax1 = fig.add_subplot(1, 1, 1)\n\n        skip = int((self.x_save.shape[0])/n_frames)\n        def animate(i):\n            ax1.cla()\n            if tri is True:\n                plot_fn(self.x_save[skip*i],ax1,tri=self.tri_save[skip*i])\n            else:\n                plot_fn(self.x_save[skip*i],ax1,tri=False)\n            if self.plot_forces is True:\n                x = self.x_save[skip*i]\n                mask = ~np.isnan(self.x_save[skip*i,:,0]) * ~np.isnan(self.x_save[skip*i+1,:,0])\n                x = x[mask]\n                F = self.x_save[skip*i+1,mask] - self.x_save[skip*i,mask]\n                ax1.quiver(x[:,0],x[:,1],F[:,0],F[:,1])\n            ax1.set(aspect=1, xlim=(0, 
self.L), ylim=(0, self.L))\n\n Writer = animation.writers['ffmpeg']\n writer = Writer(fps=15, bitrate=1800)\n if file_name is None:\n file_name = \"animation %d\" % time.time()\n an = animation.FuncAnimation(fig, animate, frames=n_frames, interval=200)\n an.save(\"%s/%s.mp4\" % (dir_name, file_name), writer=writer, dpi=264)", "def animateLoc(loader, frame_start, frame_stop, boundaries, ai = None, path=\"loc_anim.gif\", save=False, step=1, fps=16, wall=False, cor=False, title=\"Location Animation\", useFFMPEG=False):\r\n #preprocess data\r\n data = []\r\n ai_data = []\r\n\r\n # load data into new data structure for animation\r\n for i in np.arange(frame_start, frame_stop, step):\r\n people, temp = loader.frame(i, ret_vel=False, with_id=False)\r\n data.append(temp)\r\n ai_data.append(temp[np.isin(people, ai)])\r\n\r\n \r\n #Set the figure for the animation framework\r\n fig = plt.figure(figsize = (10,6)) \r\n ax1 = fig.add_subplot(1,1,1)\r\n\r\n\r\n # geometry for different datasets\r\n if wall:\r\n ax1.vlines(-60, ymin=255, ymax=400, lw=3, color=\"fuchsia\")\r\n ax1.vlines(-60, ymin=-200, ymax=-95, lw=3, color=\"fuchsia\")\r\n\r\n if cor:\r\n # measurement region\r\n ax1.vlines(-200, ymin=0, ymax=180, lw=2, color=\"orange\")\r\n ax1.vlines(200, ymin=0, ymax=180, lw=2, color=\"orange\")\r\n\r\n # Walls\r\n ax1.hlines(0, xmin=-300, xmax=300, lw=2, color=\"fuchsia\")\r\n ax1.hlines(180, xmin=-300, xmax=300, lw=2, color=\"fuchsia\")\r\n \r\n scat = ax1.scatter([], [], c=\"red\")\r\n scat_ai = ax1.scatter([], [], c=\"black\")\r\n ax1.set_xlim([boundaries[0], boundaries[1]])\r\n ax1.set_ylim([boundaries[2], boundaries[3]])\r\n\r\n ax1.set_aspect('equal', adjustable='box')\r\n ax1.set_xlabel('x Pos. / cm')\r\n ax1.set_ylabel('y Pos. / cm ')\r\n ax1.set_title(title, loc=\"left\")\r\n\r\n # animation function that is called for each frame\r\n def animate(i):\r\n scat.set_offsets(data[i])\r\n scat_ai.set_offsets(ai_data[i])\r\n return scat,\r\n\r\n frames = int(np.floor((frame_stop - frame_start)/step))\r\n \r\n ani = animation.FuncAnimation(fig = fig, func = animate, frames =frames, interval = int(step*1000/fps), blit=True)\r\n plt.close(fig)\r\n \r\n # save animation to .mp4 or .gif via writer\r\n if save:\r\n if useFFMPEG:\r\n writer = animation.FFMpegWriter(fps=fps/step, extra_args=['-vcodec', 'libx264'])\r\n else:\r\n writer = animation.PillowWriter(fps=fps/step, extra_args=['-vcodec', 'libx264'])\r\n ani.save(path, writer=writer)\r\n return ani", "def _make_animation_from_raw_list(self : \"animation\",\n frameList : \"list\",\n facecolor='white'\n ):\n for frame in tqdm(frameList, disable=not self.pbar):\n if frame.dpi < self.dpi:\n frame.dpi = self.dpi\n frame.patch.set_facecolor(facecolor)\n frame.canvas.draw()\n image = np.array(frame.canvas.renderer._renderer)\n if self._frame_number == 0 and self.size is None:\n image = self._scale_to_mbs_frame(image)\n self.size = image.shape\n if image.size != self.size:\n image = (255*resize(image, self.size)).astype(np.uint8)\n if self._autosmooth:\n self._insert_smoothed_frames(image)\n self._write_frame(image)", "def _get_anim_seq(self, keyframes, fps=30, interpolation='linear'):\n # Misc. 
setup\n fr = 0\n a = np.array\n func = mixes[interpolation]\n #skip_props = ['surface.{subject}.right', 'surface.{subject}.left', ] #'projection',\n # Get keyframes\n keyframes = sorted(keyframes, key=lambda x:x['time'])\n # Normalize all time to frame rate\n fs = 1./fps\n for k in range(len(keyframes)):\n t = keyframes[k]['time']\n t = np.round(t/fs)*fs\n keyframes[k]['time'] = t\n allframes = []\n for start, end in zip(keyframes[:-1], keyframes[1:]):\n t0 = start['time']\n t1 = end['time']\n tdif = float(t1-t0)\n # Check whether to continue frame sequence to endpoint\n use_endpoint = keyframes[-1]==end\n nvalues = np.round(tdif/fs).astype(int)\n if use_endpoint:\n nvalues += 1\n fr_time = np.linspace(0, 1, nvalues, endpoint=use_endpoint)\n # Interpolate between values\n for t in fr_time:\n frame = {}\n for prop in start.keys():\n if prop=='time':\n continue\n if (start[prop] is None) or (start[prop] == end[prop]) or isinstance(start[prop], (bool, str)):\n frame[prop] = start[prop]\n continue\n val = func(a(start[prop]), a(end[prop]), t)\n if isinstance(val, np.ndarray):\n frame[prop] = val.tolist()\n else:\n frame[prop] = val\n allframes.append(frame)\n return allframes", "def make_movie_views(self, animation, filename=\"brainmovie%07d.png\", \n offset=0, fps=30, size=(1920, 1080), alpha=1, frame_sleep=0.05,\n frame_start=0, interpolation=\"linear\"):\n allframes = self._get_anim_seq(animation, fps, interpolation)\n for fr, frame in enumerate(allframes[frame_start:], frame_start):\n self._set_view(**frame)\n time.sleep(frame_sleep)\n self.getImage(filename%(fr+offset+1), size=size)\n time.sleep(frame_sleep)" ]
[ "0.70611054", "0.69869083", "0.6971987", "0.689324", "0.68860537", "0.6854017", "0.68523353", "0.65770304", "0.65645885", "0.6556076", "0.65512234", "0.6524661", "0.6517934", "0.65143037", "0.6452433", "0.64289904", "0.64282256", "0.6403286", "0.6378409", "0.63592017", "0.6246925", "0.6230509", "0.6227236", "0.6226506", "0.62199605", "0.6194662", "0.61886054", "0.61618495", "0.61204785", "0.61140096", "0.6065182", "0.6063545", "0.5995606", "0.59723437", "0.59663576", "0.5959955", "0.5950537", "0.593969", "0.59297943", "0.5921779", "0.5920602", "0.59112424", "0.5910502", "0.5908072", "0.5907632", "0.58985215", "0.58724153", "0.58400375", "0.58162624", "0.58110577", "0.58050895", "0.5791123", "0.57881373", "0.57808757", "0.5775128", "0.57747555", "0.57653755", "0.5749735", "0.5736585", "0.57247335", "0.5721247", "0.5716765", "0.5716465", "0.57138854", "0.57078904", "0.5697991", "0.569039", "0.56844467", "0.567792", "0.56724423", "0.5662721", "0.56538165", "0.56527954", "0.565076", "0.56491387", "0.5649021", "0.56337076", "0.5630734", "0.56232065", "0.56200415", "0.56142277", "0.5613368", "0.561066", "0.5608609", "0.56076646", "0.56049114", "0.5589637", "0.55852497", "0.55845124", "0.5584361", "0.557508", "0.55719304", "0.55688393", "0.55679286", "0.5550407", "0.55366105", "0.5535327", "0.5529627", "0.55245847", "0.551094", "0.55107296" ]
0.0
-1
Complete pipeline to process and plot data.
def run_animation(self, clip_name, fragment, zoom=True, pad=3, interval=100): preprocessor = self.process_data(clip_name) period_person_division = preprocessor.period_person_division running_person_identifiers = preprocessor.get_running_person_identifiers() running_fragments = preprocessor.get_running_fragments() turning_fragments = preprocessor.get_turning_fragments() period_running_person_division, running_plottables, turning_plottables = self.get_plottables(period_person_division, running_person_identifiers, running_fragments, turning_fragments) if fragment == 'run': plottables = running_plottables elif fragment == 'turn': plottables = turning_plottables else: plottables = period_running_person_division self.set_axis_limits(plottables, preprocessor.height, preprocessor.width, zoom=zoom, pad=pad) animate = animation.FuncAnimation(fig=self.fig, func=self.plot_person, frames=plottables.keys(), fargs=(plottables, preprocessor.height, preprocessor.width, zoom, pad), interval=interval, init_func=self.func_init, blit=False, repeat=False) plt.show()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def run(self):\n self.set_pipeline()\n self.pipeline.fit(self.X, self.y)", "def main():\n df_data = import_clean_process()\n plot_data_matplotlib(df_data)\n return", "def pipeline_finished(self, pipeline):\n\n # Get the sizes of all output adios files\n self._get_adios_metadata(pipeline)\n\n # Free resources used by the pipeline\n with self.free_cv:\n # Return nodes used by the pipeline\n while not pipeline.nodes_assigned.empty():\n pipe_node = pipeline.nodes_assigned.get()\n self.allocated_nodes.put(pipe_node)\n\n _log.debug(\"finished pipeline {}, free nodes {} -> {}\".format(\n pipeline.id, self.free_nodes, self.free_nodes +\n pipeline.total_nodes))\n self.free_nodes += pipeline.total_nodes\n\n self.free_cv.notify()\n\n # Remove pipeline from list of running pipelines\n with self.pipelines_lock:\n self._running_pipelines.remove(pipeline)\n if self._status is not None:\n self._status.set_state(pipeline.get_state())", "def preprocessing_pipeline(self):\n self.__multilabel_processing()\n self.__split_dataset()\n self.__save_datasets()", "def finalize(self):\n if not self._finalized:\n self._do_plot()\n self._finalized = True\n return self", "def finalize(self):\n if not self._finalized:\n self._do_plot()\n super(SVGPlot, self).finalize()\n self._finalized = True\n return self", "def call_back(self):\n\n # Poll the pipe\n while self.pipe.poll():\n # Look inside of the pipe and take the_box\n the_box = self.pipe.recv()\n\n # If the_box is empty, it's game over\n if the_box is None:\n self.terminate()\n return False\n\n # Otherwise, update the plot with the tools in the_box\n else:\n # Get our elapsed time\n elapsed_time = time.time() - the_box[0]\n\n # Add the elements to the plot\n self.ax1.plot(elapsed_time, the_box[1], c='tab:orange',\n marker=r'$\\clubsuit$', alpha=0.5,\n markersize=10)\n self.ax1.plot(elapsed_time, the_box[2], c='tab:blue',\n marker=r'$\\clubsuit$', alpha=0.5,\n markersize=10)\n self.ax2.plot(elapsed_time, the_box[3], c='tab:pink',\n marker=r'$\\clubsuit$', alpha=0.5,\n markersize=10)\n\n current_gen = the_box[-2]\n generations = the_box[-1]\n if current_gen == generations - 1:\n x, y = self.get_path_coordinates(the_box[4], the_box[5], the_box[6])\n self.ax3.plot(y, x, c='tab:olive', marker=r'$\\clubsuit$',\n alpha=0.5, markersize=10)\n\n\n # Redraw the canvas\n self.fig.canvas.draw()\n return True", "def run(self):\n pipeline = set_pipeline()\n pipeline.fit(self.X_train, self.y_train)\n return pipeline", "def run(self):\n self.pipeline = self.set_pipeline()\n self.pipeline.fit(self.X,self.y)\n return self", "def plot_data(self):", "def _one_loop(self):\n frame, processed_data = super()._one_loop()\n\n if processed_data:\n segments = self._process_boxes(processed_data)\n if len(segments):\n self._draw_segments(frame, segments)\n self._notifications.append({\n \"kind\": \"distancing\"\n })\n if self.config.streaming_output:\n self._notifications.append({\n \"kind\": \"frame\"\n })\n return frame, processed_data", "def processing(self):\n pass", "def run_and_plot(self):\n self.raw_processing()\n self.data_averaging_and_cleaning()\n\n print(self.organized_names)\n print(self.organized_film)\n print(self.organized_plank)\n\n height = self.organized_film\n bars = tuple(self.organized_names.copy())\n y_pos = np.arange(len(bars))\n\n plt.bar(y_pos, height)\n plt.xticks(y_pos, bars)\n plt.xlabel('TH% in 100ul water/TH mixture')\n plt.ylabel('CFU/mL count')\n plt.title('Experiment 2.5 (Sucrose Concentration) 7 Aug 2018')\n\n plt.show()\n\n height2 = self.organized_plank\n\n 
plt.bar(y_pos, height2)\n plt.xticks(y_pos, bars)\n plt.xlabel('TH% in 100ul water/TH mixture')\n plt.ylabel('Proportion of Biofilm CFUs to Planktonic CFUs')\n plt.title('Experiment 2.5 (Sucrose Concentration) 7 Aug 2018')\n\n plt.show()", "def run_pipeline() -> pd.DataFrame:\n\n print('Loading data...')\n data = load_data()\n print('Stage one processing...')\n text = data.text\n text_ = stage_one_preprocessing(text)\n data_ = data.copy()\n data_.text = text_\n #print('Splitting by sentences...')\n #data_ = split_by_sentences(data_)\n print('Stage two processing...')\n text_ = stage_two_preprocessing(data_.text)\n print('Stage three processing...')\n text_ = stage_three_preprocessing(text_)\n data_.text = text_\n print('Saving file...')\n data_.to_csv(r'./data/stage_three_text.csv')\n return data_", "def pipeline_runner():\n # file_parser() # take raw data file and extract columns of interest. remove contaminants.\n entry_parser() # remove duplicates, faulty lines and format the whole thing normally.\n lfq_parser() # replace 0s in lfq reading with random small numbers for t testing purposes\n # open Rstudio and do T testing there\n ROutputFormatter() # reformat R output to something more appealing, add FDR and fold change values", "def run(self):\n self.simulate_test_data()\n self.pipeline_test_data()\n self.plot_jump_flags_image()\n self.plot_groupdq_flags(pixel=[884, 550])\n self.plot_ramps_pre_post_correction(pixel=[884, 550])", "def __call__(self, pipe):\n self.pipe = pipe\n self.fig = plt.figure(tight_layout=True)\n self.gs = gridspec.GridSpec(2, 2)\n self.ax1 = self.fig.add_subplot(self.gs[0, 0])\n self.ax2 = self.fig.add_subplot(self.gs[1, 0])\n self.ax3 = self.fig.add_subplot(self.gs[:, 1])\n self.ax1.set_xlabel(\"Time\")\n self.ax1.set_ylabel(\"Fitness\")\n self.ax2.set_xlabel(\"Time\")\n self.ax2.set_ylabel(\"Standard deviation\")\n self.ax3.set_xlabel(\"Best path\")\n orange_patch = mpatches.Patch(color='tab:orange', label='Best fitness')\n blue_patch = mpatches.Patch(color='tab:blue', label='Mean fitness')\n pink_patch = mpatches.Patch(color='tab:pink', label='Std. dev.')\n self.ax1.legend(handles=[orange_patch, blue_patch])\n self.ax2.legend(handles=[pink_patch])\n timer = self.fig.canvas.new_timer(interval=5)\n timer.add_callback(self.call_back)\n timer.start()\n plt.show()", "def run(self, data: PipeLineDataObject) -> PipeLineDataObject:\n raise NotImplementedError", "def end(self) -> None:\n self.process_event(\n PipelineEvent(\n PipelineEventType.RUN_END,\n )\n )", "def ready(self):\n plt.ion()\n self.figure = plt.figure()\n axes = self.figure.add_subplot(111)\n self.line, = axes.plot(self.xs, self._get_y_data(), self.colour)\n\n if self.y_range is not None:\n plt.ylim(*self.y_range)\n plt.xlim(self.x.lower, self.x.upper)\n\n plt.xlabel(self.x.tex_name if self.use_tex else self.x.name)\n plt.ylabel(self.y.tex_name if self.use_tex else self.y.name)\n\n self.figure.canvas.draw()", "def run(self):\n # fill the x_values,y_values,z_values dictionaries\n if not self.__fillCoordinatesFromSource():\n self.raiseAWarning('Nothing to Plot Yet. Returning.')\n return\n\n self.counter += 1\n if self.counter > 1:\n self.actcm = None\n clusterDict = deepcopy(self.outStreamTypes)\n\n # start plotting.... 
loop over the plots that need to be included in this figure\n for pltIndex in range(len(self.outStreamTypes)):\n plotSettings = self.options['plotSettings']['plot'][pltIndex]\n if 'gridLocation' in plotSettings:\n x = None\n y = None\n if 'x' in plotSettings['gridLocation']:\n x = list(map(int, plotSettings['gridLocation']['x'].strip().split(' ')))\n else:\n x = None\n if 'y' in plotSettings['gridLocation'].keys():\n y = list(map(int, plotSettings['gridLocation']['y'].strip().split(' ')))\n else:\n y = None\n if pltIndex == 0:\n self.ax.remove() # remove axis so that there is not an extra axis on plot with subplots\n if (len(x) == 1 and len(y) == 1):\n if self.dim == 2:\n self.ax = self.fig.add_subplot(self.gridSpace[x[0], y[0]])\n else:\n self.ax = self.fig.add_subplot(self.gridSpace[x[0], y[0]], projection='3d')\n elif (len(x) == 1 and len(y) != 1):\n if self.dim == 2:\n self.ax = self.fig.add_subplot(self.gridSpace[x[0], y[0]:y[-1]])\n else:\n self.ax = self.fig.add_subplot(self.gridSpace[x[0], y[0]:y[-1]], projection='3d')\n elif (len(x) != 1 and len(y) == 1):\n if self.dim == 2:\n self.ax = self.fig.add_subplot(self.gridSpace[x[0]:x[-1], y[0]])\n else:\n self.ax = self.fig.add_subplot(self.gridSpace[x[0]:x[-1], y[0]], projection='3d')\n else:\n if self.dim == 2:\n self.ax = self.fig.add_subplot(self.gridSpace[x[0]:x[-1], y[0]:y[-1]])\n else:\n self.ax = self.fig.add_subplot(self.gridSpace[x[0]:x[-1], y[0]:y[-1]], projection='3d')\n\n if 'gridSpace' in self.options['plotSettings']:\n self.ax.locator_params(axis='y', nbins=4)\n self.ax.locator_params(axis='x', nbins=2)\n if 'range' in plotSettings:\n axes_range = plotSettings['range']\n if 'ymin' in axes_range:\n self.ax.set_ylim(bottom=ast.literal_eval(axes_range['ymin']))\n if 'ymax' in axes_range:\n self.ax.set_ylim(top=ast.literal_eval(axes_range['ymax']))\n if 'xmin' in axes_range:\n self.ax.set_xlim(left=ast.literal_eval(axes_range['xmin']))\n if 'xmax' in axes_range:\n self.ax.set_xlim(right=ast.literal_eval(axes_range['xmax']))\n if self.dim == 3:\n if 'zmin' in axes_range.options['plotSettings']['plot'][pltIndex]:\n if 'zmax' not in axes_range.options['plotSettings']:\n self.raiseAWarning('zmin inputted but not zmax. zmin ignored! ')\n else:\n self.ax.set_zlim(bottom=ast.literal_eval(axes_range['zmin']), top=ast.literal_eval(self.options['plotSettings']['zmax']))\n if 'zmax' in axes_range:\n if 'zmin' not in axes_range:\n self.raiseAWarning('zmax inputted but not zmin. zmax ignored! 
')\n else:\n self.ax.set_zlim(bottom=ast.literal_eval(axes_range['zmin']), top=ast.literal_eval(axes_range['zmax']))\n if 'xlabel' not in plotSettings:\n self.ax.set_xlabel('x')\n else:\n self.ax.set_xlabel(plotSettings['xlabel'])\n if 'ylabel' not in plotSettings:\n self.ax.set_ylabel('y')\n else:\n self.ax.set_ylabel(plotSettings['ylabel'])\n if 'zlabel' in plotSettings:\n if self.dim == 2:\n self.raiseAWarning('zlabel keyword does not make sense in 2-D Plots!')\n elif self.dim == 3 and self.zCoordinates:\n self.ax.set_zlabel(plotSettings['zlabel'])\n elif self.dim == 3 and self.zCoordinates:\n self.ax.set_zlabel('z')\n else:\n if 'xlabel' not in self.options['plotSettings']:\n self.ax.set_xlabel('x')\n else:\n self.ax.set_xlabel(self.options['plotSettings']['xlabel'])\n if 'ylabel' not in self.options['plotSettings']:\n self.ax.set_ylabel('y')\n else:\n self.ax.set_ylabel(self.options['plotSettings']['ylabel'])\n if 'zlabel' in self.options['plotSettings']:\n if self.dim == 2:\n self.raiseAWarning('zlabel keyword does not make sense in 2-D Plots!')\n elif self.dim == 3 and self.zCoordinates:\n self.ax.set_zlabel(self.options['plotSettings']['zlabel'])\n elif self.dim == 3 and self.zCoordinates:\n self.ax.set_zlabel('z')\n\n if 'legend' in self.options['plotSettings']:\n if 'label' not in plotSettings.get('attributes', {}):\n if 'attributes' not in plotSettings:\n plotSettings['attributes'] = {}\n plotSettings['attributes']['label'] = self.outStreamTypes[pltIndex] + ' ' + str(pltIndex)\n #################\n # SCATTER PLOT #\n #################\n self.raiseADebug(f'creating plot {self.name}')\n if self.outStreamTypes[pltIndex] == 'scatter':\n if 's' not in plotSettings:\n plotSettings['s'] = '20'\n if 'c' not in plotSettings:\n plotSettings['c'] = 'b'\n if 'marker' not in plotSettings:\n plotSettings['marker'] = 'o'\n if 'alpha' not in plotSettings:\n plotSettings['alpha'] = 'None'\n if 'linewidths' not in plotSettings:\n plotSettings['linewidths'] = 'None'\n if self.colorMapCoordinates[pltIndex] is not None:\n # Find the max and min colormap values\n firstKey = utils.first(self.xValues[pltIndex].keys())\n vmin = np.amin(self.colorMapValues[pltIndex][firstKey])\n vmax = np.amax(self.colorMapValues[pltIndex][firstKey])\n for key in self.xValues[pltIndex]:\n vmin = min(vmin,np.amin(self.colorMapValues[pltIndex][key]))\n vmax = max(vmax,np.amax(self.colorMapValues[pltIndex][key]))\n plotSettings['norm'] = matplotlib.colors.Normalize(vmin,vmax)\n for key in self.xValues[pltIndex]:\n for xIndex in range(len(self.xValues[pltIndex][key])):\n for yIndex in range(len(self.yValues[pltIndex][key])):\n scatterPlotOptions = {'s': ast.literal_eval(plotSettings['s']),\n 'marker': (plotSettings['marker']),\n 'alpha': ast.literal_eval(plotSettings['alpha']),\n 'linewidths': ast.literal_eval(plotSettings['linewidths'])}\n if self.colorMapCoordinates[pltIndex] is not None:\n scatterPlotOptions['norm'] = plotSettings['norm']\n scatterPlotOptions.update(plotSettings.get('attributes', {}))\n if self.dim == 2:\n if self.colorMapCoordinates[pltIndex] is not None:\n scatterPlotOptions['c'] = self.colorMapValues[pltIndex][key][xIndex]\n scatterPlotOptions['cmap'] = matplotlib.cm.get_cmap(\"winter\")\n if self.actcm:\n first = False\n else:\n first = True\n if plotSettings['cmap'] == 'None':\n self.actPlot = self.ax.scatter(self.xValues[pltIndex][key][xIndex],\n self.yValues[pltIndex][key][yIndex],\n **scatterPlotOptions)\n if 'colorbar' not in self.options or self.options['colorbar']['colorbar'] != 'off':\n if 
first:\n m = matplotlib.cm.ScalarMappable(cmap=self.actPlot.cmap, norm=self.actPlot.norm)\n m.set_array(self.colorMapValues[pltIndex][key])\n self.actcm = self.fig.colorbar(m)\n self.actcm.set_label(self.colorMapCoordinates[pltIndex][0].split('|')[-1].replace(')', ''))\n else:\n try:\n self.actcm.draw_all()\n # this is not good, what exception will be thrown?\n except:\n m = matplotlib.cm.ScalarMappable(cmap=self.actPlot.cmap, norm=self.actPlot.norm)\n m.set_array(self.colorMapValues[pltIndex][key])\n self.actcm = self.fig.colorbar(m)\n self.actcm.set_label(self.colorMapCoordinates[pltIndex][0].split('|')[-1].replace(')', ''))\n else:\n scatterPlotOptions['cmap'] = plotSettings['cmap']\n self.actPlot = self.ax.scatter(self.xValues[pltIndex][key][xIndex],\n self.yValues[pltIndex][key][yIndex],\n **scatterPlotOptions)\n if 'colorbar' not in self.options or self.options['colorbar']['colorbar'] != 'off':\n if first:\n m = matplotlib.cm.ScalarMappable(cmap=self.actPlot.cmap, norm=self.actPlot.norm)\n m.set_array(self.colorMapValues[pltIndex][key])\n self.actcm = self.fig.colorbar(m, ax=self.ax)\n self.actcm.set_label(self.colorMapCoordinates[pltIndex][0].split('|')[-1].replace(')', ''))\n else:\n m = matplotlib.cm.ScalarMappable(cmap=self.actPlot.cmap, norm=self.actPlot.norm)\n m.set_clim(vmin = min(self.colorMapValues[pltIndex][key][-1]), vmax=max(self.colorMapValues[pltIndex][key][-1]))\n self.actcm.draw_all()\n else:\n if 'color' not in scatterPlotOptions:\n scatterPlotOptions['c'] = plotSettings['c']\n self.actPlot = self.ax.scatter(self.xValues[pltIndex][key][xIndex],\n self.yValues[pltIndex][key][yIndex],\n **scatterPlotOptions)\n else:\n for zIndex in range(len(self.zValues[pltIndex][key])):\n if self.colorMapCoordinates[pltIndex] is not None:\n scatterPlotOptions['c'] = self.colorMapValues[pltIndex][key][zIndex]\n if self.actcm:\n first = False\n else:\n first = True\n if plotSettings['cmap'] == 'None':\n self.actPlot = self.ax.scatter(self.xValues[pltIndex][key][xIndex], self.yValues[pltIndex][key][yIndex], self.zValues[pltIndex][key][zIndex], **scatterPlotOptions)\n if 'colorbar' not in self.options or self.options['colorbar']['colorbar'] != 'off':\n if first:\n m = matplotlib.cm.ScalarMappable(cmap=self.actPlot.cmap, norm=self.actPlot.norm)\n m.set_array(self.colorMapValues[pltIndex][key])\n self.actcm = self.fig.colorbar(m)\n self.actcm.set_label(self.colorMapCoordinates[pltIndex][0].split('|')[-1].replace(')', ''))\n else:\n self.actcm.draw_all()\n else:\n scatterPlotOptions['cmap'] = plotSettings['cmap']\n self.actPlot = self.ax.scatter(self.xValues[pltIndex][key][xIndex], self.yValues[pltIndex][key][yIndex], self.zValues[pltIndex][key][zIndex], **scatterPlotOptions)\n if 'colorbar' not in self.options or self.options['colorbar']['colorbar'] != 'off':\n if first:\n m = matplotlib.cm.ScalarMappable(cmap=self.actPlot.cmap, norm=self.actPlot.norm)\n m.set_array(self.colorMapValues[pltIndex][key])\n self.actcm = self.fig.colorbar(m)\n self.actcm.set_label(self.colorMapCoordinates[pltIndex][0].split('|')[-1].replace(')', ''))\n else:\n m = matplotlib.cm.ScalarMappable(cmap = self.actPlot.cmap, norm = self.actPlot.norm)\n m.set_clim(vmin = min(self.colorMapValues[pltIndex][key][-1]), vmax=max(self.colorMapValues[pltIndex][key][-1]))\n self.actcm.draw_all()\n else:\n if 'color' not in scatterPlotOptions:\n scatterPlotOptions['c'] = plotSettings['c']\n self.actPlot = self.ax.scatter(self.xValues[pltIndex][key][xIndex], self.yValues[pltIndex][key][yIndex], 
self.zValues[pltIndex][key][zIndex], **scatterPlotOptions)\n #################\n # LINE PLOT #\n #################\n elif self.outStreamTypes[pltIndex] == 'line':\n minV = 0\n maxV = 0\n # If the user does not define an appropriate cmap, then use matplotlib's default.\n if 'cmap' not in plotSettings or plotSettings['cmap'] not in matplotlib.cm.datad:\n plotSettings['cmap'] = None\n if bool(self.colorMapValues):\n for key in self.xValues[pltIndex]:\n minV = min(minV,self.colorMapValues[pltIndex][key][-1][-1])\n maxV = max(maxV,self.colorMapValues[pltIndex][key][-1][-1])\n cmap = matplotlib.cm.ScalarMappable(matplotlib.colors.Normalize(minV, maxV, True), plotSettings['cmap'])\n cmap.set_array([minV,maxV])\n for key in self.xValues[pltIndex]:\n for xIndex in range(len(self.xValues[pltIndex][key])):\n if self.colorMapCoordinates[pltIndex] is not None:\n plotSettings['interpPointsX'] = str(max(200, len(self.xValues[pltIndex][key][xIndex])))\n for yIndex in range(len(self.yValues[pltIndex][key])):\n if self.dim == 2:\n if self.yValues[pltIndex][key][yIndex].size < 2:\n return\n xi, yi = mathUtils.interpolateFunction(self.xValues[pltIndex][key][xIndex], self.yValues[pltIndex][key][yIndex], plotSettings, returnCoordinate=True)\n if self.colorMapCoordinates[pltIndex] is not None:\n self.ax.plot(xi, yi, c=cmap.cmap(self.colorMapValues[pltIndex][key][-1][-1]/(maxV-minV)))\n if 'colorbar' not in self.options or self.options['colorbar']['colorbar'] != 'off':\n if self.actcm is None:\n self.actcm = self.fig.colorbar(cmap)\n self.actcm.set_label(self.colorMapCoordinates[pltIndex][0].split('|')[-1].replace(')', ''))\n else:\n self.actcm.draw_all()\n else:\n self.actPlot = self.ax.plot(xi, yi, **plotSettings.get('attributes', {}))\n else:\n for zIndex in range(len(self.zValues[pltIndex][key])):\n if self.zValues[pltIndex][key][zIndex].size <= 3:\n return\n if self.colorMapCoordinates[pltIndex] is not None:\n self.ax.plot(self.xValues[pltIndex][key][xIndex],\n self.yValues[pltIndex][key][yIndex],\n self.zValues[pltIndex][key][zIndex],\n c=cmap.cmap(self.colorMapValues[pltIndex][key][-1][-1]/(maxV-minV)))\n if 'colorbar' not in self.options or self.options['colorbar']['colorbar'] != 'off':\n if self.actcm is None:\n self.actcm = self.fig.colorbar(cmap)\n self.actcm.set_label(self.colorMapCoordinates[pltIndex][0].split('|')[-1].replace(')', ''))\n else:\n self.actcm.draw_all()\n else:\n self.actPlot = self.ax.plot(self.xValues[pltIndex][key][xIndex],\n self.yValues[pltIndex][key][yIndex],\n self.zValues[pltIndex][key][zIndex],\n **plotSettings.get('attributes', {}))\n ##################\n # HISTOGRAM PLOT #\n ##################\n elif self.outStreamTypes[pltIndex] == 'histogram':\n if 'bins' not in plotSettings:\n if self.dim == 2:\n plotSettings['bins'] = '10'\n else:\n plotSettings['bins'] = '4'\n if 'normed' not in plotSettings:\n plotSettings['normed'] = 'False'\n if 'weights' not in plotSettings:\n plotSettings['weights'] = 'None'\n if 'cumulative' not in plotSettings:\n plotSettings['cumulative'] = 'False'\n if 'histtype' not in plotSettings:\n plotSettings['histtype'] = 'bar'\n if 'align' not in plotSettings:\n plotSettings['align'] = 'mid'\n if 'orientation' not in plotSettings:\n plotSettings['orientation'] = 'vertical'\n if 'rwidth' not in plotSettings:\n plotSettings['rwidth'] = 'None'\n if 'log' not in plotSettings:\n plotSettings['log'] = 'None'\n if 'color' not in plotSettings:\n plotSettings['color'] = 'b'\n if 'stacked' not in plotSettings:\n plotSettings['stacked'] = 'None'\n if 
self.sourceData[0].type.strip() == 'HistorySet':\n #####################################################################################################################################\n # @MANDD: This 'if' condition has been added in order to allow the user the correctly create an histogram out of an historySet #\n # If the histogram is created out of the input variables, then the plot has an identical meaning of the one generated by a pointSet #\n # However, if the histogram is created out of the output variables, then the plot consider only the last value of the array #\n #####################################################################################################################################\n data = {}\n data['x'] = np.empty(0)\n data['y'] = np.empty(0)\n for index in range(len(self.outStreamTypes)):\n for key in self.xValues[index]:\n data['x'] = np.append(data['x'], self.xValues[index][key][0][-1])\n if self.dim == 3:\n data['y'] = np.append(data['y'], self.yValues[index][key][0][-1])\n del self.xValues[index]\n self.xValues = {}\n self.xValues[index] = {}\n self.xValues[index][0] = []\n self.xValues[index][0].append(deepcopy(data['x']))\n if self.dim == 3:\n del self.yValues[index]\n self.yValues = {}\n self.yValues[index] ={ }\n self.yValues[index][0] = []\n self.yValues[index][0].append(deepcopy(data['y']))\n\n for key in self.xValues[pltIndex]:\n for xIndex in range(len(self.xValues[pltIndex][key])):\n try:\n colorss = ast.literal_eval(plotSettings['color'])\n # unknown what specific error is anticipated here, but I don't like a bare except...\n # ast.literal_eval can raise the exceptions listed below (see library docs):\n except (ValueError, TypeError, SyntaxError, MemoryError, RecursionError):\n colorss = plotSettings['color']\n if self.dim == 2:\n self.ax.hist(self.xValues[pltIndex][key][xIndex],\n bins=ast.literal_eval(plotSettings['bins']),\n density=ast.literal_eval(plotSettings['normed']),\n weights=ast.literal_eval(plotSettings['weights']),\n cumulative=ast.literal_eval(plotSettings['cumulative']),\n histtype=plotSettings['histtype'],\n align=plotSettings['align'],\n orientation=plotSettings['orientation'],\n rwidth=ast.literal_eval(plotSettings['rwidth']),\n log=ast.literal_eval(plotSettings['log']),\n color=colorss,\n stacked=ast.literal_eval(plotSettings['stacked']),\n **plotSettings.get('attributes', {}))\n else:\n for yIndex in range(len(self.yValues[pltIndex][key])):\n hist, xedges, yedges = np.histogram2d(self.xValues[pltIndex][key][xIndex],\n self.yValues[pltIndex][key][yIndex],\n bins=ast.literal_eval(plotSettings['bins']))\n elements = (len(xedges) - 1) * (len(yedges) - 1)\n if 'x_offset' in plotSettings:\n xoffset = float(plotSettings['x_offset'])\n else:\n xoffset = 0.25\n if 'y_offset' in plotSettings:\n yoffset = float(plotSettings['y_offset'])\n else:\n yoffset = 0.25\n if 'dx' in plotSettings:\n dxs = float(plotSettings['dx'])\n else:\n dxs = (self.xValues[pltIndex][key][xIndex].max() - self.xValues[pltIndex][key][xIndex].min()) / float(plotSettings['bins'])\n if 'dy' in plotSettings:\n dys = float(plotSettings['dy'])\n else:\n dys = (self.yValues[pltIndex][key][yIndex].max() - self.yValues[pltIndex][key][yIndex].min()) / float(plotSettings['bins'])\n xpos, ypos = np.meshgrid(xedges[:-1] + xoffset, yedges[:-1] + yoffset)\n self.actPlot = self.ax.bar3d(xpos.flatten(),\n ypos.flatten(),\n np.zeros(elements),\n dxs*np.ones_like(elements),\n dys*np.ones_like(elements),\n hist.flatten(),\n color=colorss,\n zsort='average',\n 
**plotSettings.get('attributes', {}))\n ##################\n # STEM PLOT #\n ##################\n elif self.outStreamTypes[pltIndex] == 'stem':\n if 'linefmt' not in plotSettings:\n plotSettings['linefmt'] = 'b-'\n if 'markerfmt' not in plotSettings:\n plotSettings['markerfmt'] = 'bo'\n if 'basefmt' not in plotSettings:\n plotSettings['basefmt'] = 'r-'\n for key in self.xValues[pltIndex]:\n for xIndex in range(len(self.xValues[pltIndex][key])):\n for yIndex in range(len(self.yValues[pltIndex][key])):\n if self.dim == 2:\n self.actPlot = self.ax.stem(self.xValues[pltIndex][key][xIndex],\n self.yValues[pltIndex][key][yIndex],\n linefmt=plotSettings['linefmt'],\n markerfmt=plotSettings['markerfmt'],\n basefmt = plotSettings['linefmt'],\n use_line_collection=True,\n **plotSettings.get('attributes', {}))\n else:\n # it is a basic stem plot constructed using a standard line plot. For now we do not use the previous defined keywords...\n for zIndex in range(len(self.zValues[pltIndex][key])):\n for xx, yy, zz in zip(self.xValues[pltIndex][key][xIndex], self.yValues[pltIndex][key][yIndex], self.zValues[pltIndex][key][zIndex]):\n self.ax.plot([xx, xx], [yy, yy], [0, zz], '-')\n ##################\n # STEP PLOT #\n ##################\n elif self.outStreamTypes[pltIndex] == 'step':\n if self.dim == 2:\n if 'where' not in plotSettings:\n plotSettings['where'] = 'mid'\n for key in self.xValues[pltIndex]:\n for xIndex in range(len(self.xValues[pltIndex][key])):\n if self.xValues[pltIndex][key][xIndex].size < 2:\n xi = self.xValues[pltIndex][key][xIndex]\n else:\n xi = np.linspace(self.xValues[pltIndex][key][xIndex].min(), self.xValues[pltIndex][key][xIndex].max(), ast.literal_eval(plotSettings['interpPointsX']))\n for yIndex in range(len(self.yValues[pltIndex][key])):\n if self.yValues[pltIndex][key][yIndex].size <= 3:\n return\n yi = mathUtils.interpolateFunction(self.xValues[pltIndex][key][xIndex], self.yValues[pltIndex][key][yIndex], plotSettings)\n self.actPlot = self.ax.step(xi, yi, where=plotSettings['where'], **plotSettings.get('attributes', {}))\n else:\n self.raiseAWarning('step Plot not available in 3D')\n return\n ########################\n # PSEUDOCOLOR PLOT #\n ########################\n elif self.outStreamTypes[pltIndex] == 'pseudocolor':\n if self.dim == 2:\n for key in self.xValues[pltIndex]:\n for xIndex in range(len(self.xValues[pltIndex][key])):\n # Hopefully, x,y, and z are all the same length, so checking this\n # here should be good enough.\n # The problem is you cannot interpolate any amount of space if\n # you only have a single data point.\n if self.xValues[pltIndex][key][xIndex].size == 1:\n self.raiseAWarning('Nothing to Plot Yet. Continuing to next plot.')\n continue\n for yIndex in range(len(self.yValues[pltIndex][key])):\n if not self.colorMapCoordinates:\n self.raiseAMessage('pseudocolor Plot needs coordinates for color map... 
Returning without plotting')\n return\n for zIndex in range(len(self.colorMapValues[pltIndex][key])):\n if self.colorMapValues[pltIndex][key][zIndex].size <= 3:\n return\n xig, yig, Ci = mathUtils.interpolateFunction(self.xValues[pltIndex][key][xIndex],\n self.yValues[pltIndex][key][yIndex],\n plotSettings,\n z=self.colorMapValues[pltIndex][key][zIndex],\n returnCoordinate=True)\n if plotSettings['cmap'] == 'None':\n self.actPlot = self.ax.pcolormesh(xig,\n yig,\n ma.masked_where(np.isnan(Ci), Ci),\n **plotSettings.get('attributes', {}))\n m = matplotlib.cm.ScalarMappable(cmap=self.actPlot.cmap, norm=self.actPlot.norm)\n else:\n self.actPlot = self.ax.pcolormesh(xig,\n yig,\n ma.masked_where(np.isnan(Ci), Ci),\n cmap=matplotlib.cm.get_cmap(name = plotSettings['cmap']),\n **plotSettings.get('attributes', {}))\n m = matplotlib.cm.ScalarMappable(cmap=self.actPlot.cmap, norm=self.actPlot.norm)\n m.set_array(ma.masked_where(np.isnan(Ci), Ci))\n if 'colorbar' not in self.options or self.options['colorbar']['colorbar'] != 'off':\n actcm = self.fig.colorbar(m)\n actcm.set_label(self.colorMapCoordinates[pltIndex][0].split('|')[-1].replace(')', ''))\n else:\n self.raiseAWarning('pseudocolor Plot is considered a 2D plot, not a 3D!')\n return\n ########################\n # SURFACE PLOT #\n ########################\n elif self.outStreamTypes[pltIndex] == 'surface':\n if self.dim == 2:\n self.raiseAWarning('surface Plot is NOT available for 2D plots, IT IS A 3D!')\n return\n else:\n if 'rstride' not in plotSettings:\n plotSettings['rstride'] = '1'\n if 'cstride' not in plotSettings:\n plotSettings['cstride'] = '1'\n if 'antialiased' not in plotSettings:\n plotSettings['antialiased'] = 'False'\n if 'linewidth' not in plotSettings:\n plotSettings['linewidth'] = '0'\n for key in self.xValues[pltIndex]:\n for xIndex in range(len(self.xValues[pltIndex][key])):\n # Hopefully, x,y, and z are all the same length, so checking this\n # here should be good enough.\n # The problem is you cannot interpolate any amount of space if\n # you only have a single data point.\n if self.xValues[pltIndex][key][xIndex].size == 1:\n self.raiseAWarning('Nothing to Plot Yet. 
Continuing to next plot.')\n continue\n for yIndex in range(len(self.yValues[pltIndex][key])):\n for zIndex in range(len(self.zValues[pltIndex][key])):\n if self.zValues[pltIndex][key][zIndex].size <= 3:\n return\n if self.colorMapCoordinates[pltIndex] is not None:\n xig, yig, Ci = mathUtils.interpolateFunction(self.xValues[pltIndex][key][xIndex],\n self.yValues[pltIndex][key][yIndex],\n plotSettings,\n z=self.colorMapValues[pltIndex][key][zIndex],\n returnCoordinate=True)\n xig, yig, zi = mathUtils.interpolateFunction(self.xValues[pltIndex][key][xIndex],\n self.yValues[pltIndex][key][yIndex],\n plotSettings,\n z=self.zValues[pltIndex][key][zIndex],\n returnCoordinate=True)\n if self.colorMapCoordinates[pltIndex] is not None:\n if self.actcm:\n first = False\n else:\n first = True\n if plotSettings['cmap'] == 'None':\n plotSettings['cmap'] = 'jet'\n self.actPlot = self.ax.plot_surface(xig,\n yig,\n ma.masked_where(np.isnan(zi), zi),\n rstride=ast.literal_eval(plotSettings['rstride']),\n cstride=ast.literal_eval(plotSettings['cstride']),\n facecolors=matplotlib.cm.get_cmap(name=plotSettings['cmap'])(ma.masked_where(np.isnan(Ci), Ci)),\n cmap=matplotlib.cm.get_cmap(name = plotSettings['cmap']),\n linewidth=ast.literal_eval(plotSettings['linewidth']),\n antialiased=ast.literal_eval(plotSettings['antialiased']),\n **plotSettings.get('attributes', {}))\n if first:\n self.actPlot.cmap = matplotlib.cm.get_cmap(name=plotSettings['cmap'])\n if 'colorbar' not in self.options or self.options['colorbar']['colorbar'] != 'off':\n if first:\n m = matplotlib.cm.ScalarMappable(cmap = self.actPlot.cmap, norm = self.actPlot.norm)\n m.set_array(self.colorMapValues[pltIndex][key])\n self.actcm = self.fig.colorbar(m)\n self.actcm.set_label(self.colorMapCoordinates[pltIndex][0].split('|')[-1].replace(')', ''))\n else:\n m = matplotlib.cm.ScalarMappable(cmap=self.actPlot.cmap, norm=self.actPlot.norm)\n m.set_clim(vmin=min(self.colorMapValues[pltIndex][key][-1]), vmax=max(self.colorMapValues[pltIndex][key][-1]))\n self.actcm.draw_all()\n else:\n if plotSettings['cmap'] == 'None':\n self.actPlot = self.ax.plot_surface(xig,\n yig,\n ma.masked_where(np.isnan(zi), zi),\n rstride=ast.literal_eval(plotSettings['rstride']),\n cstride=ast.literal_eval(plotSettings['cstride']),\n linewidth=ast.literal_eval(plotSettings['linewidth']),\n antialiased=ast.literal_eval(plotSettings['antialiased']),\n **plotSettings.get('attributes', {}))\n if 'color' in plotSettings.get('attributes', {}):\n self.actPlot.set_color = plotSettings.get('attributes', {})['color']\n else:\n self.actPlot.set_color = 'blue'\n else:\n self.actPlot = self.ax.plot_surface(xig,\n yig,\n ma.masked_where(np.isnan(zi), zi),\n rstride=ast.literal_eval(plotSettings['rstride']),\n cstride=ast.literal_eval(plotSettings['cstride']),\n cmap=matplotlib.cm.get_cmap(name = plotSettings['cmap']),\n linewidth=ast.literal_eval(plotSettings['linewidth']),\n antialiased=ast.literal_eval(plotSettings['antialiased']),\n **plotSettings.get('attributes', {}))\n ########################\n # TRI-SURFACE PLOT #\n ########################\n elif self.outStreamTypes[pltIndex] == 'tri-surface':\n if self.dim == 2:\n self.raiseAWarning('TRI-surface Plot is NOT available for 2D plots, it is 3D!')\n return\n else:\n if 'color' not in plotSettings:\n plotSettings['color'] = 'b'\n if 'shade' not in plotSettings:\n plotSettings['shade'] = 'False'\n for key in self.xValues[pltIndex]:\n for xIndex in range(len(self.xValues[pltIndex][key])):\n # Hopefully, x,y, and z are all the same length, 
so checking this\n # here should be good enough.\n # The problem is you cannot interpolate any amount of space if\n # you only have a single data point.\n if self.xValues[pltIndex][key][xIndex].size == 1:\n self.raiseAWarning('Nothing to Plot Yet. Continuing to next plot.')\n continue\n for yIndex in range(len(self.yValues[pltIndex][key])):\n for zIndex in range(len(self.zValues[pltIndex][key])):\n metric = (self.xValues[pltIndex][key][xIndex] ** 2 + self.yValues[pltIndex][key][yIndex] ** 2) ** 0.5\n metricIndeces = np.argsort(metric)\n xs = np.zeros(self.xValues[pltIndex][key][xIndex].shape)\n ys = np.zeros(self.yValues[pltIndex][key][yIndex].shape)\n zs = np.zeros(self.zValues[pltIndex][key][zIndex].shape)\n for sindex in range(len(metricIndeces)):\n xs[sindex] = self.xValues[pltIndex][key][xIndex][metricIndeces[sindex]]\n ys[sindex] = self.yValues[pltIndex][key][yIndex][metricIndeces[sindex]]\n zs[sindex] = self.zValues[pltIndex][key][zIndex][metricIndeces[sindex]]\n surfacePlotOptions = {'color': plotSettings['color'],\n 'shade': ast.literal_eval(plotSettings['shade'])}\n surfacePlotOptions.update(plotSettings.get('attributes', {}))\n if self.zValues[pltIndex][key][zIndex].size <= 3:\n return\n if self.colorMapCoordinates[pltIndex] is not None:\n if self.actcm:\n first = False\n else:\n first = True\n if plotSettings['cmap'] == 'None':\n plotSettings['cmap'] = 'jet'\n surfacePlotOptions['cmap'] = matplotlib.cm.get_cmap(name = plotSettings['cmap'])\n self.actPlot = self.ax.plot_trisurf(xs, ys, zs, **surfacePlotOptions)\n if 'colorbar' not in self.options or self.options['colorbar']['colorbar'] != 'off':\n if first:\n self.actPlot.cmap = matplotlib.cm.get_cmap(name=plotSettings['cmap'])\n m = matplotlib.cm.ScalarMappable(cmap=self.actPlot.cmap, norm=self.actPlot.norm)\n m.set_array(self.colorMapValues[pltIndex][key])\n self.actcm = self.fig.colorbar(m)\n self.actcm.set_label(self.colorMapCoordinates[pltIndex][0].split('|')[-1].replace(')', ''))\n else:\n m = matplotlib.cm.ScalarMappable(cmap=self.actPlot.cmap, norm=self.actPlot.norm)\n m.set_clim(vmin=min(self.colorMapValues[pltIndex][key][-1]), vmax=max(self.colorMapValues[pltIndex][key][-1]))\n self.actcm.draw_all()\n else:\n if plotSettings['cmap'] != 'None':\n surfacePlotOptions[\"cmap\"] = matplotlib.cm.get_cmap(name=plotSettings['cmap'])\n self.actPlot = self.ax.plot_trisurf(xs, ys, zs, **surfacePlotOptions)\n ########################\n # WIREFRAME PLOT #\n ########################\n elif self.outStreamTypes[pltIndex] == 'wireframe':\n if self.dim == 2:\n self.raiseAWarning('wireframe Plot is NOT available for 2D plots, IT IS A 3D!')\n return\n else:\n if 'rstride' not in plotSettings:\n plotSettings['rstride'] = '1'\n if 'cstride' not in plotSettings:\n plotSettings['cstride'] = '1'\n for key in self.xValues[pltIndex]:\n for xIndex in range(len(self.xValues[pltIndex][key])):\n # Hopefully, x,y, and z are all the same length, so checking this\n # here should be good enough.\n # The problem is you cannot interpolate any amount of space if\n # you only have a single data point.\n if self.xValues[pltIndex][key][xIndex].size == 1:\n self.raiseAWarning('Nothing to Plot Yet. 
Continuing to next plot.')\n continue\n for yIndex in range(len(self.yValues[pltIndex][key])):\n for zIndex in range(len(self.zValues[pltIndex][key])):\n if self.zValues[pltIndex][key][zIndex].size <= 3:\n return\n if self.colorMapCoordinates[pltIndex] is not None:\n xig, yig, Ci = mathUtils.interpolateFunction(self.xValues[pltIndex][key][xIndex],\n self.yValues[pltIndex][key][yIndex],\n plotSettings,\n z=self.colorMapValues[pltIndex][key][zIndex],\n returnCoordinate=True)\n xig, yig, zi = mathUtils.interpolateFunction(self.xValues[pltIndex][key][xIndex],\n self.yValues[pltIndex][key][yIndex],\n plotSettings,\n z=self.zValues[pltIndex][key][zIndex],\n returnCoordinate=True)\n if self.colorMapCoordinates[pltIndex] is not None:\n self.raiseAWarning(f'Currently, ax.plot_wireframe() in MatPlotLib version: {matplotlib.__version__} does not support a colormap! Wireframe plotted on a surface plot...')\n if self.actcm:\n first = False\n else:\n first = True\n if plotSettings['cmap'] == 'None':\n plotSettings['cmap'] = 'jet'\n self.actPlot = self.ax.plot_wireframe(xig,\n yig,\n ma.masked_where(np.isnan(zi), zi),\n rstride=ast.literal_eval(plotSettings['rstride']),\n cmap=matplotlib.cm.get_cmap(name = plotSettings['cmap']),\n cstride=ast.literal_eval(plotSettings['cstride']),\n **plotSettings.get('attributes', {}))\n self.actPlot = self.ax.plot_surface(xig,\n yig,\n ma.masked_where(np.isnan(zi), zi),\n alpha=0.4,\n rstride=ast.literal_eval(plotSettings['rstride']),\n cmap=matplotlib.cm.get_cmap(name=plotSettings['cmap']),\n cstride=ast.literal_eval(plotSettings['cstride']),\n **plotSettings.get('attributes', {}))\n if 'colorbar' not in self.options or self.options['colorbar']['colorbar'] != 'off':\n if first:\n m = matplotlib.cm.ScalarMappable(cmap=self.actPlot.cmap, norm=self.actPlot.norm)\n m.set_array(self.colorMapValues[pltIndex][key])\n self.actcm = self.fig.colorbar(m)\n self.actcm.set_label(self.colorMapCoordinates[pltIndex][0].split('|')[-1].replace(')', ''))\n else:\n m = matplotlib.cm.ScalarMappable(cmap=self.actPlot.cmap, norm=self.actPlot.norm)\n m.set_clim(vmin=min(self.colorMapValues[pltIndex][key][-1]), vmax=max(self.colorMapValues[pltIndex][key][-1]))\n self.actcm.draw_all()\n else:\n if plotSettings['cmap'] == 'None':\n self.actPlot = self.ax.plot_wireframe(xig,\n yig,\n ma.masked_where(np.isnan(zi), zi),\n rstride=ast.literal_eval(plotSettings['rstride']),\n cstride=ast.literal_eval(plotSettings['cstride']),\n **plotSettings.get('attributes', {}))\n if 'color' in plotSettings.get('attributes', {}):\n self.actPlot.set_color = plotSettings.get('attributes', {})['color']\n else:\n self.actPlot.set_color = 'blue'\n else:\n self.actPlot = self.ax.plot_wireframe(xig,\n yig,\n ma.masked_where(np.isnan(zi), zi),\n rstride=ast.literal_eval(plotSettings['rstride']),\n cstride=ast.literal_eval(plotSettings['cstride']),\n **plotSettings.get('attributes', {}))\n ########################\n # CONTOUR PLOT #\n ########################\n elif self.outStreamTypes[pltIndex] == 'contour' or self.outStreamTypes[pltIndex] == 'filledContour':\n if self.dim == 2:\n if 'numberBins' in plotSettings:\n nbins = int(plotSettings['numberBins'])\n else:\n nbins = 5\n for key in self.xValues[pltIndex]:\n if not self.colorMapCoordinates:\n self.raiseAWarning(self.outStreamTypes[pltIndex] + ' Plot needs coordinates for color map... 
Returning without plotting')\n return\n for xIndex in range(len(self.xValues[pltIndex][key])):\n # Hopefully, x,y, and z are all the same length, so checking this\n # here should be good enough.\n # The problem is you cannot interpolate any amount of space if\n # you only have a single data point.\n if self.xValues[pltIndex][key][xIndex].size == 1:\n self.raiseAWarning('Nothing to Plot Yet. Continuing to next plot.')\n continue\n for yIndex in range(len(self.yValues[pltIndex][key])):\n for zIndex in range(len(self.colorMapValues[pltIndex][key])):\n if self.actcm:\n first = False\n else:\n first = True\n xig, yig, Ci = mathUtils.interpolateFunction(self.xValues[pltIndex][key][xIndex],\n self.yValues[pltIndex][key][yIndex],\n plotSettings,\n z=self.colorMapValues[pltIndex][key][zIndex],\n returnCoordinate=True)\n if self.outStreamTypes[pltIndex] == 'contour':\n if plotSettings['cmap'] == 'None':\n if 'color' in plotSettings.get('attributes', {}):\n color = plotSettings.get('attributes', {})['color']\n else:\n color = 'blue'\n self.actPlot = self.ax.contour(xig,\n yig,\n ma.masked_where(np.isnan(Ci), Ci),\n nbins,\n colors=color,\n **plotSettings.get('attributes', {}))\n else:\n self.actPlot = self.ax.contour(xig,\n yig,\n ma.masked_where(np.isnan(Ci), Ci),\n nbins,\n **plotSettings.get('attributes', {}))\n else:\n if plotSettings['cmap'] == 'None':\n plotSettings['cmap'] = 'jet'\n self.actPlot = self.ax.contourf(xig,\n yig,\n ma.masked_where(np.isnan(Ci), Ci),\n nbins,\n **plotSettings.get('attributes', {}))\n self.ax.clabel(self.actPlot, inline=1, fontsize=10)\n if 'colorbar' not in self.options or self.options['colorbar']['colorbar'] != 'off':\n if first:\n self.actcm = self.fig.colorbar(self.actPlot, shrink=0.8, extend='both')\n self.actcm.set_label(self.colorMapCoordinates[pltIndex][0].split('|')[-1].replace(')', ''))\n else:\n m = matplotlib.cm.ScalarMappable(cmap = self.actPlot.cmap, norm = self.actPlot.norm)\n m.set_clim(vmin = min(self.colorMapValues[pltIndex][key][-1]), vmax = max(self.colorMapValues[pltIndex][key][-1]))\n self.actcm.draw_all()\n else:\n self.raiseAWarning('contour/filledContour is a 2-D plot, where x,y are the surface coordinates and colorMap vector is the array to visualize!\\n contour3D/filledContour3D are 3-D! ')\n return\n # These should be combined: ^^^ & vvv\n elif self.outStreamTypes[pltIndex] == 'contour3D' or self.outStreamTypes[pltIndex] == 'filledContour3D':\n if self.dim == 2:\n self.raiseAWarning('contour3D/filledContour3D Plot is NOT available for 2D plots, IT IS A 2D! Check \"contour/filledContour\"!')\n return\n else:\n if 'numberBins' in plotSettings:\n nbins = int(plotSettings['numberBins'])\n else:\n nbins = 5\n if 'extend3D' in plotSettings:\n ext3D = bool(plotSettings['extend3D'])\n else:\n ext3D = False\n for key in self.xValues[pltIndex]:\n for xIndex in range(len(self.xValues[pltIndex][key])):\n # Hopefully, x,y, and z are all the same length, so checking this\n # here should be good enough.\n # The problem is you cannot interpolate any amount of space if\n # you only have a single data point.\n if self.xValues[pltIndex][key][xIndex].size == 1:\n self.raiseAWarning('Nothing to Plot Yet. 
Continuing to next plot.')\n continue\n for yIndex in range(len(self.yValues[pltIndex][key])):\n for zIndex in range(len(self.colorMapValues[pltIndex][key])):\n if self.actcm:\n first = False\n else:\n first = True\n xig, yig, Ci = mathUtils.interpolateFunction(self.xValues[pltIndex][key][xIndex],\n self.yValues[pltIndex][key][yIndex],\n plotSettings,\n z=self.colorMapValues[pltIndex][key][zIndex],\n returnCoordinate=True)\n if self.outStreamTypes[pltIndex] == 'contour3D':\n if plotSettings['cmap'] == 'None':\n if 'color' in plotSettings.get('attributes', {}):\n color = plotSettings.get('attributes', {})['color']\n else:\n color = 'blue'\n self.actPlot = self.ax.contour3D(xig,\n yig,\n ma.masked_where(np.isnan(Ci), Ci),\n nbins,\n colors=color,\n extend3d=ext3D,\n **plotSettings.get('attributes', {}))\n else:\n self.actPlot = self.ax.contour3D(xig,\n yig,\n ma.masked_where(np.isnan(Ci), Ci),\n nbins,\n extend3d=ext3D,\n cmap=matplotlib.cm.get_cmap(name=plotSettings['cmap']),\n **plotSettings.get('attributes', {}))\n else:\n if plotSettings['cmap'] == 'None':\n plotSettings['cmap'] = 'jet'\n self.actPlot = self.ax.contourf3D(xig,\n yig,\n ma.masked_where(np.isnan(Ci), Ci),\n nbins,\n cmap=matplotlib.cm.get_cmap(name=plotSettings['cmap']),\n **plotSettings.get('attributes', {}))\n self.ax.clabel(self.actPlot, inline=1, fontsize=10)\n if 'colorbar' not in self.options or self.options['colorbar']['colorbar'] != 'off':\n if first:\n self.actcm = self.fig.colorbar(self.actPlot, shrink = 0.8, extend = 'both')\n self.actcm.set_label(self.colorMapCoordinates[pltIndex][0].split('|')[-1].replace(')', ''))\n else:\n m = matplotlib.cm.ScalarMappable(cmap = self.actPlot.cmap, norm = self.actPlot.norm)\n m.set_clim(vmin = min(self.colorMapValues[pltIndex][key][-1]), vmax = max(self.colorMapValues[pltIndex][key][-1]))\n self.actcm.draw_all()\n ########################\n # DataMining PLOT #\n ########################\n elif self.outStreamTypes[pltIndex] == 'dataMining':\n colors = cycle(['#88CCEE', '#DDCC77', '#AA4499', '#117733', '#332288', '#999933', '#44AA99', '#882255', '#CC6677', '#CD6677', '#DC6877', '#886677', '#AA6677', '#556677', '#CD7865'])\n if 's' not in plotSettings:\n plotSettings['s'] = '20'\n if 'c' not in plotSettings:\n plotSettings['c'] = 'b'\n if 'marker' not in plotSettings:\n plotSettings['marker'] = 'o'\n if 'alpha' not in plotSettings:\n plotSettings['alpha'] = 'None'\n if 'linewidths' not in plotSettings:\n plotSettings['linewidths'] = 'None'\n clusterDict[pltIndex] = {}\n for key in self.xValues[pltIndex]:\n for xIndex in range(len(self.xValues[pltIndex][key])):\n for yIndex in range(len(self.yValues[pltIndex][key])):\n dataMiningPlotOptions = {'s': ast.literal_eval(plotSettings['s']),\n 'marker': (plotSettings['marker']),\n 'alpha': ast.literal_eval(plotSettings['alpha']),\n 'linewidths': ast.literal_eval(plotSettings['linewidths'])}\n if self.colorMapCoordinates[pltIndex] is not None:\n self.raiseAWarning('ColorMap values supplied, however DataMining plots do not use colorMap from input.')\n if plotSettings['cmap'] == 'None':\n self.raiseAWarning('ColorSet supplied, however DataMining plots do not use color set from input.')\n if 'cluster' == plotSettings['SKLtype']:\n # TODO: include the cluster Centers to the plot\n if 'noClusters' in plotSettings.get('attributes', {}):\n clusterDict[pltIndex]['noClusters'] = int(plotSettings.get('attributes', {})['noClusters'])\n plotSettings.get('attributes', {}).pop('noClusters')\n else:\n clusterDict[pltIndex]['noClusters'] = 
np.amax(self.clusterValues[pltIndex][1][0]) + 1\n dataMiningPlotOptions.update(plotSettings.get('attributes', {}))\n if self.dim == 2:\n clusterDict[pltIndex]['clusterValues'] = np.zeros(shape=(len(self.xValues[pltIndex][key][xIndex]), 2))\n else:\n clusterDict[pltIndex]['clusterValues'] = np.zeros(shape=(len(self.xValues[pltIndex][key][xIndex]), 3))\n clusterDict[pltIndex]['clusterValues'][:, 0] = self.xValues[pltIndex][key][xIndex]\n clusterDict[pltIndex]['clusterValues'][:, 1] = self.yValues[pltIndex][key][yIndex]\n if self.dim == 2:\n for k, col in zip(range(int(clusterDict[pltIndex]['noClusters'])), colors):\n myMembers = self.clusterValues[pltIndex][1][0] == k\n self.actPlot = self.ax.scatter(clusterDict[pltIndex]['clusterValues'][myMembers, 0],\n clusterDict[pltIndex]['clusterValues'][myMembers, 1],\n color=col,\n **dataMiningPlotOptions)\n\n # Handle all of the outlying data\n myMembers = self.clusterValues[pltIndex][1][0] == -1\n # resize the points\n dataMiningPlotOptions['s'] /= 2\n # and hollow out their markers\n if 'facecolors' in dataMiningPlotOptions:\n faceColors = dataMiningPlotOptions['facecolors']\n else:\n faceColors = None\n dataMiningPlotOptions['facecolors'] = 'none'\n\n self.actPlot = self.ax.scatter(clusterDict[pltIndex]['clusterValues'][myMembers, 0],\n clusterDict[pltIndex]['clusterValues'][myMembers, 1],\n color='#000000',\n **dataMiningPlotOptions)\n\n # Restore the plot options to their original values\n dataMiningPlotOptions['s'] *= 2\n if faceColors is not None:\n dataMiningPlotOptions['facecolors'] = faceColors\n else:\n del dataMiningPlotOptions['facecolors']\n\n else:\n for zIndex in range(len(self.zValues[pltIndex][key])):\n clusterDict[pltIndex]['clusterValues'][:, 2] = self.zValues[pltIndex][key][zIndex]\n for k, col in zip(range(int(clusterDict[pltIndex]['noClusters'])), colors):\n myMembers = self.clusterValues[pltIndex][1][0] == k\n self.actPlot = self.ax.scatter(clusterDict[pltIndex]['clusterValues'][myMembers, 0],\n clusterDict[pltIndex]['clusterValues'][myMembers, 1],\n clusterDict[pltIndex]['clusterValues'][myMembers, 2],\n color=col,\n **dataMiningPlotOptions)\n\n # Handle all of the outlying data\n myMembers = self.clusterValues[pltIndex][1][0] == -1\n # resize the points\n dataMiningPlotOptions['s'] /= 2\n # and hollow out their markers\n if 'facecolors' in dataMiningPlotOptions:\n faceColors = dataMiningPlotOptions['facecolors']\n else:\n faceColors = None\n dataMiningPlotOptions['facecolors'] = 'none'\n\n self.actPlot = self.ax.scatter(clusterDict[pltIndex]['clusterValues'][myMembers, 0],\n clusterDict[pltIndex]['clusterValues'][myMembers, 1],\n clusterDict[pltIndex]['clusterValues'][myMembers, 2],\n color='#000000',\n **dataMiningPlotOptions)\n\n # Restore the plot options to their original values\n dataMiningPlotOptions['s'] *= 2\n if faceColors is not None:\n dataMiningPlotOptions['facecolors'] = faceColors\n else:\n del dataMiningPlotOptions['facecolors']\n\n elif 'bicluster' == plotSettings['SKLtype']:\n self.raiseAnError(IOError, 'SKLType Bi-Cluster Plots are not implemented yet!..')\n elif 'mixture' == plotSettings['SKLtype']:\n if 'noMixtures' in plotSettings.get('attributes', {}):\n clusterDict[pltIndex]['noMixtures'] = int(plotSettings.get('attributes', {})['noMixtures'])\n plotSettings.get('attributes', {}).pop('noMixtures')\n else:\n clusterDict[pltIndex]['noMixtures'] = np.amax(self.mixtureValues[pltIndex][1][0]) + 1\n if self.dim == 3:\n self.raiseAnError(IOError, 'SKLType Mixture Plots are only available in 2-Dimensions')\n 
else:\n clusterDict[pltIndex]['mixtureValues'] = np.zeros(shape = (len(self.xValues[pltIndex][key][xIndex]), 2))\n clusterDict[pltIndex]['mixtureValues'][:, 0] = self.xValues[pltIndex][key][xIndex]\n clusterDict[pltIndex]['mixtureValues'][:, 1] = self.yValues[pltIndex][key][yIndex]\n if 'mixtureCovars' in plotSettings.get('attributes', {}):\n split = self.__splitVariableNames('mixtureCovars', (pltIndex, 0))\n # mixtureCovars = self.sourceData[pltIndex].getParam(split[1], split[2], nodeId = 'ending')\n plotSettings.get('attributes', {}).pop('mixtureCovars')\n # else:\n # mixtureCovars = None\n if 'mixtureMeans' in plotSettings.get('attributes', {}):\n split = self.__splitVariableNames('mixtureMeans', (pltIndex, 0))\n # mixtureMeans = self.sourceData[pltIndex].getParam(split[1], split[2], nodeId = 'ending')\n plotSettings.get('attributes', {}).pop('mixtureMeans')\n # else:\n # mixtureMeans = None\n # mixtureCovars.reshape(3, 4)\n # mixtureMeans.reshape(3, 4)\n # for i, (mean, covar, col) in enumerate(zip(mixtureMeans, mixtureCovars, colors)):\n for i, col in zip(range(clusterDict[pltIndex]['noMixtures']), colors):\n if not np.any(self.mixtureValues[pltIndex][1][0] == i):\n continue\n myMembers = self.mixtureValues[pltIndex][1][0] == i\n self.actPlot = self.ax.scatter(clusterDict[pltIndex]['mixtureValues'][myMembers, 0],\n clusterDict[pltIndex]['mixtureValues'][myMembers, 1],\n color=col,\n **dataMiningPlotOptions)\n elif 'manifold' == plotSettings['SKLtype']:\n if self.dim == 2:\n manifoldValues = np.zeros(shape=(len(self.xValues[pltIndex][key][xIndex]), 2))\n else:\n manifoldValues = np.zeros(shape=(len(self.xValues[pltIndex][key][xIndex]), 3))\n manifoldValues[:, 0] = self.xValues[pltIndex][key][xIndex]\n manifoldValues[:, 1] = self.yValues[pltIndex][key][yIndex]\n if 'clusterLabels' in plotSettings.get('attributes', {}):\n split = self.__splitVariableNames('clusterLabels', (pltIndex, 0))\n clusterDict[pltIndex]['clusterLabels'] = self.sourceData[pltIndex].getParam(split[1], split[2], nodeId = 'ending')\n plotSettings.get('attributes', {}).pop('clusterLabels')\n else:\n clusterDict[pltIndex]['clusterLabels'] = None\n if 'noClusters' in plotSettings.get('attributes', {}):\n clusterDict[pltIndex]['noClusters'] = int(plotSettings.get('attributes', {})['noClusters'])\n plotSettings.get('attributes', {}).pop('noClusters')\n else:\n clusterDict[pltIndex]['noClusters'] = np.amax(self.clusterValues[pltIndex][1][0]) + 1\n if self.clusterValues[pltIndex][1][0] is not None:\n if self.dim == 2:\n for k, col in zip(range(clusterDict[pltIndex]['noClusters']), colors):\n myMembers = self.clusterValues[pltIndex][1][0] == k\n self.actPlot = self.ax.scatter(manifoldValues[myMembers, 0],\n manifoldValues[myMembers, 1],\n color=col,\n **dataMiningPlotOptions)\n else:\n for zIndex in range(len(self.zValues[pltIndex][key])):\n manifoldValues[:, 2] = self.zValues[pltIndex][key][zIndex]\n for k, col in zip(range(clusterDict[pltIndex]['noClusters']), colors):\n myMembers = self.clusterValues[pltIndex][1][0] == k\n self.actPlot = self.ax.scatter(manifoldValues[myMembers, 0],\n manifoldValues[myMembers, 1],\n manifoldValues[myMembers, 2],\n color=col,\n **dataMiningPlotOptions)\n else:\n if self.dim == 2:\n self.actPlot = self.ax.scatter(manifoldValues[:, 0],\n manifoldValues[:, 1],\n **dataMiningPlotOptions)\n else:\n for zIndex in range(len(self.zValues[pltIndex][key])):\n manifoldValues[:, 2] = self.zValues[pltIndex][key][zIndex]\n self.actPlot = self.ax.scatter(manifoldValues[:, 0],\n manifoldValues[:, 1],\n 
manifoldValues[:, 2],\n **dataMiningPlotOptions)\n elif 'decomposition' == plotSettings['SKLtype']:\n if self.dim == 2:\n decompositionValues = np.zeros(shape = (len(self.xValues[pltIndex][key][xIndex]), 2))\n else:\n decompositionValues = np.zeros(shape = (len(self.xValues[pltIndex][key][xIndex]), 3))\n decompositionValues[:, 0] = self.xValues[pltIndex][key][xIndex]\n decompositionValues[:, 1] = self.yValues[pltIndex][key][yIndex]\n if 'noClusters' in plotSettings.get('attributes', {}):\n clusterDict[pltIndex]['noClusters'] = int(plotSettings.get('attributes', {})['noClusters'])\n plotSettings.get('attributes', {}).pop('noClusters')\n else:\n clusterDict[pltIndex]['noClusters'] = np.amax(self.clusterValues[pltIndex][1][0]) + 1\n if self.clusterValues[pltIndex][1][0] is not None:\n if self.dim == 2:\n for k, col in zip(range(clusterDict[pltIndex]['noClusters']), colors):\n myMembers = self.clusterValues[pltIndex][1][0] == k\n self.actPlot = self.ax.scatter(decompositionValues[myMembers, 0],\n decompositionValues[myMembers, 1],\n color=col,\n **dataMiningPlotOptions)\n else:\n for zIndex in range(len(self.zValues[pltIndex][key])):\n decompositionValues[:, 2] = self.zValues[pltIndex][key][zIndex]\n for k, col in zip(range(clusterDict[pltIndex]['noClusters']), colors):\n myMembers = self.clusterValues[pltIndex][1][0] == k\n self.actPlot = self.ax.scatter(decompositionValues[myMembers, 0],\n decompositionValues[myMembers, 1],\n decompositionValues[myMembers, 2],\n color=col,\n **dataMiningPlotOptions)\n else:\n # no ClusterLabels\n if self.dim == 2:\n self.actPlot = self.ax.scatter(decompositionValues[:, 0],\n decompositionValues[:, 1],\n **dataMiningPlotOptions)\n else:\n for zIndex in range(len(self.zValues[pltIndex][key])):\n decompositionValues[:, 2] = self.zValues[pltIndex][key][zIndex]\n self.actPlot = self.ax.scatter(decompositionValues[:, 0],\n decompositionValues[:, 1],\n decompositionValues[:, 2],\n **dataMiningPlotOptions)\n else:\n # Let's try to \"write\" the code for the plot on the fly\n self.raiseAWarning('Trying to create a non-predefined plot of type ' + self.outStreamTypes[pltIndex] + '. If this fails, please refer to the and/or the related matplotlib method specification.')\n kwargs = {}\n for kk in plotSettings:\n if kk != 'attributes' and kk != self.outStreamTypes[pltIndex]:\n try:\n kwargs[kk] = ast.literal_eval(plotSettings[kk])\n except ValueError:\n kwargs[kk] = plotSettings[kk]\n try:\n if self.dim == 2:\n customFunctionCall = getattr(self.ax, self.outStreamTypes[pltIndex])\n else:\n customFunctionCall = getattr(self.ax, self.outStreamTypes[pltIndex])\n self.actPlot = customFunctionCall(**kwargs)\n except AttributeError as ae:\n self.raiseAnError(RuntimeError, '<' + str(ae) + '> -> in execution custom plot \"' + self.outStreamTypes[pltIndex] + '\" in Plot ' + self.name + '.\\nSTREAM MANAGER: ERROR -> command has been called in the following way: ' + 'ax.' 
+ self.outStreamTypes[pltIndex])\n\n if 'legend' in self.options['plotSettings']:\n self.fig.legend(**self.options['plotSettings']['legend'])\n\n # SHOW THE PICTURE\n self.__executeActions()\n self.fig.canvas.draw_idle()\n\n if 'screen' in self.destinations and display:\n def handle_close(event):\n \"\"\"\n This method is aimed to handle the closing of figures (overall when in interactive mode)\n @ In, event, instance, the event to close\n @ Out, None\n \"\"\"\n self.fig.canvas.stop_event_loop()\n self.raiseAMessage('Closed Figure')\n self.fig.canvas.mpl_connect('close_event', handle_close)\n # self.plt.pause(1e-6)\n # The following code is extracted from pyplot.pause without actually\n # needing to force the code to sleep, according to MPL's documentation,\n # this feature is experimental, hopefully by not calling the pause\n # function, we can obtain consistent results.\n # We are skipping a few of the sanity checks done in that function,\n # since we are sure we have an interactive backend and access to the\n # correct type of canvas and figure.\n self.fig.canvas.draw()\n # If your graphs are unresponsive to user input, you may want to consider\n # adjusting this timeout, to allow more time for the input to be handled.\n self.fig.canvas.start_event_loop(1e-3)\n\n # self.fig.canvas.flush_events()\n\n for fileType in self.destinations:\n if fileType == 'screen':\n continue\n\n if not self.overwrite:\n prefix = str(self.counter) + '-'\n else:\n prefix = ''\n\n if len(self.filename) > 0:\n name = self.filename\n else:\n name = prefix + self.name + '_' + str(self.outStreamTypes).replace(\"'\", \"\").replace(\"[\", \"\").replace(\"]\", \"\").replace(\",\", \"-\").replace(\" \", \"\")\n\n if self.subDirectory is not None:\n name = os.path.join(self.subDirectory,name)\n\n self.fig.savefig(name + '.' 
+ fileType, format=fileType)\n\n if 'screen' not in self.destinations:\n plt.close(fig=self.fig)\n\n gc.collect()", "def _process(self, data: np.ndarray) -> np.ndarray:", "def _process(self, data: np.ndarray) -> np.ndarray:", "def _process_plot_data(self, pattern, data, timestamps):\n raise NotImplementedError(\"Implement _process_plot_data(self, pattern, data) in subclass\")", "def plot(self, finished=False):\n send = self.sender.send\n if finished:\n send(None)\n else:\n data = (self.args['time'], self.args['max'],\n self.args['mean'], self.args['sd'],\n self.args['fitness'], self.args['population'],\n self.args['dataset'], self.args['current_gen'],\n self.args['generations'])\n send(data)", "def run(self, data):\n\t\treduced_data = PCA(n_components=2).fit_transform(data)\n\n\t\t# Run the algorithm\n\t\tself.estimator.fit_transform(reduced_data)\n\n\t\t# Save all relevent properties\n\t\tself.input_data = data\n\t\tself.centroids = self.estimator.cluster_centers_\n\t\tself.node_positions = reduced_data\n\t\tself.labels = self.estimator.labels_\n\n\t\t# Enable visualising when debugging\n\t\t# self.visualize(reduced_data)", "def plot(self):\n pass", "def run(self):\n\n try:\n self.parent.setEnabled(False)\n\n # Run DataLab processing; compute and write requested logger statistics and spectrograms\n if self.processing_mode == \"screening\":\n self.processing_hub.run_screening()\n elif self.processing_mode == \"integration\":\n self.processing_hub.run_ts_integration()\n\n # Emit processed results to outside worker to present in gui\n self.signal_screening_output_to_gui.emit(self.processing_hub)\n except ValueError as e:\n self.signal_error.emit(str(e))\n logging.exception(e)\n except TypeError as e:\n self.signal_error.emit(str(e))\n logging.exception(e)\n except ZeroDivisionError as e:\n self.signal_error.emit(str(e))\n logging.exception(e)\n except Exception as e:\n msg = \"Unexpected error during processing\"\n self.signal_error.emit(f\"{msg}:\\n{e}\\n{sys.exc_info()[0]}\")\n logging.exception(e)\n finally:\n self.parent.setEnabled(True)\n self.parent.statusbar.showMessage(\"\")", "def plot_finalize():\n global figure\n global axes\n\n plot_refresh()\n plt.ioff()\n plt.show()\n\n figure, axes = None, None", "def teardown(self):\n self.wf.write_graph(dotfilename = self.test_path / \"wf_diagram\", graph2use=\"orig\")\n self.wf.run()\n \n\n self.helpers.plot_timeseries(\n self.export_path, self.sample_raw_image, \n highlight_ranges=self.highlight_ranges,\n num_figs=1\n )\n\n if self.plot_img:\n self.helpers.plot_4D_img_slice(self.export_path, \"sample_processed.png\")", "def run_pipeline(directory):\n\n # io = IO(path)\n # df = io.load_cleaned_file(download_always=False)\n # df = add_choke_events(df)\n\n # Add calls to features.Xxx here\n\n #directory = main_directory\n site=os.listdir(directory)\n site_dicom={}\n site_dicom_sub={}\n site_sub_files={}\n i,k,j=0,0,0\n for filename in site:\n site_dicom[i]=directory+'/'+filename+'/DICOM-raw'\n temporary_path=os.listdir(site_dicom[i])\n\n for another_file in temporary_path:\n site_dicom_sub[j]=site_dicom[i]+'/'+another_file+'/scans'\n temporary_path_1 = os.listdir(site_dicom_sub[j])\n for another_file_1 in temporary_path_1:\n site_sub_files[k]=site_dicom_sub[j]+'/'+another_file_1+'/'\n k=k+1\n j = j + 1\n i=i+1\n splitted={}\n output_mif={}\n for i in range (len(site_sub_files)):\n splitted[i]=site_sub_files[i].split('/')\n output_mif[i]=directory+'/'+splitted[i][5]+'/MIF-raw/'+splitted[i][5]+'_'+splitted[i][7]+'_'+splitted[i][9]+'.mif'\n\n\n 
# save (or return) dataframe here?\n return site_sub_files,output_mif", "def fit(self, data: pd.DataFrame) -> None:\n self.pipeline = Pipeline([\n ('pipeline', FeatureUnion([\n ('categorical', self.build_categorical_pipeline()),\n ('numerical', self.build_numerical_pipeline())\n ]))\n ])\n self.pipeline.fit(data)", "def run(self):\r\n self.collect_data()", "def process(self):\n self.extract()\n self.transform()\n self.load()", "def _on_solver_state_end(self, results):\n\n self.results = results\n self._plot()", "def update_pipeline(self):\n mm = self.module_manager\n if mm is None or not self.axes.use_data_bounds:\n self.configure_input_data(self.axes, None)\n return\n src = mm.source\n self.configure_input(self.axes, src.outputs[0])\n self.pipeline_changed = True", "def run(self, context):\n self.start()\n while not context.is_finished():\n self.update_predictions(context)\n new_results, instances = self.evaluate()\n if new_results:\n self.write_results(new_results, instances)\n self.update_plot(new_results, instances)\n\n if self.plot_file:\n self.visualizer.savefig(self.plot_file)\n if self.show_plot:\n self.visualizer.show()\n self.end()", "def plot(self, job):\n # fill PlotJob with needed data if it doesn't exist\n # Plotter will look for the files it needs relative to the work directory\n # If this fails it will fall back to a baseline location if one was \n # Provided to cmake at the time this file was generated\n if job.dataPath == None :\n job.dataPath = \"Scenarios/\" + job.verificationDirectory + \"/baselines/\"\n \n if job.dataFile == None:\n job.dataFile = job.name + \"Results.zip\"\n \n if job.outputFilename==None:\n job.outputFilename=job.titleOverride+\".jpg\"\n \n if len(job.outputFilename.split(\".\"))==1:\n job.outputFilename+=\".jpg\"\n \n if job.imageWidth==None and job.imageHeight==None:\n job.imageWidth=1600\n job.imageHeight=800\n \n if not os.path.exists(job.dataPath):\n job.dataPath = os.path.join(job.basedir,job.dataPath)\n \n if not os.path.isfile(os.path.join(job.dataPath,job.dataFile)):\n job.dataPath = os.path.join(job.basedir,job.dataPath)\n \n if not job.fontSize:\n job.fontSize=22\n \n if not os.path.exists(os.path.dirname(job.outputDir)):\n os.mkdir(os.path.dirname(job.outputDir))\n \n self.drawgraph(job,os.path.join(job.dataPath,job.dataFile),os.path.join(job.outputDir,job.outputFilename))", "def __exit__(self, *args):\n # Do the last (and perhaps only) call's plotting\n self._doPlots()\n self._isSubplot = False\n self.opts.goGlobal()\n if not self.usingAgg:\n self.fig.canvas.mpl_connect('resize_event', self.subplots_adjust)", "def pipeline_test_data(self):\n if self.linearity:\n Detector1Pipeline.call(self.ramp_file, save_results=True, output_dir=self.output_dir, output_use_model=True,\n steps={'ipc': {'skip': True},\n 'rscd': {'skip': True},\n 'lastframe': {'save_results': True,\n 'output_dir': self.output_dir},\n 'dark_current': {'save_results': True,\n 'output_dir': self.output_dir},\n #'linearity': {'skip': True},\n 'jump': {'save_results': True,\n 'output_dir': self.output_dir}})\n else:\n Detector1Pipeline.call(self.ramp_file, save_results=True, output_dir=self.output_dir, output_use_model=True,\n steps={'ipc': {'skip': True},\n 'rscd': {'skip': True},\n 'lastframe': {'save_results': True,\n 'output_dir': self.output_dir},\n 'dark_current': {'save_results': True,\n 'output_dir': self.output_dir},\n 'linearity': {'skip': True},\n 'jump': {'save_results': True,\n 'output_dir': self.output_dir}})\n\n self.pre_dark_file = os.path.join(self.output_dir, 
'step_lastframe.fits')\n self.post_dark_file = os.path.join(self.output_dir, 'step_dark_current.fits')\n self.jump_file = os.path.join(self.output_dir, 'step_jump.fits')\n self.rate_file = os.path.join(self.output_dir, 'step_rate.fits')", "def do_process(self):\n for k in self.processor.process():\n self._progress = k\n\n self.output_container = self.processor.target_container # type: converter.containers.Container", "def run(self):\n\t\tself.print_header_information()\n\n\t\t#self.get_number_of_instances_from_user()\n\n\t\t#self.compile_dataframe(self.number_of_instances)\n\n\t\tprint \"\\n{}\".format(self.data)\n\n\t\t# Uncomment these lines for debugging\n\t\tself.compile_dataframe_default()\n\t\t# print \"\\n{}\".format(self.data)\n\n\t\tself.analysis_of_dataframe(self.data)", "def pipeline(self):\n\n self._get_data()\n self._upload_to_raw()", "def visualize(self):\r\n self.aggregator.plot_loss()\r\n self.save_figure()", "def process(self, data) :\n rData = Core.Processlib.Data()\n rData.frameNumber = data.frameNumber\n rData.buffer = self._worker.process(data.buffer)\n if self._writer: #optional HDF5 writer\n self._writer.write(rData.buffer, rData.frameNumber)", "def _process_data(self, pattern, data, timestamps):\n self._plotbuffer[pattern] = (self._plotbuffer[pattern] + data)[-self._plotlength:]\n if timestamps:\n self._timestampbuffer[pattern] = (self._timestampbuffer[pattern] + timestamps)[-self._plotlength:]\n \n data = self._plotbuffer[pattern]\n# if timestamps:\n timestamps = self._timestampbuffer[pattern]\n _length = len( data ) \n\n self._plotbuffer[pattern] = data[-self._plotlength:] \n# if timestamps:\n self._timestampbuffer[pattern] = timestamps[-self._plotlength:]\n\n# if _length > self._plotlength:\n# # plot the last <plotlength> samples\n# self._plotbuffer[pattern] = data[-self._plotlength:] \n# if timestamps:\n# self._timestampbuffer[pattern] = timestamps[-self._plotlength:] \n# else:\n# # plot 0 line before signal starts\n# self._plotbuffer[pattern] = [ 0 for _ in range(self._plotlength - _length)] + data\n# if timestamps:\n# self._timestampbuffer[pattern] = [ 0 for _ in range(self._plotlength - _length)] + timestamps\n\n if not self.changed:\n self.changed = True\n\n self._process_plot_data( pattern, data, timestamps )", "def main():\n data_visualisation()\n write_hyper_params()\n write_result_tables()\n write_box_plots()", "def process(self, data, reset=False):\n data = np.asarray(data)\n self.check_dims(data)\n data = self.highpass_filter(data, reset=reset)\n data = self.lowpass_filter(data, reset=reset)\n data = self.resample(data)\n data = self.reref_data(data)\n data = self.select_channels(data)\n data = self.normalize_data(data)\n data = self.add_context(data)\n return data", "def doAllPlots ():\n #df = processIp (\"18-06-01-1-attack.pcap\", \"ec:1a:59:79:f4:89\")\n #df.to_csv (\"df.csv\", index=False)\n df = pd.read_csv (\"df.csv\")\n attack_df = parseAnnotation (\"ec1a5979f489.csv\")\n ret = plotThresholds (df, attack_df)\n plotEntropyWithThreshold (df, ret[2])\n createUtilityHistogram (ret[0], ret[1]) \n\n \"\"\"\n Traffic flow graph\n \"\"\"\n #df = processTrafficFlow (\"18-06-01-short.pcap\", \"ec:1a:59:79:f4:89\")\n #plotTrafficFlow (df)\n\n \"\"\"\n Entropy for source port\n \"\"\"\n #df = processSrcPort (\"18-06-01-short.pcap\", \"ec:1a:59:79:f4:89\")\n #plotEntropy (df)\n\n \"\"\"\n Entropy for destination port\n \"\"\" \n #df = processDstPort (\"18-06-01-short.pcap\", \"ec:1a:59:79:f4:89\")\n #plotEntropy (df) \n\n \"\"\"\n It will be implemented next 
day\n df = processPorts (\"18-06-01.pcap\", \"ec:1a:59:79:f4:89\")\n attack_df = parseAnnotation (\"ec1a5979f489.csv\")\n ret = plotThresholds (df, attack_df)\n plotEntropy (df, attack_df, ret[2])\n createUtilityHistogram (ret[0], ret[1]) \n\n df = processProtocols (\"18-06-01.pcap\", \"ec:1a:59:79:f4:89\")\n attack_df = parseAnnotation (\"ec1a5979f489.csv\")\n ret = plotThresholds (df, attack_df)\n plotEntropy (df, attack_df, ret[2])\n createUtilityHistogram (ret[0], ret[1]) \n \"\"\"\n return", "def begin(self, pipeline: osbuild.Pipeline):", "def _process(self):\n export_collect_data(self.kwargs[\"collect\"])", "def test_fit(self, pipeline):\n pipeline.fit(X, Y)", "def main(\n field_data,\n RGB_size,\n HSI_size,\n rgb_dir, \n hyperspectral_dir,\n savedir=\".\", \n chunk_size=200,\n extend_HSI_box=0, \n extend_RGB_box=0,\n hyperspectral_savedir=\".\", \n saved_model=None, \n client=None, \n species_classes_file=None,\n site_classes_file=None,\n domain_classes_file=None, \n shuffle=True): \n df = gpd.read_file(field_data)\n plot_names = df.plotID.unique()\n \n hyperspectral_pool = glob.glob(hyperspectral_dir, recursive=True)\n rgb_pool = glob.glob(rgb_dir, recursive=True)\n \n labels = []\n HSI_crops = []\n RGB_crops = []\n domains = []\n sites = []\n box_indexes = [] \n elevations = []\n heights = []\n if client is not None:\n futures = []\n for plot in plot_names:\n future = client.submit(\n run,\n plot=plot,\n df=df,\n rgb_pool=rgb_pool,\n hyperspectral_pool=hyperspectral_pool,\n extend_HSI_box=extend_HSI_box,\n extend_RGB_box=extend_RGB_box, \n hyperspectral_savedir=hyperspectral_savedir,\n saved_model=saved_model\n )\n futures.append(future)\n \n wait(futures)\n for x in futures:\n try:\n plot_HSI_crops, plot_RGB_crops, plot_labels, plot_domains, plot_sites, plot_heights, plot_elevations, plot_box_index = x.result()\n \n #Append to general plot list\n HSI_crops.extend(plot_HSI_crops)\n RGB_crops.extend(plot_RGB_crops)\n labels.extend(plot_labels)\n domains.extend(plot_domains)\n sites.extend(plot_sites)\n heights.extend(plot_heights)\n elevations.extend(plot_elevations)\n box_indexes.extend(plot_box_index) \n except Exception as e:\n print(\"Future failed with {}\".format(e)) \n traceback.print_exc()\n else:\n from deepforest import deepforest \n deepforest_model = deepforest.deepforest()\n deepforest_model.use_release() \n for plot in plot_names:\n try:\n plot_HSI_crops, plot_RGB_crops, plot_labels, plot_domains, plot_sites, plot_heights, plot_elevations, plot_box_index = run(\n plot=plot,\n df=df,\n rgb_pool=rgb_pool,\n hyperspectral_pool=hyperspectral_pool, \n extend_HSI_box=extend_HSI_box,\n extend_RGB_box=extend_RGB_box, \n hyperspectral_savedir=hyperspectral_savedir,\n saved_model=saved_model,\n deepforest_model=deepforest_model\n )\n except Exception as e:\n print(\"Plot failed with {}\".format(e)) \n traceback.print_exc() \n continue\n \n #Append to general plot list\n HSI_crops.extend(plot_HSI_crops)\n RGB_crops.extend(plot_RGB_crops)\n labels.extend(plot_labels)\n domains.extend(plot_domains)\n sites.extend(plot_sites) \n heights.extend(plot_heights) \n elevations.extend(plot_elevations)\n box_indexes.extend(plot_box_index)\n \n if shuffle:\n z = list(zip(HSI_crops, RGB_crops, domains, sites, heights, elevations, box_indexes, labels))\n random.shuffle(z)\n HSI_crops, RGB_crops, domains, sites, heights, elevations, box_indexes, labels = zip(*z)\n \n #If passes a species label dict\n if species_classes_file is not None:\n species_classdf = pd.read_csv(species_classes_file)\n 
species_label_dict = species_classdf.set_index(\"taxonID\").label.to_dict()\n else:\n #Create and save a new species and species label dict\n unique_species_labels = np.unique(df.taxonID.unique())\n species_label_dict = {}\n \n for index, label in enumerate(unique_species_labels):\n species_label_dict[label] = index\n pd.DataFrame(species_label_dict.items(), columns=[\"taxonID\",\"label\"]).to_csv(\"{}/species_class_labels.csv\".format(savedir))\n \n #If passes a site label dict\n if site_classes_file is not None:\n site_classdf = pd.read_csv(site_classes_file)\n site_label_dict = site_classdf.set_index(\"siteID\").label.to_dict()\n else:\n #Create and save a new site and site label dict\n unique_site_labels = np.unique(df.siteID.unique())\n site_label_dict = {}\n \n for index, label in enumerate(unique_site_labels):\n site_label_dict[label] = index\n pd.DataFrame(site_label_dict.items(), columns=[\"siteID\",\"label\"]).to_csv(\"{}/site_class_labels.csv\".format(savedir))\n\n #If passes a domain label dict\n if domain_classes_file is not None:\n domain_classdf = pd.read_csv(domain_classes_file)\n domain_label_dict = domain_classdf.set_index(\"domainID\").label.to_dict()\n else:\n #Create and save a new domain and domain label dict\n unique_domain_labels = np.unique(df.domainID.unique())\n domain_label_dict = {}\n \n for index, label in enumerate(unique_domain_labels):\n domain_label_dict[label] = index\n pd.DataFrame(domain_label_dict.items(), columns=[\"domainID\",\"label\"]).to_csv(\"{}/domain_class_labels.csv\".format(savedir))\n \n #Convert labels to numeric\n numeric_labels = [species_label_dict[x] for x in labels]\n numeric_sites = [site_label_dict[x] for x in sites]\n numeric_domains = [domain_label_dict[x] for x in domains]\n \n print(\"Writing records of {} HSI samples, {} RGB samples from {} species and {} sites\".format(\n len(HSI_crops),\n len(RGB_crops),\n len(np.unique(numeric_labels)),\n len(np.unique(numeric_sites))))\n \n #Write tfrecords\n tfrecords = create_records(\n HSI_crops=HSI_crops,\n RGB_crops=RGB_crops,\n labels=numeric_labels, \n sites=numeric_sites, \n domains=numeric_domains,\n number_of_domains=len(domain_label_dict),\n number_of_sites=len(site_label_dict),\n classes=len(species_label_dict),\n elevations=elevations,\n box_index=box_indexes, \n savedir=savedir, \n heights=heights,\n RGB_size=RGB_size,\n HSI_size=HSI_size, \n chunk_size=chunk_size)\n \n return tfrecords", "def update_data(self):\n\n # Update all plots in the figure\n self.data = self.model.measurements.get_bokeh_vis_data()\n self.source.stream(self.data, len(self.data))\n self.line_source.stream(self.data[self.data.agent_type == 'system'])\n self.school_dropdown_func()\n\n # Update the utility histograms\n self.update_histograms()\n\n # Update the composition histograms\n to_update = [self.neighbourhood_composition_quads, \n self.school_composition_quads, self.distance_quads]\n\n for quads in to_update:\n\n # Grab the new data\n if quads == self.neighbourhood_composition_quads:\n hist_data = self.composition_data(agent_type='neighbourhood')\n elif quads == self.school_composition_quads:\n hist_data = self.composition_data(agent_type='school')\n else:\n hist_data = self.composition_data(agent_type='household')\n\n # Update the bars and edges\n for group in hist_data.keys():\n\n hist, edges = np.histogram(hist_data[group],\n density=True,\n bins=20)\n\n # Update histogram\n quads[group].data_source.data['top'] = hist\n quads[group].data_source.data['left'] = edges[:-1]\n 
quads[group].data_source.data['right'] = edges[1:]", "def plot(self):\n\t\tself.plotOfTF().plot()", "def perform_extraction(self) -> None:\n\n self._process_datasets_all_frames()", "def plot_data():\n \n [X_train, X_dev, X_test, Y_train, Y_dev, Y_test, numOutputNodes] = load_data('regression') \n \n traindev = np.concatenate((Y_train, Y_dev), 1)\n traindevtest = np.concatenate((traindev, Y_test), 1)\n tdt = traindevtest.reshape(traindevtest.shape[1],)\n\n Y_train = Y_train.reshape(Y_train.shape[1],)\n Y_dev = Y_dev.reshape(Y_dev.shape[1],)\n Y_test = Y_test.reshape(Y_test.shape[1],)\n\n sigma = np.round(np.std(tdt), 3)\n mu = np.round(np.mean(tdt), 3)\n\n # plots histogram of all data together, indicating values of mean and standard deviation\n plt.figure(1)\n plt.hist(tdt)\n plt.title(\"{} data points, mu = {}, sigma = {}\".format(tdt.size, mu, sigma))\n plt.xlabel(\"average Fe coordination number\")\n plt.ylabel(\"frequency\")\n plt.show()\n\n # plots histogram where the training, cross-validation, and test sets have separate bars\n plt.figure(2)\n plt.hist([Y_train, Y_dev, Y_test], label = ['training', 'cross-validation', 'test'], density = True)\n plt.xlabel(\"average Fe coordination number\")\n plt.ylabel(\"frequency\")\n plt.legend()\n plt.show()\n\n # below is graphing for the charge data, as opposed to the averaged spectrum data\n [X_train1, X_dev1, X_test1, _, _, _, Y_train1, Y_dev1, Y_test1, numOutputNodes1] = load_data('multi_task')\n traindev1 = np.concatenate((Y_train1, Y_dev1), 1)\n traindevtest1 = np.concatenate((traindev1, Y_test1), 1)\n tdt1 = traindevtest1.reshape(traindevtest1.shape[1],)\n\n Y_train1 = Y_train1.reshape(Y_train1.shape[1],)\n Y_dev1 = Y_dev1.reshape(Y_dev1.shape[1],)\n Y_test1 = Y_test1.reshape(Y_test1.shape[1],)\n\n sigma = np.round(np.std(tdt1), 3)\n mu = np.round(np.mean(tdt1), 3)\n\n # plots histogram of all data together, indicating values of mean and standard deviation\n plt.figure(3)\n plt.hist(tdt1)\n plt.title(\"{} data points, mu = {}, sigma = {}\".format(tdt1.size, mu, sigma))\n plt.xlabel(\"charge\")\n plt.ylabel(\"frequency\")\n plt.show()\n\n # plots histogram where the training, cross-validation, and test sets have separate bars\n plt.figure(4)\n plt.hist([Y_train1, Y_dev1, Y_test1], label = ['training', 'cross-validation', 'test'], density = True)\n plt.xlabel(\"charge\")\n plt.ylabel(\"frequency\")\n plt.legend()\n plt.show()\n\n return None", "def pipeline(ctx):\n asyncio.run(pipeline_impl(ctx.obj[\"config\"]))", "def reset_pipeline(self):\n self.pipeline_X = self.init_pipeline_X\n self.pipeline_y = self.init_pipeline_y", "def stage(self):\n\n # prepare projected land allocation data\n self.prep_projected()\n\n # prepare base land use data\n self.prep_base()\n\n # harmonize grid area between projected and base layer land allocation\n self.harmony()\n\n # apply constraints\n self.set_constraints()\n\n # create kernel density filter if not running multiple jobs\n self.kernel_filter()\n\n # set data for step zero\n self.set_step_zero()", "def done(self):\n if not self._isSubplot:\n raise Exception(\"You are not in a subplotting context!\")\n self.__exit__()", "def process(self):\n pass", "def process(self):\n # Enable the use of the the W3C PROV data model to capture and represent provenance in Nipype\n # config.enable_provenance()\n\n # Process time\n self.now = datetime.datetime.now().strftime(\"%Y%m%d_%H%M\")\n\n cmp_deriv_subject_directory, nipype_deriv_subject_directory, nipype_diffusion_pipeline_subject_dir = \\\n 
self.init_subject_derivatives_dirs()\n\n # Initialization\n log_file = os.path.join(nipype_diffusion_pipeline_subject_dir, \"pypeline.log\")\n\n if os.path.isfile(log_file):\n os.unlink(log_file)\n\n config.update_config(\n {\n \"logging\": {\n \"workflow_level\": \"INFO\",\n \"interface_level\": \"INFO\",\n \"log_directory\": nipype_diffusion_pipeline_subject_dir,\n \"log_to_file\": True,\n },\n \"execution\": {\n \"remove_unnecessary_outputs\": False,\n \"stop_on_first_crash\": True,\n \"stop_on_first_rerun\": False,\n \"try_hard_link_datasink\": True,\n \"use_relative_paths\": True,\n \"crashfile_format\": \"txt\",\n },\n }\n )\n logging.update_logging(config)\n\n iflogger = logging.getLogger(\"nipype.interface\")\n iflogger.info(\"**** Processing ****\")\n\n flow = self.create_pipeline_flow(\n cmp_deriv_subject_directory=cmp_deriv_subject_directory,\n nipype_deriv_subject_directory=nipype_deriv_subject_directory,\n )\n flow.write_graph(graph2use=\"colored\", format=\"svg\", simple_form=True)\n # Create dictionary of arguments passed to plugin_args\n plugin_args = {\n 'maxtasksperchild': 1,\n 'n_procs': self.number_of_cores,\n 'raise_insufficient': False,\n }\n flow.run(plugin=\"MultiProc\", plugin_args=plugin_args)\n\n iflogger.info(\"**** Processing finished ****\")\n\n return True", "def main():\n (time, heart_rate, pace) = parse_file()\n (hr_filt, v_filt) = smoothing(time, heart_rate, pace)\n plot_data(hr_filt, v_filt)", "def collect_pipeline_runs(self):\n db = self.mongo_client.metalearning\n collection = db.pipeline_runs\n collection_size = collection.count()\n pipeline_cursor = collection.find()\n list_of_experiments = {\"classification\": [], \"regression\": []}\n for index, pipeline_run in enumerate(pipeline_cursor):\n if index % 1000 == 0:\n print(\"At {} out of {} documents\".format(index, collection_size))\n # if index == 2000:\n # # running into memory errors\n # break\n pipeline_run_info = self.get_pipeline_run_info(pipeline_run)\n metafeatures = self.get_metafeature_info(pipeline_run)\n # TODO: get all metafeatures so we don't need this\n if metafeatures != {}:\n experiment_json = dict(pipeline_run_info, **metafeatures)\n list_of_experiments[experiment_json[\"problem_type\"]].append(experiment_json)\n\n for problem_type in list_of_experiments.keys():\n final_data_file = json.dumps(list_of_experiments[problem_type], sort_keys=True, indent=4, default=json_util.default)\n with open(\"data/complete_pipelines_and_metafeatures_test_{}.json\".format(problem_type), \"w\") as file:\n file.write(final_data_file)\n\n return", "def plot(self, *args, **kwargs):\n pass", "def _on_pipeline_init(self) -> None:\n pass", "def plot_parallel():\n import chartify\n\n # Generate example data\n data = chartify.examples.example_data()\n\n total_quantity_by_fruit_and_country = data.groupby([\"fruit\", \"country\"])[\"quantity\"].sum().reset_index()\n print(total_quantity_by_fruit_and_country.head())\n \"\"\"Print break\"\"\"\n _parallel_example_1(total_quantity_by_fruit_and_country)", "def after_each(self, dataset: pydicom.dataset.Dataset) -> None:", "def complete_slot(self, data):\n # Show result\n self.result = data\n self.table_output_data.setModel(pandasModel(data.loc[:, ['date'] + [i for i in self.column_name]])) # 顯示表格在table_output_data\n self.show_plot() # 參照show_plot()\n\n self.label_current_message.setText('模型計算完畢!')\n self.stopped = False\n self.btn_run_model.setEnabled(True)", "def make_pipeline():\n # exchange = Fundamentals.exchange_id.latest\n # nyse_filter = exchange.eq('NYS')\n 
symbol_filter = StaticSids([TRADING_SID])\n set_benchmark(TRADING_SID) \n # volume_filter = VolumeFilter(\n # inputs=[USEquityPricing.volume],\n # window_length=1,\n # mask=symbol_filter\n # )\n\n # is_setup = volume_filter & alpha_long_weekly & alpha_long_daily\n weekly_high = WeeklyHigh(\n inputs=[USEquityPricing.high],\n mask=symbol_filter\n )\n weekly_low = WeeklyLow(\n inputs=[USEquityPricing.low],\n mask=symbol_filter\n )\n weekly_classifier = WeeklyClassifier(\n inputs=[\n USEquityPricing.open,\n USEquityPricing.high,\n USEquityPricing.low,\n USEquityPricing.close\n ],\n mask=symbol_filter\n )\n daily_classifier = DailyClassifier(\n inputs=[\n USEquityPricing.open,\n USEquityPricing.high,\n USEquityPricing.low,\n USEquityPricing.close\n ],\n mask=symbol_filter\n\n )\n\n pipe = Pipeline(\n screen=symbol_filter, # & (daily_classifier > 0),\n columns={\n 'daily_classifier': daily_classifier,\n 'daily_high': USEquityPricing.high.latest,\n 'daily_low': USEquityPricing.low.latest,\n 'weekly_classifier': weekly_classifier,\n 'weekly_high': weekly_high,\n 'weekly_low': weekly_low\n }\n )\n return pipe", "def main(self, args):\n for plot in args.plots:\n if plot == 'no_plot':\n break\n print \"plotting\", plot\n\n fig = self.plot_figure(plot)\n\n fformat = '{plot}_{index}.{ext}'\n fname = fformat.format(plot=plot, index=self.index, ext='png')\n fpath = os.path.join(plot_dir, fname)\n fig.savefig(fpath)\n\n if args.distributions == 'all':\n distributions = ['Uf', 'Wf', 'uf_abs',\n 'vorticity', 'vertical_shear']\n else:\n distributions = args.distributions\n for dist in distributions:\n range = self.properties[dist]['range']\n name = self.properties[dist]['name']\n print \"plotting distribution\", dist, name\n fig = self.plot_distribution(getattr(self, dist), range, name)\n\n fformat = 'distribution_{q}_{index}.{ext}'\n fname = fformat.format(q=dist, index=self.index, ext='png')\n fpath = os.path.join(plot_dir, fname)\n fig.savefig(fpath)\n\n if args.funcs:\n for func in args.funcs:\n print \"multiprocessing\", func\n f = getattr(self, 'plot_' + func)\n f()", "def end(self, event):\n plt.close()", "def create_data_callback(self):\n self.x_values = None\n \n # ======================================\n # generating random integer data points\n # ======================================\n self.data_points = np.random.randint(-10, 10, size=(4, 2))\n\n # ==================================================================\n # Defining the Target Labels for above generated random data points\n # ==================================================================\n self.targets = np.array([1.0, 1.0, -1.0, -1.0])\n self.plot_graph()", "def after_fit(self):\n plt.close(self.graph_ax.figure)", "def update(self):\n\t\tprint(\"Plotting \" + str(str(self.values[\"Trial1\"][1]) + \" at \" + str(self.values[\"Trial1\"][0]) + \"\\n\"))\n\t\tif self.clear:\n\t\t\tself.stream1.write(dict(x=[], y=[]))\n\t\t\tself.stream2.write(dict(x=[], y=[]))\n\t\t\tself.stream3.write(dict(x=[], y=[]))\n\t\telse:\n\t\t\tself.stream1.write(dict(x=self.values[\"Trial1\"][0], y=self.values[\"Trial1\"][1]))#, trace=Bar)\n\t\t\tself.stream2.write(dict(x=self.values[\"Trial2\"][0], y=self.values[\"Trial2\"][1]))\n\t\t\tself.stream3.write(dict(x=self.values[\"Trial3\"][0], y=self.values[\"Trial3\"][1]))", "def updatePlot(self):\n self.axes.clear()\n self.axes.plot(self.data[0], self.data[1], linestyle='-', color='blue')\n self.axes.plot(self.data[0], self.data[2], linestyle='-', color='red')\n self.axes.plot(self.data[0], self.data[3], 
linestyle='-', color='gray')\n self.canvas.draw()", "def __init__(self):\n super().__init__()\n\n # general attributes\n self.printTag = 'OUTSTREAM PLOT'\n self.options = {} # outstreaming options # no addl info from original developer\n self.counter = 0 # keeps track of how many times the same plot has been plotted\n self.dim = None # default plot is 2D\n self.sourceName = [] # list of source names\n self.sourceData = None # source of data\n self.outStreamTypes = [] # list of the outstream types\n self.destinations = None # where plots should go (screen, file, etc.)\n\n # plot types key is data dimension, value is list of available plot types\n self.availableOutStreamTypes = {2: ['scatter',\n 'line',\n 'histogram',\n 'stem',\n 'step',\n 'pseudocolor',\n 'dataMining',\n 'contour',\n 'filledContour'],\n 3: ['scatter',\n 'line',\n 'histogram',\n 'stem',\n 'surface',\n 'wireframe',\n 'tri-surface',\n 'contour',\n 'filledContour']}\n\n # interpolators that can be used in plots\n self.availableInterpolators = ['nearest',\n 'linear',\n 'cubic',\n 'multiquadric',\n 'inverse',\n 'gaussian',\n 'Rbflinear',\n 'Rbfcubic',\n 'quintic',\n 'thin_plate']\n\n # plot components\n self.fig = None # figure\n self.ax = None # axes\n self.actPlot = None # plot action, ie., ax.plot()\n self.gridSpace = None # subplot setup\n self.actcm = None # colormap\n self.xCoordinates = None # x coordinate name\n self.yCoordinates = None # y coordinate name\n self.zCoordinates = None # z coordinate name\n self.xValues = None # dictionary of x values\n self.yValues = None # dictionary of y values\n self.zValues = None # dictionary of z values\n self.colorMapCoordinates = {} # color map coordinates\n self.colorMapValues = {} # color map values\n\n # For the data-mining plot, I think?\n self.clusterLabels = None\n self.clusterValues = None\n\n # Gaussian Mixtures\n self.mixtureLabels = None\n self.mixtureValues = None\n self.mixtureMeans = None\n self.mixtureCovars = None", "def updatePlot(self):\n self.axes.clear()\n self.axes.plot(self.data[0], self.data[1], linestyle='-', color='gray')\n self.axes.plot(self.data[0], self.data[2], linestyle='-', color='blue')\n self.axes.plot(self.data[0], self.data[3], linestyle='-', color='darkgreen')\n self.canvas.draw()", "def updatePlot(self):\n self.axes.clear()\n self.axes.plot(self.data[0], self.data[1], linestyle='-', color='gray')\n self.axes.plot(self.data[0], self.data[2], linestyle='-', color='blue')\n self.axes.plot(self.data[0], self.data[3], linestyle='-', color='darkgreen')\n self.canvas.draw()", "def updatePlot(self):\n self.axes.clear()\n self.axes.plot(self.data[0], self.data[1], linestyle='-', color='gray')\n self.axes.plot(self.data[0], self.data[2], linestyle='-', color='blue')\n self.axes.plot(self.data[0], self.data[3], linestyle='-', color='darkgreen')\n self.canvas.draw()", "def updatePlot(self):\n self.axes.clear()\n self.axes.plot(self.data[0], self.data[1], linestyle='-', color='gray')\n self.axes.plot(self.data[0], self.data[2], linestyle='-', color='blue')\n self.axes.plot(self.data[0], self.data[3], linestyle='-', color='darkgreen')\n self.canvas.draw()", "def updatePlot(self):\n self.axes.clear()\n self.axes.plot(self.data[0], self.data[1], linestyle='-', color='gray')\n self.axes.plot(self.data[0], self.data[2], linestyle='-', color='blue')\n self.axes.plot(self.data[0], self.data[3], linestyle='-', color='darkgreen')\n self.canvas.draw()", "def runFullPipeline(path, sigma_badpix, nframes_badpix,\n method_bkg, method_cent, plotting_binsize,\n ramp_time, 
end_time,\n sigma_clip_cent, iters_cent, nframes_cent,\n radius_photom,\n sigma_clip_phot, iters_photom, nframes_photom,\n x0guess = None, y0guess = None,\n size_bkg_box = None, radius_bkg_ann = None, size_bkg_ann = None,\n size_cent_bary = None, quiet = False, passenger57 = False,\n plot = False, AOR = None, planet = None, channel = None, sysmethod=None, foldext = '', ret = False, binsize = 1):\n # Must be sigma_clip_phot because function is called sigma_clip_photom!!!\n\n # Read in the data\n data_info = read_files(path)\n exptime = data_info.exptime()\n readnoise = data_info.readnoise()\n gain = data_info.gain()\n fluxconv = data_info.fluxconv()\n framtime = data_info.framtime()\n MJysr2lelectrons = exptime*gain/fluxconv\n\n #Create timeseries, midtimes and maskseries\n timeseries = data_info.create_timeseries()\n midtimes = data_info.midtimes()\n\n print \"\\t Exptime = {}, Readnoise = {}, Gain = {}, Fluxconv = {}, Framtime = {}\".format(exptime, readnoise, gain, fluxconv, framtime)\n print \"\\t MJy/sr to electrons conversion factor = {}\".format(MJysr2lelectrons)\n\n stats = {}\n\n #Fix bad pixles\n if ret:\n timeseries, stats['Bad pix %'] = fast_bad_pix_mask(timeseries, sigma_badpix, nframes_badpix, quiet = quiet, foldext=foldext, ret = ret)\n else:\n timeseries = fast_bad_pix_mask(timeseries, sigma_badpix, nframes_badpix, quiet = quiet, foldext=foldext, ret = ret)\n\n print \"\\nBinning timeseries by {}\".format(binsize)\n print \"\\t Shape of old timeseries: {}\".format(timeseries.shape)\n timeseries = custom_bin(timeseries, binsize)\n print \"\\t Shape of new timeseries: {}\".format(timeseries.shape)\n\n #Subtract background\n timeseries, background = bck_subtract(timeseries, method = method_bkg,\n boxsize = size_bkg_box,\n radius = radius_bkg_ann,\n size = size_bkg_ann,\n quiet = quiet, plot = plot, plotting_binsize = plotting_binsize, AOR = AOR, planet = planet, channel = channel,sysmethod=sysmethod, foldext=foldext)\n\n #Get rid of first half hour of observations before any clipping\n timeseries, midtimes, background = discard_ramp(timeseries, midtimes, background, ramp_time, end_time, framtime, quiet = quiet, passenger57 = passenger57, foldext=foldext)\n\n #Centroid barycenter\n centroids = centroid(timeseries, method=method_cent, boxsize = size_cent_bary, quiet = quiet, plot = plot, AOR = AOR, planet = planet, channel = channel,sysmethod=sysmethod, foldext=foldext, x0guess=x0guess, y0guess=y0guess)\n\n #Clip barycenter centroids twice\n timeseries, centroids, midtimes, background = sigma_clip_centroid(timeseries, centroids, midtimes, background, sigma_clip_cent, iters_cent, nframes_cent, quiet = quiet, plot = plot, AOR = AOR, planet = planet, channel = channel,sysmethod=sysmethod, foldext=foldext)\n\n # #Test with fixing the centroids to the mean\n # centroids[:, 0] = np.mean(centroids[:,0])\n # centroids[:, 1] = np.mean(centroids[:,1])\n\n #Aperture photometry to create lightcurve\n lightcurve = aperture_photom(timeseries, centroids, radius_photom, quiet = quiet, foldext=foldext)\n\n #Clip photometry twice\n # Doing this isn't really fair because the points could be valid! 
Need to do it after the initial fit.\n #lightcurve, timeseries, centroids, midtimes, background = sigma_clip_photom(lightcurve, timeseries, centroids, midtimes, background, sigma_clip_phot, iters_photom, nframes_photom, quiet = quiet, plot = plot, AOR = AOR, planet = planet, channel = channel,sysmethod=sysmethod, foldext=foldext)\n\n if ret:\n lightcurve, timeseries, centroids, midtimes, background, stats['N sigma clip photom'] = sigma_clip_photom(lightcurve, timeseries, centroids, midtimes, background,\n 5, 1, len(lightcurve),\n quiet = quiet, plot = plot, AOR = AOR, planet = planet, channel = channel,sysmethod=sysmethod, foldext=foldext, ret = ret)\n return lightcurve*MJysr2lelectrons, timeseries, centroids, midtimes, background, stats\n else:\n lightcurve, timeseries, centroids, midtimes, background = sigma_clip_photom(lightcurve, timeseries, centroids, midtimes, background,\n 5, 1, len(lightcurve),\n quiet = quiet, plot = plot, AOR = AOR, planet = planet, channel = channel, sysmethod = sysmethod, foldext = foldext)\n return lightcurve*MJysr2lelectrons, timeseries, centroids, midtimes, background", "def plot(self):\n\t\tplot_chain(self.database_path, self.temp_folder)\n\t\tplot_density(self.database_path, self.temp_folder, self.cal_params)", "def output_predictions(pipeline):\n ##### Write code here #######\n X_train, y_train_true = load_data_file(TRAIN_FILE)\n X_dev, y_dev_true = load_data_file(DEV_FILE)\n X_test, y_test_true = load_data_file(TEST_FILE)\n\n #train pipeline with dev and train file\n pipeline.fit(X=X_train, y=y_train_true)\n pipeline.fit(X=X_dev, y=y_dev_true)\n\n y_pred_test = pipeline.predict(X=X_test)\n\n df = pd.DataFrame(y_pred_test)\n with open('predictions.tsv', 'w'):\n df.to_csv('predictions.tsv', sep='\\t', index=False, header=False)\n ##### End of your work ######", "def plot(self):\n\t\tself.plotOfXray().plot()", "def do(self):\n super().do()\n logger.info(\"TrainPipeStep started...\")\n records = self._get_current_step_records()\n logger.debug(\"load pipestep records: {}\".format(records))\n self.num_models = len(records)\n self.num_epochs = self.num_models * TrainerConfig.epochs\n self.update_status(Status.running)\n self.master = create_master()\n self._train_multi_models(records)\n self.master.join()\n ReportServer().output_step_all_records(step_name=self.task.step_name)\n self.master.close()\n ReportServer().backup_output_path()\n self.update_status(Status.finished)", "def make_process_pipelines(\n self, dataset, return_epochs=False, return_raws=False, postprocess_pipeline=None\n ):\n if return_epochs and return_raws:\n message = \"Select only return_epochs or return_raws, not both\"\n raise ValueError(message)\n\n self.prepare_process(dataset)\n\n raw_pipelines = self._get_raw_pipelines()\n epochs_pipeline = self._get_epochs_pipeline(return_epochs, return_raws, dataset)\n array_pipeline = self._get_array_pipeline(\n return_epochs, return_raws, dataset, postprocess_pipeline\n )\n\n if array_pipeline is not None:\n events_pipeline = (\n self._get_events_pipeline(dataset) if return_raws else EpochsToEvents()\n )\n else:\n events_pipeline = None\n\n if events_pipeline is None and array_pipeline is not None:\n log.warning(\n f\"event_id not specified, using all the dataset's \"\n f\"events to generate labels: {dataset.event_id}\"\n )\n events_pipeline = (\n RawToEvents(dataset.event_id)\n if epochs_pipeline is None\n else EpochsToEvents()\n )\n\n process_pipelines = []\n for raw_pipeline in raw_pipelines:\n steps = []\n steps.append((StepType.RAW, 
SetRawAnnotations(dataset.event_id)))\n if raw_pipeline is not None:\n steps.append((StepType.RAW, raw_pipeline))\n if epochs_pipeline is not None:\n steps.append((StepType.EPOCHS, epochs_pipeline))\n if array_pipeline is not None:\n array_events_pipeline = ForkPipelines(\n [\n (\"X\", array_pipeline),\n (\"events\", events_pipeline),\n ]\n )\n steps.append((StepType.ARRAY, array_events_pipeline))\n process_pipelines.append(Pipeline(steps))\n return process_pipelines", "def process(self, data, output, processes, process):\n slice_list = du.get_grouped_slice_list(data, self.get_filter_frame_type(), self.get_max_frames())\n self._process_chunks(slice_list, data, output, len(processes), process)", "def plotting(self,job,input_zip,output_file,df,my_dpi):\n if len(job.Y2headers)>0:\n X1=df.loc[:,job.X1header].values\n X2=df.loc[:,job.X2header].values\n Y1=df.loc[:,job.Y1headers].values\n if (job.name.startswith(\"Morphine\") or job.name.startswith(\"Sarin\")) and os.path.exists(job.experimentalData):\n df2=pd.read_csv(job.experimentalData)\n Y2=np.array(df2.loc[:,job.Y2headers].values)\n X1=np.array(df2.loc[:,job.X1header].values[:Y2.shape[0]])\n Y1=np.array(df.loc[:,job.Y1headers].values[:Y2.shape[0]])\n X2=np.array(df.loc[:,job.X2header].values[:Y2.shape[0]])\n else:\n Y2=df.loc[:,job.Y2headers].values\n fig,ax = plt.subplots()\n fig.set_size_inches(w=job.imageWidth/my_dpi+3.1,h=job.imageHeight/my_dpi+1)\n \n if not os.path.exists(job.outputDir):\n os.mkdir(job.outputDir)\n \n if job.X1LowerBound and job.X1UpperBound:\n ax.set_xlim(job.X1LowerBound,job.X1UpperBound)\n elif job.X1LowerBound:\n ax.set_xlim(left=job.X1LowerBound)\n elif job.X1UpperBound:\n ax.set_xlim(right=job.X1UpperBound)\n\n if job.Y1LowerBound and job.Y1UpperBound:\n ax.set_ylim(job.Y1LowerBound,job.Y1UpperBound)\n elif job.Y1LowerBound:\n ax.set_ylim(bottom=job.Y1LowerBound)\n elif job.Y1UpperBound:\n ax.set_ylim(top=job.Y1UpperBound) \n \n ax.get_yaxis().set_major_locator(MaxNLocator(nbins=18,min_n_ticks=8))\n ax.get_xaxis().set_major_locator(MaxNLocator(nbins=10,min_n_ticks=5))\n ax.yaxis.set_major_formatter(tick.FuncFormatter(self.y_fmt))\n ax.set_xlabel(job.X1header,fontsize=job.fontSize)\n \n if job.Y1Label!=None:\n ax.set_ylabel(job.Y1Label,color=\"g\",fontsize=job.fontSize)\n else:\n ax.set_ylabel(job.Y1headers[0],color=\"g\",fontsize=job.fontSize)\n p1=ax.plot(X1,Y1,color=\"g\")\n\n ax2=ax.twinx()\n \n if job.Y2LowerBound and job.Y2UpperBound:\n ax2.set_ylim(job.Y2LowerBound,job.Y2UpperBound)\n elif job.Y2LowerBound:\n ax2.set_ylim(bottom=job.Y2LowerBound)\n elif job.Y2UpperBound:\n ax2.set_ylim(top=job.Y2UpperBound)\n \n ax2.get_yaxis().set_major_locator(MaxNLocator(nbins=18,min_n_ticks=8))\n ax2.get_xaxis().set_major_locator(MaxNLocator(nbins=10,min_n_ticks=5))\n ax2.yaxis.set_major_formatter(tick.FuncFormatter(self.y_fmt))\n ax2.set_xlabel(job.X1header,fontsize=job.fontSize)\n \n if job.Y2Label!=None:\n ax2.set_ylabel(job.Y2Label,color=\"b\",fontsize=job.fontSize)\n else:\n ax2.set_ylabel(job.Y2headers[0],color=\"b\",fontsize=job.fontSize)\n \n p2=ax2.plot(X2,Y2,color=\"b\")\n title=job.name+\"_\"\n title+=job.Y2headers[0]\n title = title + \"vs_\" + job.X1header\n \n if job.X2header != None and not job.X1header.lower() == job.X2header.lower():\n title = title + \"_\" + job.X2header\n # Override the constructed title if desired\n \n if job.titleOverride != None and not len(job.titleOverride)==0 and not job.titleOverride.lower() == \"None\".lower():\n title = job.titleOverride\n if job.log>=1:\n 
logging.info(\"Creating Graph:\"+title)\n \n if job.titleOverride !=\"None\":\n plt.title(title,fontsize=job.fontSize)\n \n if job.showGridLines:\n plt.grid(b=True, which='major', color='r', linestyle='--')\n \n if not job.removeAllLegends:\n p=p1+p2\n labs=[job.Y1headers[0],job.Y2headers[0]]\n ax.legend(p,labs)\n \n if \"(\" and \")\" in job.outputFilename:\n job.outputFilename=job.outputFilename.split(\"(\")[0]+\".jpg\"\n plt.savefig(os.path.join(job.outputDir,job.outputFilename),dpi=my_dpi)\n plt.close(\"all\")\n\n elif len(job.Y2headers)==0:\n X1=df.loc[:,job.X1header]\n Y1=df.loc[:,job.Y1headers]\n fig,ax = plt.subplots()\n fig.set_size_inches(w=job.imageWidth/my_dpi+3.1,h=job.imageHeight/my_dpi+1)\n \n if not os.path.exists(job.outputDir):\n os.mkdir(job.outputDir)\n \n if job.X1LowerBound and job.X1UpperBound:\n ax.set_xlim(job.X1LowerBound,job.X1UpperBound)\n elif job.X1LowerBound:\n ax.set_xlim(left=job.X1LowerBound)\n elif job.X1UpperBound:\n ax.set_xlim(right=job.X1UpperBound)\n\n if job.Y1LowerBound and job.Y1UpperBound:\n ax.set_ylim(job.Y1LowerBound,job.Y1UpperBound)\n elif job.Y1LowerBound:\n ax.set_ylim(bottom=job.Y1LowerBound)\n elif job.Y1UpperBound:\n ax.set_ylim(top=job.Y1UpperBound) \n \n ax.get_yaxis().set_major_locator(MaxNLocator(nbins=10,min_n_ticks=8))\n ax.get_xaxis().set_major_locator(MaxNLocator(nbins=10,min_n_ticks=5))\n ax.yaxis.set_major_formatter(tick.FuncFormatter(self.y_fmt))\n ax.set_xlabel(job.X1header,fontsize=job.fontSize)\n ax.set_ylabel(job.Y1headers[0],fontsize=job.fontSize)\n p=ax.plot(X1,Y1)\n title=job.name+\"_\"\n title = title + \"vs_\" + job.X1header\n \n if job.X2header != None and not job.X1header.lower() == job.X2header.lower():\n title = title + \"_\" + job.X2header\n \n # Override the constructed title if desired\n if job.titleOverride != None and not len(job.titleOverride)==0 and not job.titleOverride.lower() == \"None\".lower():\n title = job.titleOverride\n if job.log>=1:\n logging.info(\"Creating Graph:\"+title)\n \n if job.titleOverride !=\"None\":\n plt.title(title,fontsize=job.fontSize)\n \n if job.showGridLines:\n plt.grid(b=True, which='major', color='r', linestyle='--')\n \n if not job.removeAllLegends:\n labs=job.Y1headers\n plt.legend(p,labs)\n \n if \"(\" and \")\" in job.outputFilename:\n job.outputFilename=job.outputFilename.split(\"(\")[0]+\".jpg\"\n plt.savefig(os.path.join(job.outputDir,job.outputFilename),dpi=my_dpi)\n plt.close(\"all\")\n \n else:\n X1=df.loc[:,job.X1header]\n Y1=df.loc[:,job.Y1headers]\n fig,ax = plt.subplots()\n fig.set_size_inches(w=job.imageWidth/my_dpi+3.1,h=job.imageHeight/my_dpi+1)\n \n if not os.path.exists(job.outputDir):\n os.mkdir(job.outputDir)\n \n if job.X1LowerBound and job.X1UpperBound:\n ax.set_xlim(job.X1LowerBound,job.X1UpperBound)\n elif job.X1LowerBound:\n ax.set_xlim(left=job.X1LowerBound)\n elif job.X1UpperBound:\n ax.set_xlim(right=job.X1UpperBound)\n\n if job.Y1LowerBound and job.Y1UpperBound:\n ax.set_ylim(job.Y1LowerBound,job.Y1UpperBound)\n elif job.Y1LowerBound:\n ax.set_ylim(bottom=job.Y1LowerBound)\n elif job.Y1UpperBound:\n ax.set_ylim(top=job.Y1UpperBound) \n \n ax.get_yaxis().set_major_locator(MaxNLocator(nbins=10,min_n_ticks=8))\n ax.get_xaxis().set_major_locator(MaxNLocator(nbins=10,min_n_ticks=5))\n ax.yaxis.set_major_formatter(tick.FuncFormatter(self.y_fmt))\n ax.set_xlabel(job.X1header,fontsize=job.fontSize)\n ax.set_ylabel(job.Y1headers[0],fontsize=job.fontSize)\n p1=ax.plot(X1,Y1)\n title=job.name+\"_\"\n title = title + \"vs_\" + job.X1header\n \n if 
job.X2header != None and not job.X1header.lower() == job.X2header.lower():\n title = title + \"_\" + job.X2header\n \n # Override the constructed title if desired\n if job.titleOverride != None and not len(job.titleOverride)==0 and not job.titleOverride.lower() == \"None\".lower():\n title = job.titleOverride\n if job.log>=1:\n logging.info(\"Creating Graph:\"+title)\n \n if job.titleOverride !=\"None\":\n plt.title(title,fontsize=job.fontSize)\n \n if job.showGridLines:\n plt.grid(b=True, which='major', color='r', linestyle='--')\n \n if not job.removeAllLegends:\n labs=job.Y1headers\n ax.legend(p1, labs)\n \n if \"(\" and \")\" in job.outputFilename:\n job.outputFilename=job.outputFilename.split(\"(\")[0]+\".jpg\"\n plt.savefig(os.path.join(job.outputDir,job.outputFilename),dpi=my_dpi)\n plt.close(\"all\")", "def updatePlot(self):\n self.axes.clear()\n self.axes.plot(self.data[0], self.data[1], linestyle='-', color='gray')\n self.axes.plot(self.data[0], self.data[2], linestyle='-', color='blue')\n self.canvas.draw()", "def make_pipeline():\n universe = TradableStocksUS('Real Estate') | TradableStocksUS('Utilities') | \\\n TradableStocksUS('Consumer Staples') | TradableStocksUS('Technology') | \\\n TradableStocksUS('Financials') | TradableStocksUS('Energy') | \\\n TradableStocksUS('Materials') | TradableStocksUS('Health Care') | \\\n TradableStocksUS('Industrials') | TradableStocksUS('Consumer Discretionary') | \\\n TradableStocksUS('Communications')\n\n roic = shfd.slice(dimension='MRT', period_offset=0).ROIC.latest\n ebit = shfd.slice(dimension='MRQ', period_offset=0).EBIT.latest\n ev = shfd.slice(dimension='MRQ', period_offset=0).EV.latest\n volatility = AnnualizedVolatility(window_length=100)\n value = ebit / ev\n\n roic_rank = roic.rank(mask=universe)\n value_rank = value.rank(mask=universe)\n volatility_rank = volatility.rank(mask=universe, ascending=False)\n\n spy_ma100_price = SMA(inputs=[USEquityPricing.close], \n window_length=100)[algo.sid(\"FIBBG000BDTBL9\")]\n spy_price = USEquityPricing.close.latest[algo.sid(\"FIBBG000BDTBL9\")]\n\n momentum_score = MomentumScore()\n\n overall_rank = roic_rank + value_rank + volatility_rank\n\n # seven_month_returns = Returns(window_length=148, mask=universe,)\n # one_month_returns = Returns(window_length=30, mask=universe,)\n\n pipeline = Pipeline(\n columns={\n 'stock' : master.SecuritiesMaster.Symbol.latest,\n 'sid': master.SecuritiesMaster.Sid.latest,\n 'sector' : master.SecuritiesMaster.usstock_Sector.latest,\n 'average_dollar_volume': AverageDollarVolume(window_length=200),\n 'price': EquityPricing.close.latest,\n 'volume': EquityPricing.volume.latest,\n 'roic' : roic,\n 'value' : value,\n 'volatility': volatility,\n 'roic_rank' : roic_rank,\n 'value_rank' : value_rank,\n 'momentum': momentum_score,\n 'momentum_decile': momentum_score.deciles(),\n 'volatility_decile' : volatility.deciles(),\n 'overall_rank' : overall_rank,\n 'overall_rank_decile': overall_rank.deciles(),\n 'trend_filter': spy_price > spy_ma100_price,\n # 'returns' : one_month_returns - seven_month_returns\n }, \n screen = universe\n )\n\n return pipeline", "def _fit(self):\n # Paramters of the steps\n param_grid = {\n \"converter__to_convert\": [True, False],\n \"pca__n_components\": [0.3, 0.5, 0.7, 0.9],\n \"regressor__estimator__max_depth\": list(range(1, 5)),\n }\n # Fit with pipeline\n steps = [\n (\"converter\", _RateConverter()),\n (\"scaler\", MinMaxScaler()),\n (\"pca\", PCA(random_state=0)),\n (\"regressor\", 
MultiOutputRegressor(LGBMRegressor(n_estimators=200, random_state=0))),\n ]\n tscv = TimeSeriesSplit(n_splits=5).split(self._X_train)\n pipeline = GridSearchCV(Pipeline(steps=steps), param_grid, n_jobs=-1, cv=tscv)\n pipeline.fit(self._X_train, self._Y_train)\n # Update regressor\n self._pipeline = pipeline\n # Update param\n self._param.update(**{k: type(v) for (k, v) in steps})", "def evaluate_batch(self, pipelines):", "def main(x_axis, y_axis, filtered, unfiltered, name, histogram, total, true_max):\n axes = [x_axis, y_axis, 'description']\n uf_dict, f_dict, min_x, max_x, min_y, max_y = data_from_sc_file(axes, filtered, unfiltered, true_max)\n gen_plots(uf_dict, f_dict, min_x, max_x, min_y, max_y, axes, name, histogram, total)", "def generate(self):\n\n # Load the required datapoints into memory.\n self._load_results()\n\n # Calculate datapoints statistics, like min. and max. values.\n self._calc_stats()\n\n # Generate the plots.\n self._generate_scatter_plots()\n self._generate_histograms()\n\n # Put together the final HTML report.\n self._generate_report()", "def start(self, data):\n # show the plotting and leave a handle\n handle = show(self.p, notebook_handle=True)\n \n # keep update the column data source with new data and push the \n # updating onto Jupyter notebook.\n while True:\n try: self.ds.stream(data, rollover=300)\n except ValueError: return\n push_notebook(handle=handle)\n time.sleep(self.itv)", "def run(self):\r\n history = self.extracter.load_user_history()\r\n self.plot_history(history)\r\n \r\n pp_history = self.analyser.preprocess_history(history)\r\n part_worths, attribute_importance, relative_importance = self.analyser.conjoint_analysis(pp_history)\r\n self.plot_analysis(part_worths, relative_importance)\r\n \r\n return history, pp_history, part_worths, relative_importance", "def process(self):", "def process(self):" ]
[ "0.6797422", "0.67031926", "0.63262737", "0.62594944", "0.6251043", "0.62087804", "0.61756724", "0.61126584", "0.61017734", "0.60401547", "0.59446293", "0.59367925", "0.592344", "0.5875977", "0.58692324", "0.5868876", "0.5862814", "0.5774588", "0.5762678", "0.5713469", "0.5692765", "0.5687919", "0.5687919", "0.56732744", "0.5672062", "0.56660414", "0.5665765", "0.5655818", "0.56539536", "0.56409496", "0.5630573", "0.5620017", "0.5612691", "0.5607744", "0.56037414", "0.55724245", "0.5568966", "0.55616415", "0.55608606", "0.55460477", "0.5538541", "0.55294514", "0.5514453", "0.5511246", "0.55014175", "0.54872864", "0.54684967", "0.5462541", "0.5457634", "0.5452794", "0.544775", "0.54445845", "0.5444268", "0.54406637", "0.5429901", "0.54262924", "0.5422515", "0.5405614", "0.5396697", "0.53873247", "0.5375541", "0.536424", "0.536315", "0.535544", "0.5353066", "0.5351066", "0.53485507", "0.5348011", "0.53458506", "0.53419995", "0.5340616", "0.5339221", "0.533244", "0.5331798", "0.5330296", "0.53209877", "0.53179556", "0.5315155", "0.5310152", "0.5310152", "0.5310152", "0.5310152", "0.5310152", "0.5305326", "0.5302968", "0.5299946", "0.5299016", "0.52969646", "0.52849543", "0.5275948", "0.52678597", "0.5265341", "0.5259592", "0.52587175", "0.52408123", "0.5237698", "0.5236864", "0.5235714", "0.5233724", "0.5225944", "0.5225944" ]
0.0
-1
Initializes the dataset for loading.
def __init__( self, train_batch_size=1, val_batch_size=1, cuda=False, num_workers=1, path=None, random_resize_crop=(0, 0), scale=(0, 0), horizontal_flip_prob=0.0, vertical_flip_prob=0.0, gaussian_blur_prob=0.0, rotate_degree=0.0, cutout_prob=0.0, cutout_dim=(8, 8), hue_saturation_prob=0.0, contrast_prob=0.0 ): self.cuda = cuda self.num_workers = num_workers self.path = path self.train_batch_size = train_batch_size self.val_batch_size = val_batch_size self.sampler = None if not os.path.exists(self.path): raise ValueError('Invalid path specified.') # Set data augmentation parameters self.random_resize_crop = random_resize_crop self.scale = scale self.horizontal_flip_prob = horizontal_flip_prob self.vertical_flip_prob = vertical_flip_prob self.gaussian_blur_prob = gaussian_blur_prob self.rotate_degree = rotate_degree self.cutout_prob = cutout_prob self.cutout_dim = cutout_dim self.hue_saturation_prob = hue_saturation_prob self.contrast_prob = contrast_prob # Get dataset statistics self.image_size = self._get_image_size() self.mean = self._get_mean() self.std = self._get_std() # Get data self._split_data()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _init_al_dataset(self):\n\n self._init_dataset()\n\n train_dataset = self.datasets['train']\n\n dataset_size = len(train_dataset)\n self.budget = math.ceil(self.budget_frac*dataset_size)\n Sampler.__init__(self, config, self.budget) # TODO: Weird place to initialise this\n\n all_indices = set(np.arange(dataset_size))\n k_initial = math.ceil(len(all_indices)*self.initial_budget_frac)\n initial_indices = random.sample(list(all_indices), k=k_initial)\n\n sampler_init = data.sampler.SubsetRandomSampler(initial_indices) # need to sample from training dataset\n\n self.labelled_dataloader = data.DataLoader(train_dataset, sampler=sampler_init, batch_size=self.batch_size, drop_last=True)\n self.val_dataloader = data.DataLoader(self.datasets['valid'], batch_size=self.batch_size, drop_last=False)\n self.test_dataloader = data.DataLoader(self.datasets['test'], batch_size=self.batch_size, drop_last=False)\n\n return all_indices, initial_indices", "def __init__(self):\n self.__dataset = None", "def __init__(self, dataset):\n self._dataset = dataset", "def __init__(self, **kwargs):\n DataLoader.__init__(self, **kwargs)", "def __init__(self, **kwargs):\n DataLoader.__init__(self, **kwargs)\n \n self._results_ = None", "def init(*args):\n global dataset\n dataset = args[0]", "def setUp(self):\n self.dataset = self.dataset_cls()", "def initialise_dataset_loader(\n self, data_param=None, task_param=None, data_partitioner=None):\n raise NotImplementedError", "def __init__(self, dataset: Dataset):\n self.dataset = dataset", "def _init_dataset():\n global _residues\n if _residues is not None:\n # Database is already initialized\n return\n\n # Residuue data is taken from\n # ftp://ftp.wwpdb.org/pub/pdb/data/monomers/components.cif\n # (2019/01/27)\n _info_dir = dirname(realpath(__file__))\n with open(join(_info_dir, \"residues.msgpack\"), \"rb\") as file:\n _residues = msgpack.unpack(\n file, use_list=False, raw=False\n )", "def prepare_data(self):\n try:\n self.train_dataset = self.datasets['train']\n self.val_dataset = self.datasets['val']\n try:\n self.test_dataset = self.datasets['test']\n except:\n pass\n except Exception as e:\n print('Data was not succesfully prepared:', e)", "def load_data(self):\n\n self._load_train_data()\n self._load_test_data()", "def _load_data(self):\n\n # This allows a simulated dataset to use the same constructor.\n if self.input_file is None:\n return\n\n logging.info(f\"Loading data from file {self.input_file}\")\n\n # Load the dataset.\n if os.path.isdir(self.input_file):\n self.data = get_matrix_from_mtx(self.input_file)\n else:\n self.data = get_matrix_from_h5(self.input_file)", "def __init__(self):\n self.args = self._prepare_args(locals())\n self.requires_full_dataset_in_memory = False", "def load_dataset(self):\n # Get all the files in the directory\n file_list = self.get_file_list()\n\n # Concatenate the data corresponding to a list of files\n data = self.concatenate_file_data(file_list)\n\n # Shuffle the data and create the training and the validation datasets\n data = self.shuffle_data_dictionary(data)\n self.training_dataset, self.validation_dataset = self.split_data_into_training_and_validation(data)", "def init_datasets(self, data_dict, label_dict):\n\n ## If data is count, then convert any sparse inputs to sparse tensors\n convert_sparse = self.config_dict.get(\"sparse_mode\") == \"count\"\n\n splits = data_dict.keys()\n dataset_dict = {\n key: ArrayDataset(\n data_dict[key],\n torch.LongTensor(label_dict[key]),\n convert_sparse=convert_sparse,\n )\n for key 
in splits\n }\n return dataset_dict", "def setUp(self):\n self.dataset = get_test_dataset()", "def init(self, **kwargs):\n self._d = {}\n self._th = None\n self._run = True\n self.load()", "def _init_data(self, data):\n assert type(data) is dict, \"dict expected: %r\" % type(data)\n assert len(data) is 1, \"size of dict should be 1: %r\" % len(data)\n self._name = data.keys()[0]\n self._data = np.asarray(data[self._name])\n self._set = True", "def _initialize_data(self):\n self.reset_count = 0\n self._idn_no_firmware = \"KEPCO,BOP 50-20,E1234,\"\n self._firmware = 2.6\n self._init_data()", "def setup(self):\n # TODO check if need both dataset together\n self.train_dataset = ABSADataset(data_path=self.train_path, mode=self.in_mode, task=self.task, \n tokenizer=self.tokenizer, vocab=\"bert\")\n self.vocabulary = self.train_dataset.vocabulary\n\n self.eval_dataset = ABSADataset(data_path=self.dev_path, mode=self.in_mode, task=self.task,\n tokenizer=self.tokenizer, vocab=self.vocabulary)\n #self.train_restaurant = ABSADataset(data_path=RESTAURANT_TRAIN)\n #self.eval_restaurant = ABSADataset(data_path=RESTAURANT_DEV)", "def load_data(self) -> None:", "def _init_train_loader(self):\n # Choose the right dataset type\n if self.config_args[\"num_members\"] > 1:\n class_dataset_wrapper = dataset_wrapper.MixMoDataset\n else:\n class_dataset_wrapper = dataset_wrapper.MSDADataset\n\n # Load augmentations\n self.traindatasetwrapper = class_dataset_wrapper(\n dataset=self.train_dataset,\n num_classes=int(self.config_args[\"data\"][\"num_classes\"]),\n num_members=self.config_args[\"num_members\"],\n dict_config=self.config_args[\"training\"][\"dataset_wrapper\"],\n properties=self.properties\n )\n\n # Build standard sampler\n _train_sampler = torch.utils.data.sampler.RandomSampler(\n data_source=self.traindatasetwrapper, ## only needed for its length\n num_samples=None,\n replacement=False,\n )\n\n # Wrap it with the repeating sampler used for multi-input models\n batch_sampler = batch_repetition_sampler.BatchRepetitionSampler(\n sampler=_train_sampler,\n batch_size=self.batch_size,\n num_members=self.config_args[\"num_members\"],\n drop_last=True,\n config_batch_sampler=self.config_args[\"training\"][\"batch_sampler\"]\n )\n\n self.train_loader = torch.utils.data.DataLoader(\n self.traindatasetwrapper,\n batch_sampler=batch_sampler,\n num_workers=self.num_workers,\n batch_size=1,\n shuffle=False,\n sampler=None,\n drop_last=False,\n pin_memory=True,\n )", "def initialize(self):\n self.population.initialize()\n self.cache.initialize()\n if self.storage:\n self.storage.initialize()", "def __init__(self, config):\n logger.info(f\"{self.__class__.__name__}: Dataset initializing ...\")\n super().__init__(config)", "def _load_data(self):\n if self._api_response.status_code == 200:\n self._dataset = self._api_response.json()\n self._fill_day_dicts()", "def _initialize_data_filter(self):\n df_params = self._loading_params.copy()\n df_params[\"filter_negate\"] = True\n df_params[\"filter_upper\"] = True\n self._data_filter = LoadProcessedData(**df_params)", "def initialize(self):\n LOG.info(\"Initializing Model.\")\n self.model = self.convert(df=self.training_df)\n if self.bootstraps is not None:\n LOG.info(\"Bootstrapping Data.\")\n self.bootstrap_data()", "def InitDataset(self):\n train_txt = 'ImageSets/Main/train.txt'\n val_txt = 'ImageSets/Main/val.txt'\n annotations = \"Annotations\"\n jpegimages = \"JPEGImages\"\n images_path = train_txt if (self.is_train) else val_txt \n images_path = 
readTxt(os.path.join(self.path, images_path))\n images_path.pop(-1)\n # rawdata format: [path_2_image, path_2_xml]\n rawData = list()\n for each in images_path:\n xml = os.path.join(self.path, annotations, each + '.xml')\n jpeg = os.path.join(self.path, jpegimages, each + '.jpg')\n rawData.append([jpeg, xml])\n return rawData", "def __init__(self, num_locations):\n self.dataset = {}\n self.num_locations = num_locations\n self.add_locations()", "def prepare(self):\n if self.opts['verbose']:\n print(\"Preparing dataset (one-time operation)...\")\n # Create paths files and load them back in\n self._build_ID_sets()\n self._create_ID_files()\n self._load_ID_files()\n if self.opts['verbose']:\n print(\"... done with preparing the dataset.\")", "def _init_dataset(self):\n champions = set()\n\n for name in os.listdir(self.data_root):\n label = name.split(\".\")[0]\n champions.add(label)\n self.image_paths += [(os.path.join(self.data_root, name), [[label]])]\n\n # self.encoder = self.encoder.fit(np.array(list(champions)).reshape(-1, 1))\n self.encoder = self.encoder.fit(list(champions))", "def initialize(self):\n self.data = None\n self.errors = []", "def load_data(self):", "def __initDataFromImages(self):\n #Check if the local_db exist\n initial_dirs = os.listdir(os.getcwd())\n is_db_empty = False\n if len(os.listdir(self.base_dir)) == 1: #Empty here means no person data\n [images_dir] = os.listdir(self.base_dir)\n is_db_empty = images_dir == cfg.local[\"IMG_DIR\"]\n if cfg.local[\"DEFAULT_IMGS_DIR\"] in initial_dirs and is_db_empty:\n default_path = os.path.join(os.getcwd(), cfg.local[\"DEFAULT_IMGS_DIR\"])\n self.X, self.y = loadDataFromImagesPath(self.detector, default_path)\n self.le = LabelEncoder()\n #Nothing relate to mapping name to dir here, we don't care about\n #This data because of the user doesn't exist in the database\n self.__savePreProcessedData()", "def __init__(self, data=None, filename=None, schema=None):\n self.data = None\n self.schema = None\n self.filename = None\n if schema:\n self.load_schema(schema)\n if filename:\n self.load_file(filename)\n if data:\n self.load_data(data)", "def __init__(self, data_set):\r\n self.name = data_set\r\n\r\n # The training and test labels\r\n self.labels = {'train': None, 'test': None}\r\n\r\n # The training and test examples\r\n self.examples = {'train': None, 'test': None}\r\n\r\n # Load all the data for this data set\r\n for data in ['train', 'test']:\r\n self.load_file(data)\r\n\r\n # The shape of the training and test data matrices\r\n self.num_train = self.examples['train'].shape[0]\r\n self.num_test = self.examples['test'].shape[0]\r\n self.dim = self.examples['train'].shape[1]", "def _init_data(self) -> None:\n self.dtype = dict()\n self.shape = dict()\n self.size = dict()\n self.attrs = dict()\n self.data_ptr = dict()\n\n if self.mode == 'r':\n for k in self.fp.keys():\n self.dtype[k] = self.fp[k].dtype\n self.shape[k] = self.fp[k].shape\n self.size[k] = self.fp[k].shape[0]\n self.data_ptr[k] = 0", "def setUp(self):\n self.directory = tempfile.TemporaryDirectory()\n self.dataset = self.dataset_cls(cache_root=self.directory.name)", "def init(self):\n self.data_dir = self.data_dir or default_data_path / self._name_\n self.cache_dir = self.data_dir / \"cache\"\n assert self.level in [\n \"word\",\n \"char\",\n ], f\"level {self.level} not supported\"", "def __init__(self, dir_path, window_size,\n user_map_path, computer_map_path, auth_type_map_path, logon_type_map_path):\n logging.info(f\"Initiating Dataset instance for directory 
{dir_path}\")\n self.directory = dir_path\n self.filenames = [filename for filename in os.listdir(dir_path) if os.path.isfile(os.path.join(dir_path, filename))]\n assert len(self.filenames) > 0\n random.shuffle(self.filenames)\n self.window_size = window_size\n self.len = self.count_len()\n self.user_map, self.user_count = util.load_mapping(user_map_path)\n self.computer_map, self.computer_count = util.load_mapping(computer_map_path)\n self.auth_type_map, self.auth_type_count = util.load_mapping(auth_type_map_path)\n self.logon_type_map, self.logon_type_count = util.load_mapping(logon_type_map_path)", "def init_dataset(validation_dataset_name):\n transform = transforms.Compose([transforms.ToPILImage(),transforms.ToTensor(),transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])])\n \n if validation_dataset_name == 'datasetRAP':\n # validation = 8317 images = 166 batches of 50 images + 1 batch of 17 images\n dataset_valid = loader_rapdataset_yiqiang.RAPDataset(0,False,'/storage/Datasets/Rap-PedestrianAttributeRecognition/',transform)\n labels = loader_rapdataset_yiqiang.ATTRIBUTES\n datset_attr_nbr = 92\n elif validation_dataset_name == 'datasetPETA':\n dataset_valid = loader_peta_dataset.PETADataset(False, '/storage/Datasets/PETA-PEdesTrianAttribute', transform)\n labels = loader_peta_dataset.ATTRIBUTES\n datset_attr_nbr = 104\n elif validation_dataset_name == 'datasetRAPPETA':\n dataset_valid = loader_rap_plus_peta_dataset.RAPPlusPETADataset(False, '/storage/Datasets/Rap-PedestrianAttributeRecognition/', '/storage/Datasets/PETA-PEdesTrianAttribute', transform)\n labels = [peta_label for rap_label,peta_label in loader_rap_plus_peta_dataset.ATTRIBUTES]\n datset_attr_nbr = 49\n\n print (\"Dataset valid size :\", dataset_valid.__len__())\n print (\"Dataset Attributes number :\", datset_attr_nbr)\n assert (len(labels) == datset_attr_nbr)\n\n dataloader_valid = DataLoader(dataset_valid, batch_size=Param_Batchsize, shuffle=True, num_workers=Param_Nb_Workers)\n\n return dataloader_valid, dataset_valid", "def _init_dataset(self, data_config, split='train'):\n assert split in {'train', 'valid'}\n\n # load datasets\n print(f'Load {split} dataset')\n if data_config['type'] == 'npy':\n dataset = MSDMelDataset(\n data_config['mel_root'], data_config[f'{split}_tids_fn'],\n data_config['label_fn'], on_mem=data_config['on_mem'],\n ignore_intersection=data_config['ignore_label_intersection'],\n transform=ToVariable())\n\n elif data_config['type'] == 'hdf':\n dataset = MSDMelHDFDataset(\n data_config['hdf_fn'], data_config[f'{split}_tids_fn'],\n data_config['label_fn'],\n ignore_intersection=data_config['ignore_label_intersection'],\n transform=ToVariable())\n\n elif data_config['type'] == 'audio':\n dataset = MSDAudioDataset(\n data_config['audio_root'], data_config[f'{split}_tids_fn'],\n data_config['tid2path_fn'], data_config['label_fn'],\n ignore_intersection=data_config['ignore_label_intersection'],\n device='cpu',\n transform=ToVariable())\n\n return dataset", "def __init__(self):\n print ('Initializing Data reader object...')\n data_Train_Images, data_Train_Labels, data_Test_Image, data_Test_Labels = self.readDataFromFile()\n test_10k_x, test_10k_y, training_55k_x, training_55k_y, validation_5k_x, validation_5k_y = self.dataTransform(\n data_Test_Image, data_Test_Labels, data_Train_Images, data_Train_Labels)\n self.train = zip(training_55k_x, training_55k_y)\n self.valid = zip(validation_5k_x, validation_5k_y)\n self.test = zip(test_10k_x, test_10k_y)\n\n self.train_position = 0\n print 
('Initialized!')", "def load_data(self):\n self.tif_file = self._find_tif_file()\n if self.with_labeling is not None:\n self.colabel_file = self._find_colabeled_file()\n self.colabel_stack = self._load_colabeled_img()\n self.dff, self.indices = self._populate_dff_data()\n self.loaded = True", "def initialize(self):\n \n #initialize the variables\n init = tf.global_variables_initializer()\n self.session.run(init)\n \n #initialize the data iterators\n self.session.run(self.data_iterator.initializer)", "def _load(self, dataset):\n raise NotImplementedError('Loader {} does not support loading datasets.'.format(self.type()))", "def __init__(self):\n self.model = None\n self.joined_datasets = None\n self.id_col = None\n self.val_col = None\n self.pop_col = None\n self.total_population_per_unit = None\n self.centroids_of_areal_data = None\n self.prepared_data = None\n self.unknown_area_id = None\n\n # Parameters\n self.lags = None\n self.step = None\n self.min_no_of_observations = None\n self.max_search_radius = None", "def init_data(dataset_config: dict):\n # train and dev will be in random order, test may be ordered according to labels\n if dataset_config[\"name\"] == \"CoLA\":\n train, dev, test, num_classes = load_cola(dataset_config)\n elif dataset_config[\"name\"] == \"AGNews\":\n train, dev, test, num_classes = load_ag_news(dataset_config)\n elif dataset_config[\"name\"] == \"DBPedia\":\n train, dev, test, num_classes = load_dbpedia(dataset_config)\n elif dataset_config[\"name\"] == \"YRF\":\n train, dev, test, num_classes = load_yrf(dataset_config)\n else:\n raise NameError(f\"Dataset {dataset_config['name']} not implemented.\")\n # etc.\n\n # shrink size if debugging\n if dataset_config[\"debug\"]:\n # choose a random subset using huggingface select function\n train = train.select(random.sample(range(len(train)), k=200))\n dev = dev.select(random.sample(range(len(dev)), k=40))\n test = test.select(random.sample(range(len(test)), k=200))\n\n # create class imbalance\n random.seed(dataset_config[\"seed\"])\n if dataset_config[\"pool_balance\"] == \"balanced\":\n pass\n elif dataset_config[\"pool_balance\"] == \"imbalanced\":\n train = train.filter(lambda example: create_imbalanced_dataset(example, dataset_config[\"imbalance_prop\"], dataset_config['imbalance_cls']))\n else:\n NameError(f\"pool_balance = {dataset_config['pool_balance']} not allowed\")\n\n if dataset_config[\"dev_balance\"] == \"balanced\":\n pass\n elif dataset_config[\"dev_balance\"] == \"imbalanced\":\n dev = dev.filter(lambda example: create_imbalanced_dataset(example, dataset_config[\"imbalance_prop\"], dataset_config['imbalance_cls']))\n else:\n NameError(f\"dev_balance = {dataset_config['dev_balance']} not allowed\")\n\n # get seed labelled pool indices (using the same seed data every time)\n random.seed(dataset_config[\"seed\"])\n if dataset_config[\"seed_balance\"] == \"balanced\":\n # this is random (will have some variance vs pool)\n indices = list(range(len(train)))\n unlabelled_pool_idx, labelled_pool_idx = split(\n indices,\n random_state=dataset_config[\"seed\"],\n test_size=dataset_config[\"seed_size\"]\n )\n elif dataset_config[\"seed_balance\"] == \"stratified\":\n # this is the same as the underlying train set (which may be unbalanced)\n indices = list(range(len(train)))\n unlabelled_pool_idx, labelled_pool_idx = split(\n indices,\n random_state=dataset_config[\"seed\"],\n test_size=dataset_config[\"seed_size\"],\n stratify=train['label']\n )\n elif dataset_config[\"seed_balance\"] == \"imbalanced\":\n # 
artificially sample an imbalanced seed set from the pool\n unlabelled_pool_idx, labelled_pool_idx = create_imbalanced_seed(\n train,\n num_classes,\n dataset_config[\"seed_size\"],\n dataset_config['imbalance_prop'],\n dataset_config['imbalance_cls']\n )\n else:\n raise NameError(f\"seed_balance = {dataset_config['seed_balance']} not allowed\")\n\n return train, dev, test, num_classes, labelled_pool_idx, unlabelled_pool_idx", "def _init_from_DataArrays(self, data, validate=True):\n self._data_vars = self._DataArrays_as_mapping(data)\n\n if (len(self) > 1) and validate:\n first = self[0]\n for i in range(1, len(self)):\n da = self[i]\n first._is_compatible(da, raise_error=True)\n\n self._check_all_different_ids(self._data_vars.values())\n\n self.__itemattr = []\n for key, value in self._data_vars.items():\n self._set_name_attr(key, value)\n\n self.plot = _DatasetPlotter(self)\n\n if len(self) > 0:\n self._set_spectral_attributes(self.geometry)\n\n # since Dataset is MutableMapping it has values and keys by default\n # but we delete those to avoid confusion\n # self.values = None\n self.keys = None", "def init_loader(dataset):\n if dataset == 'chaos':\n return ChaosLoader()\n return None", "def load_data(self):\n raise NotImplementedError()", "def _load_data(self):\n if self._name in BALANCE_DATASET:\n _loader = dataset_loaders[self._name]\n xnp, y = _loader()\n\n # Train - Test split\n gen = ShuffleSplit(n_splits=1, random_state=42, test_size=self._test_size).split(xnp)\n train_idx, test_idx = next(gen)\n\n # Train data.\n self.xnp_tr = xnp[train_idx]\n self.y_tr = y[train_idx]\n # Test data.\n self.xnp_ts = xnp[test_idx]\n self.y_ts = y[test_idx]\n\n else:\n _loader = dataset_loaders[self._name]\n xnp, xp, y = _loader()\n # self.xnp, self.xp, self.y = _loader()\n\n # Train - Test split\n gen = ShuffleSplit(n_splits=1, random_state=42, test_size=self._test_size).split(xnp)\n train_idx, test_idx = next(gen)\n\n # Train data.\n self.xnp_tr = xnp[train_idx]\n self.xp_tr = xp[train_idx]\n self.y_tr = y[train_idx]\n # Test data.\n self.xnp_ts = xnp[test_idx]\n self.xp_ts = xp[test_idx]\n self.y_ts = y[test_idx]", "def __init__(self):\n self.data_set_loc = conf.config_section_mapper(\"filePath\").get(\"data_set_loc\")\n self.data_extractor = DataExtractor(self.data_set_loc)", "def init_datasets(self, data_dict, label_dict):\n splits = data_dict.keys()\n dataset_dict = {\n key: ArrayDataset(data_dict[key], torch.LongTensor(label_dict[key]))\n for key in splits\n }\n return dataset_dict", "def setup(self):\n in_dataset, out_dataset = self.get_datasets()", "def setup(self):\n in_dataset, out_dataset = self.get_datasets()", "def ensure_dataset_loaded(self, name):\n if name not in self.datasets:\n print(f'Loading dataset \"{name}\"')\n pd_data = pd.read_excel(self.datafiles[name])\n data = pd.DataFrame.to_dict(pd_data, 'records')\n self.datasets[name] = data", "def init_datasets(self, data_dict, label_dict):\n splits = data_dict.keys()\n dataset_dict = {\n key: ArrayDataset(\n data_dict[key], torch.LongTensor(label_dict[key]), convert_sparse=False\n )\n for key in splits\n }\n return dataset_dict", "def __init__(self, path: str = './') -> None:\n super(DataHandler, self).__init__()\n self.path = path\n self.dataset = None # type: str\n self._file_format = None # type: str\n self._max_file_count = 0", "def initialize(self) -> None:\n self.model = load(self.path)", "def init_batch(self):\n pass", "def __init__(\n self,\n path,\n tier,\n embeddings=None,\n preprocessor=None,\n transform=True):\n 
super().__init__()\n assert os.path.isdir(path)\n assert os.path.exists(os.path.join(path, f'{tier}.csv'))\n assert tier in self.TIERS\n\n self.path = path\n self.tier = tier\n self.data = None\n self.embeddings = embeddings\n self.preprocessor = preprocessor\n self.is_transform = transform\n self._init_dataset()", "def __init__(self, data_config):\n self._brands = self._load_from_directory(data_config['targeted_brands_dir'])\n self._keywords = self._load_from_directory(data_config['keywords_dir'])\n self._fqdn_keywords = self._load_from_directory(data_config['fqdn_keywords_dir'])\n self._similarity_words = self._load_from_directory(data_config['similarity_words_dir'])\n self._tlds = self._load_from_directory(data_config['tld_dir'])", "def build_data_set(self):\n if not self.assert_data_correct():\n self.download_all_data()\n self.unpack_rename_data()\n self.split_data_characters()\n self.clean_data_fragments()\n self.create_font_data()\n if not self.assert_train_augmented():\n self.augment_train_data()\n if not self.assert_style_data_correct():\n self.download_style_data()\n self.unpack_rename_data()", "def __init__(self, unencoded_dataset): \n self._unencoded_dataset = unencoded_dataset\n self._plain_train = self._get_plain_train()\n self._plain_test = self._get_plain_test()\n self._cipher_train = self._get_cipher_train()\n self._cipher_test = self._get_cipher_test()", "def init_datasets(self, display_samples = False):\n print(\"==> Loading images from \", self.img_dir)\n self.image_data_gen = ImageDataGenerator(\n rescale=1./255,\n #rotation_range=30,\n #shear_range=30,\n #width_shift_range=.15,\n #height_shift_range=.15,\n #zoom_range=0.5,\n validation_split=0.2)\n\n self.train_dataset = self.image_data_gen.flow_from_directory(\n batch_size=BATCH_SIZE,\n directory=self.img_dir,\n shuffle=True,\n target_size=self.img_size,\n class_mode='sparse',\n subset='training')\n\n self.val_dataset = self.image_data_gen.flow_from_directory(\n batch_size=BATCH_SIZE,\n directory=self.img_dir,\n shuffle=True,\n target_size=self.img_size,\n class_mode='sparse',\n subset='validation')\n\n if display_samples:\n self.display_sample_images()", "def _load_dataset(self, data_path, augmentation, batch_size):\n if path.split(data_path)[1] == \"\":\n # Deal with edge case where there's a \"/\" at the end of the path.\n data_path = path.split(data_path)[0]\n\n if path.split(data_path)[1].endswith(\"training\"):\n dataset_name = \"training dataset\"\n else:\n dataset_name = \"validation dataset\"\n\n start_time = time.time()\n self._update_status(\"Loading {}.\".format(dataset_name))\n\n\n dataset = MapillaryDataset(data_path, augmentation, self.iaa)\n data_loader = DataLoader(dataset,\n batch_size,\n shuffle=True)\n\n self._update_status(\"{} loaded. 
({} ms)\".format(\n dataset_name.capitalize(),\n int((time.time() - start_time) * 1000)))\n\n return data_loader", "def __init__(self, datafiles, plotter):\n self.datafiles = datafiles\n self.datasets = dict()\n self.plotter = plotter", "def load_data(self, dataset, dataset_name):\n with open(dataset, \"r\", encoding=\"utf-8\") as f:\n self.data = json.load(f)\n self.dataset_name = dataset_name", "def __init__(self, *, dataset=None, aliases=None):\n self._datasets = [] if dataset is None else [dataset]\n self._aliases = {} if aliases is None else {a: 0 for a in aliases}\n self._default_index = 0", "def _init_img_dataset(self, dataset_path):\n\n # ==\n # Define the classes used in the various states\n # form: (state class : cifar label class)\n class_dict = {\n 'initial': 'automobile',\n 'choice_1': 'dog',\n 'choice_2': 'cat',\n 'corridor': 'bird',\n }\n\n # ==\n # Download / initialize dataset\n ds = CIFAR10(dataset_path, train=self.training,\n download=True)\n\n # Get the CIFAR class index for each of the state classes\n cifar_class_dict = {\n k: ds.class_to_idx[class_dict[k]] for k in class_dict\n }\n\n # Iterate over the CIFAR dataset and get the idxs to each class\n cifar_indexes = {k: [] for k in class_dict}\n for i in range(len(ds)):\n cur_cifar_class = ds[i][1]\n for k in class_dict:\n if cur_cifar_class == cifar_class_dict[k]:\n cifar_indexes[k].append(i)\n\n # Manually sub-sample choice classes\n for k in ['choice_1', 'choice_2']:\n n_imgs = min(self.num_ds_imgs, len(cifar_indexes[k]))\n rng = np.random.default_rng()\n choice_imgs = rng.choice(cifar_indexes[k], size=n_imgs,\n replace=False)\n cifar_indexes[k] = choice_imgs\n\n # Manually shuffle the corridor class\n rng = np.random.default_rng()\n corri_img_shufIdxs = rng.choice(cifar_indexes['corridor'],\n size=len(cifar_indexes['corridor']),\n replace=False)\n cifar_indexes['corridor'] = corri_img_shufIdxs\n\n # ==\n # Construct the data subset dictionary\n ds_dict = {}\n for k in class_dict:\n ds_dict[k] = Subset(ds, cifar_indexes[k])\n\n return ds_dict", "def __loadDataset(self, parameters):\n # self.localConfigured = Settings.instance().readValue( key = 'Common/local-repo' )\n for pr in parameters:\n if pr['type'] == 'dataset':\n if pr['value'].startswith('undefined:/'):\n fileName = pr['value'].split('undefined:/')[1]\n if not os.path.exists( fileName ):\n raise Exception(\"the following test data file is missing: %s \" % fileName)\n\n doc = FileModelTestData.DataModel()\n res = doc.load( absPath = fileName )\n pr['value'] = \"undefined:/%s\" % doc.getRaw()\n elif pr['value'].startswith('local-tests:/'):\n fileName = pr['value'].split('local-tests:/')[1]\n\n if not os.path.exists( fileName ):\n raise Exception(\"the following test data file is missing: %s \" % fileName)\n \n doc = FileModelTestData.DataModel()\n res = doc.load( absPath = fileName )\n pr['value'] = \"local-tests:/%s\" % doc.getRaw()\n else:\n pass", "def _load_train_data(self):\n\n self.train_loader = data.Train_loader(self.N_max, self.n_per_conn,\n self.data_path, self.device)\n self.train_loader.load_data()\n\n # load mean and std\n scc_mean_std = np.loadtxt(\n os.path.join(self.data_path, 'scc_mean_std.csv'), delimiter=',')\n self.mean = torch.Tensor(scc_mean_std[0])\n self.std = torch.Tensor(scc_mean_std[1])", "def dataloader(self):\n\n # load / split data\n train_data = self.data.get_train_data()\n if self.args.use_dev:\n train_data, dev_data = self.data.split_data(train_data)\n test_data = self.data.get_test_data()\n\n #print(train_data[0])\n 
#print(dev_data[0])\n #print(test_data[0])\n\n # build dataset\n train_dataset = self.loader.build_dataset(\n train_data, \n self.args.train_max_seq_len)\n train_loader = self.loader.build_dataloader(\n train_dataset, 'train')\n\n test_dataset = self.loader.build_dataset(\n test_data,\n self.args.eval_max_seq_len)\n test_loader = self.loader.build_dataloader(\n test_dataset, 'test')\n\n if self.args.use_dev:\n dev_dataset = self.loader.build_dataset(\n dev_data,\n self.args.eval_max_seq_len)\n dev_loader = self.loader.build_dataloader(\n dev_dataset, 'dev')\n return train_loader, dev_loader, test_loader\n else:\n return train_loader, test_loader", "def loadData(self):\n batch_size = 256\n \n #if self.conv_sg == True:\n # batch_size = 1 \n \n download = True\n root = self.root + self.dataset\n if self.dataset == \"MNIST\": \n transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))])\n trainset = torchvision.datasets.MNIST(root, train=True, download=download, transform=transform)\n testset = torchvision.datasets.MNIST(root, train=False, download=download, transform=transform)\n \n if self.dataset == \"CIFAR10\":\n transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.4914, 0.4822, 0.4465,), (0.2023, 0.1994, 0.2010,))])\n trainset = torchvision.datasets.CIFAR10(root, train=True, download=download, transform=transform)\n testset = torchvision.datasets.CIFAR10(root, train=False, download=download, transform=transform)\n \n if self.dataset == \"CIFAR100\":\n transform = transforms.Compose([transforms.ToTensor()])\n trainset = torchvision.datasets.CIFAR100(root, train=True, download=download, transform=transform)\n testset = torchvision.datasets.CIFAR100(root, train=False, download=download, transform=transform)\n \n \n trainloader = torch.utils.data.DataLoader(trainset, batch_size = batch_size,\n shuffle=False, num_workers=0, pin_memory = False)\n \n testloader = torch.utils.data.DataLoader(testset, batch_size= batch_size,\n shuffle=False, num_workers=2, pin_memory = False)\n \n return trainloader, testloader", "def initSets(self):\n data_frame = pd.read_csv(self.train_file, header=None)\n data_frame = data_frame.drop(columns=self.drop_cols)\n features = data_frame.iloc[:, :-1].values\n labels = data_frame.iloc[:, -1].values\n if self.test_file is None:\n self.train_feat, self.test_feat, self.train_labl, self.test_labl = train_test_split(features, labels, test_size=self.test_size)\n else:\n data_frame = pd.read_csv(self.test_file, header=None)\n data_frame = data_frame.drop(columns=self.drop_cols)\n self.train_feat, _, self.train_labl, _ = train_test_split(features, labels, test_size=self.test_size)\n features = data_frame.iloc[:, :-1].values\n labels = data_frame.iloc[:, -1].values\n _, self.test_feat, _, self.test_labl = train_test_split(features, labels, test_size=self.test_size)\n # kfold = KFold(n_splits=3)\n # self.train_index, self.test_index = kfold.split(features,labels)", "def initialize(self):\r\n if not self.context:\r\n self.context = SQLContext(self.url, self.connection, self.schema)\r\n if self.table is None:\r\n self.table = self.context.table(self.table_name)\r\n if not self.fields:\r\n self.read_fields()\r\n self.field_names = self.fields.names()", "def init_from_data(self, data):\n self.data = data\n self.norm_data()", "def __init__(self, data_path=root.joinpath(\"data\")):\n self.data_path = data_path", "def __init__(self, dataset, batch_size, n_threads=4,\n\t ten_crop=False, data_path='/home/dataset/', 
logger=None):\n\t\tself.dataset = dataset\n\t\tself.batch_size = batch_size\n\t\tself.n_threads = n_threads\n\t\tself.ten_crop = ten_crop\n\t\tself.data_path = data_path\n\t\tself.logger = logger\n\t\tself.dataset_root = data_path\n\t\t\n\t\tself.logger.info(\"|===>Creating data loader for \" + self.dataset)\n\t\t\n\t\tif self.dataset in [\"cifar100\"]:\n\t\t\tself.train_loader, self.test_loader = self.cifar(\n\t\t\t\tdataset=self.dataset)\n\n\t\telif self.dataset in [\"cifar10\"]:\n\t\t\tself.train_loader, self.test_loader = self.cifar(\n dataset=self.dataset)\n\t\t\n\t\telif self.dataset in [\"imagenet\"]:\n\t\t\tself.train_loader, self.test_loader = self.imagenet(\n\t\t\t\tdataset=self.dataset)\n\t\telse:\n\t\t\tassert False, \"invalid data set\"", "def _initialize_data(self):\n self.unique_id = 123\n\n self.gas_valve_open = False\n self.buffer_valve_open = False\n self.pump_valve_open = False\n\n self.operatingmode = 0\n\n self.sample_pressure_high_limit = 100\n self.sample_pressure_low_limit = 10\n self.sample_pressure = 0\n\n self.error = 0\n\n self.buffer_pressure_high = True", "def init_loaders(self, *args, **kwargs):\n\n # Convert the data to Dataset\n dataset_dict = self.init_datasets(*args, **kwargs)\n\n # If the Dataset implements collate_fn, that is used. Otherwise, default_collate is used\n if hasattr(dataset_dict[\"train\"], \"collate_fn\") and callable(\n getattr(dataset_dict[\"train\"], \"collate_fn\")\n ):\n collate_fn = dataset_dict[\"train\"].collate_fn\n else:\n collate_fn = default_collate\n\n # If 'iters_per_epoch' is defined, then a fixed number of random sample batches from the training set\n # are drawn per epoch.\n # Otherwise, an epoch is defined by a full run through all of the data in the dataloader.\n #\n if self.config_dict.get(\"iters_per_epoch\") is not None:\n num_samples = (\n self.config_dict[\"iters_per_epoch\"] * self.config_dict[\"batch_size\"]\n )\n loaders_dict = {}\n for key in dataset_dict.keys():\n if key == \"train\":\n loaders_dict[key] = DataLoader(\n dataset_dict[key],\n batch_sampler=BatchSampler(\n RandomSampler(\n dataset_dict[key],\n replacement=True,\n num_samples=num_samples,\n ),\n batch_size=self.config_dict[\"batch_size\"],\n drop_last=False,\n ),\n collate_fn=collate_fn,\n )\n else:\n loaders_dict[key] = DataLoader(\n dataset_dict[key],\n batch_size=self.config_dict[\"batch_size\"],\n collate_fn=collate_fn,\n )\n else:\n loaders_dict = {\n key: DataLoader(\n dataset_dict[key],\n batch_size=self.config_dict[\"batch_size\"],\n collate_fn=collate_fn,\n )\n for key in data_dict.keys()\n }\n\n return loaders_dict", "def _load_dataset(self, split, align, partition):\n\n if partition == 'all':\n self._image_list = self._face.image_list + self._clothes.image_list\n celeba_num = self._face.num_images\n deepfashion_num = self._clothes.num_images\n elif partition == 'face':\n self._image_list = self._face.image_list\n celeba_num = self._face.num_images\n deepfashion_num = 0\n elif partition == 'clothes':\n self._image_list = self._clothes.image_list\n celeba_num = 0\n deepfashion_num = self._clothes.num_images\n\n self._gtdb = {'attr': -1.*np.ones((self.num_images, self.num_classes), dtype=np.float64)}\n\n # load labels for celeba images if they are included. 
\n if celeba_num > 0:\n self._gtdb['attr'][:celeba_num, self._face_class_idx] = self._face.gtdb['attr']\n # load soft labels for clothes attributes on celeba\n if align:\n fn = osp.join(self.data_path, 'person_'+'face'+'_'+split+'_align.pkl')\n else:\n fn = osp.join(self.data_path, 'person_'+'face'+'_'+split+'.pkl') \n if osp.exists(fn):\n if partition == 'all':\n with open(fn, 'rb') as fid:\n labels = cPickle.load(fid)\n self._gtdb['attr'][:celeba_num, self._clothes_class_idx] = labels\n else:\n 'Dataset {}: Labels for clothes attributes on CelebA are not loaded, the partition is not \"all\"'.format(self.name)\n else:\n print 'Dataset {}: Labels for clothes attributes on CelebA are not available! Missing filename: {}. Did you forget to run load_person.py first?'.\\\n format(self.name, fn)\n\n # load labels for deepfashion images if they are included.\n if deepfashion_num > 0:\n self._gtdb['attr'][celeba_num:, self._clothes_class_idx] = self._clothes.gtdb['attr']\n # load soft labels for face attributes on deepfashion\n fn = osp.join(self.data_path, 'person_'+'clothes'+'_'+split+'.pkl')\n if osp.exists(fn):\n if partition == 'all':\n with open(fn, 'rb') as fid:\n labels = cPickle.load(fid)\n self._gtdb['attr'][celeba_num:, self._face_class_idx] = labels\n else:\n 'Dataset {}: Labels for face attributes on Deepfashion are not loaded, the partition is not \"all\"'.format(self.name)\n else:\n print 'Dataset {}: Labels for face attributes on Deepfashion are not available! Missing filename: {}. Did you forget to run load_person.py first?'.\\\n format(self.name, fn)", "def refresh_train_dataset(self):\n inputs_id, inputs_starts, inputs_paths, inputs_ends, inputs_label = self.build_data(self.reader, self.train_items, self.option.max_path_length)\n self.train_dataset = CodeDataset(inputs_id, inputs_starts, inputs_paths, inputs_ends, inputs_label)", "def _load_training_data(self):\n self._save_training_data()", "def initialize(self):\n self.initialize_edges()\n self.initialize_prob()\n self.initialize_total_input_dict()\n\n self.initialize_fpmusigv_dict()", "def __init__(self, dataset_path):\n super(TorchData, self).__init__()\n self.dataset = h5py.File(dataset_path, 'r')\n self.bg = self.dataset['bg']\n self.vocal = self.dataset['vocal']\n self.mix = self.dataset['mix']\n self.len = self.bg.shape[0]", "def __init__(self, directory):\n self._path = os.path.join(\"../../datasets\", directory)\n self.airlines = pd.read_csv(os.path.join(self._path, 'airlines.csv'))\n self.airports = pd.read_csv(os.path.join(self._path, 'airports.csv'))\n self.planes = pd.read_csv(os.path.join(self._path, 'planes.csv'))\n self.countries = pd.read_csv(os.path.join(self._path, 'countries.csv'))\n self.routes = pd.read_csv(os.path.join(self._path, 'routes.csv'))\n self._CreateGraph()", "def _make_data(self):\n pdf_datasets_all = make_pdf_datasets(self.pdf_list, self.xlims, self.ylims, self.tlims, self.dims, 9)\n self.pdf_dataset = np.concatenate(pdf_datasets_all, axis = 0)\n self.PDE_dataset = make_PDE_dataset(self.num_collocation, self.xlims, self.ylims, self.tlims, self.dims)\n self.BC_dataset = make_BC_dataset(self.num_BC, self.xlims, self.ylims, self.tlims, self.dims)", "def __init__(self, **kwargs):\n DataLoader.__init__(self, **kwargs)\n\n self.init_db()\n \n self._query_names_['totals'] = 'report_campaign_totals'\n self._query_names_['times'] = 'report_campaign_times'\n self._query_names_[FDH._TESTTYPE_BANNER_] = 'report_campaign_banners'\n self._query_names_[FDH._TESTTYPE_LP_] = 'report_campaign_lps'\n 
self._query_names_[FDH._TESTTYPE_BANNER_LP_] = 'report_campaign_bannerlps'\n \n self._query_type_ = kwargs['query_type']", "def __init__(self):\n self.data_file = ''\n self.data = pd.DataFrame()\n self.labels = pd.DataFrame()\n self.labels_onehot = pd.DataFrame()\n self.df = pd.DataFrame()\n self.df_perm = pd.DataFrame() # for debug purpose\n self.n_samples = 0\n self.n_features = 0\n self.label_dict = {}\n self.inv_label_dict = {}\n self.n_classes = 0\n self.batch_size = 0\n self.n_batch = 0\n self.current_batch_idx = 0\n self.true_distribution = []", "def __init__(self, dataset_name, teacher_model, students_model):\n self.data_manager = DataManager(dataset_name)\n self.dataset_name = dataset_name\n self.teacher_model = teacher_model\n self.student_model = students_model", "def load_data(self):\n if self.debug:\n print(\"Loading data\")", "def __init__(self):\n \n self.load_PSF_data()", "def __init__(self, dataloader):\n self._dataloader = dataloader\n\n self._iterator = iter(self._dataloader)", "def setup(self, stage: Union[str, None] = None) -> None:\n self.data_splits = {}\n # set up each of the dataset splits\n for key, path in self.paths.items():\n self.data_splits[key] = self.dataset_class(path)", "def __init__(self):\n self._distance_data = []\n self._location_data = []\n self._package_data = []", "def _init_dataset(self):\n chars = set()\n with open(self.file_path + \"/words.txt\", 'r') as input_file:\n for line in input_file:\n line_split = line.strip().split('\\t')\n file_name = self.file_path+\"/words/\"+line_split[1]\n gt_text = line_split[0]\n chars = chars.union(set(list(gt_text)))\n self.samples.append((file_name, gt_text))\n input_file.close()\n\n self.char_set = sorted(list(chars))", "def __init__(self):\n\n self.current_path = os.getcwd()\n self.data_path = self.current_path + \"/data\"\n\n self.original_files = {}\n self.imitation_files = {}\n self.original_test_files = {}\n self.imitation_test_files = {}\n\n self.training_set = None\n self.original_test_set = None\n self.imitation_test_set = None\n\n self.accuracy = 0.\n self.threshold = 0.\n\n self.get_files()", "def build_dataset(self):\n print(\"reading data of images currently , please wait......\")\n x_train, y_train, _ = get_images(self.train_directory)\n x_test, y_test, _ = get_images(self.test_directory)\n x_train, y_train = image_subset(self.num_classes, x_train, y_train)\n x_test, y_test = image_subset(self.num_classes, x_test, y_test)\n x_train = x_train.astype('float32')\n x_test = x_test.astype('float32')\n self.x_train = x_train / 255\n self.x_test = x_test / 255\n self.y_train = utils.to_categorical(y_train, self.num_classes)\n self.y_test = utils.to_categorical(y_test, self.num_classes)" ]
[ "0.76784205", "0.757365", "0.7362059", "0.7325153", "0.7320442", "0.72897947", "0.720113", "0.7199336", "0.7177944", "0.7079859", "0.705883", "0.7033235", "0.7033188", "0.6956903", "0.69027686", "0.6867748", "0.68586814", "0.6850223", "0.68302983", "0.6828051", "0.6822883", "0.6822728", "0.68213457", "0.6788598", "0.67676085", "0.67556787", "0.675236", "0.6742196", "0.67290014", "0.672894", "0.6717624", "0.67050344", "0.6679376", "0.66749513", "0.6674735", "0.66727763", "0.6665771", "0.66527116", "0.66479236", "0.66308695", "0.6606026", "0.6603607", "0.65759003", "0.6560324", "0.6554468", "0.65543485", "0.6553759", "0.65481395", "0.6527734", "0.65274996", "0.6525997", "0.6518341", "0.6511697", "0.650746", "0.6505691", "0.65027934", "0.65027934", "0.64932746", "0.6491598", "0.6489366", "0.6482915", "0.64826626", "0.6480744", "0.6474175", "0.64644736", "0.6460897", "0.6442308", "0.6432283", "0.64321107", "0.6424559", "0.64168537", "0.64114267", "0.6405278", "0.6404562", "0.6400427", "0.6396778", "0.6393507", "0.6390667", "0.63815016", "0.6366647", "0.6366536", "0.63501155", "0.63403654", "0.63336533", "0.63251734", "0.631963", "0.6317961", "0.6316837", "0.6314378", "0.6311377", "0.6297586", "0.6291488", "0.6288581", "0.62807953", "0.6279806", "0.62728065", "0.6268041", "0.6264303", "0.626026", "0.6258268", "0.6255105" ]
0.0
-1
Split data into training and validation sets.
def _split_data(self): # Set training data self.train_data = torchvision.datasets.ImageFolder( os.path.join(self.path, 'train'), transform=self._transform() ) self.classes = self.train_data.classes # Set validation data self.val_data = torchvision.datasets.ImageFolder( os.path.join(self.path, 'test'), transform=self._transform(train=False) )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def split_data_into_training_and_validation(self, data):\n training_dataset = self.get_data_from_indices(data, np.arange(self.num_training_samples))\n validation_dataset = self.get_data_from_indices(data, np.arange(self.num_training_samples,\n self.p.trainer.num_samples))\n return training_dataset, validation_dataset", "def __split_dataset(self):\n self.train, self.valid, _, _ = train_test_split(self.data, self.data, test_size=0.2)\n self.valid, self.test, _, _ = train_test_split(self.valid, self.valid, test_size=0.5)", "def split_data_into_train_and_test(raw_training_data):\n train_set, test_set = train_test_split(raw_training_data, test_size=0.2, random_state=42)\n return train_set, test_set", "def DataSplit(self, data):\n train_X,test_X,train_y,test_y=train_test_split(data[0],data[1], random_state=2)\n valid_X,valid_y=train_test_split(data[0],data[1],random_state=2,test_size=0.15)[1],train_test_split(data[0],data[1],random_state=2,test_size=0.15)[3]\n return (train_X,test_X,valid_X,train_y,test_y,valid_y)", "def split_data(self):\n X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=7)\n\n return X_train, X_test, y_train, y_test", "def split_data(self, data):\n\n train_df, test_df = train_test_split(data, test_size=self.test_size, \n random_state=0, \n stratify=data[self.outcome_name])\n\n # print(\"Splitting data into training with \", train_df.shape, \"sampes and \",\n # test_df.shape, \"testing samples\")\n\n return train_df, test_df", "def split_data(self):\n self.train, self.val, self.test_x, self.test_y = [], [], [], []\n train_size = self.horizon\n # This assumes all countries have the same length.\n # The minus two gives space for the validation and test sets as they will overshoot.\n k_folds = len(self.countries[0].data)//self.horizon - 2\n for _ in range(k_folds):\n tr, v, te_x, te_y = self.cross_validate(train_size)\n self.train.append(tr), self.val.append(v), self.test_x.append(te_x), self.test_y.append(te_y)\n train_size += self.horizon", "def train_test_split(data, validate_size=0.3):\r\n\r\n split = len(data) * (1 - validate_size)\r\n split = int(split)\r\n train = data[:split]\r\n validate = data[split:]\r\n\r\n return train, validate", "def split_data(self):\n if not self.load_data:\n raise AttributeError('Preprocessor has not loaded any data.')\n \n # 3 - Find example counts for each set\n self.n_examples = self.data[0].shape[0]\n self.n_train = int(self.n_examples * self.train_ratio)\n self.n_val = int(self.n_examples * self.val_ratio)\n self.n_test = self.n_examples - self.n_train - self.n_val\n \n logger.info(f'Set sizes:')\n logger.info(f'train: {self.n_train}')\n logger.info(f'val: {self.n_val}')\n logger.info(f'test: {self.n_test}')\n if self.n_test < 0:\n raise ValueError('Train + validation ratios must bef < 1')\n\n # 4 - Separate data into train, test, val\n if isinstance(self.data[0], pd.DataFrame):\n logger.info('Dataset is in a dataframe.')\n self.isdataframe = True\n\n self.train_data = [self.data[0].iloc[:self.n_train],\n self.data[1].iloc[:self.n_train]]\n \n self.val_data = [self.data[0].iloc[self.n_train:self.n_val + self.n_train],\n self.data[1].iloc[self.n_train:self.n_val + self.n_train]]\n \n self.test_data = [self.data[0].iloc[self.n_val + self.n_train:],\n self.data[1].iloc[self.n_val + self.n_train:]]\n logger.info('Data was split into train, val, test.')\n else:\n self.isdataframe = False\n logger.info('Dataset is in a numpy array.')\n \n # If datasets are numpy array or sparse\n self.train_data = 
[self.data[0][:self.n_train],\n self.data[1][:self.n_train]]\n \n self.val_data = [self.data[0][self.n_train:self.n_val + self.n_train],\n self.data[1][self.n_train:self.n_val + self.n_train]]\n \n self.test_data = [self.data[0][self.n_val + self.n_train:],\n self.data[1][self.n_val + self.n_train:]]\n logger.info('Data was split into train, val, test.')\n \n assert(self.n_train == self.train_data[0].shape[0])\n assert(self.n_val == self.val_data[0].shape[0])\n assert(self.n_test == self.test_data[0].shape[0])\n \n # Free memory\n del self.data\n \n if self.save_sets:\n self.save_datasets()", "def data_split(X, y):\n folds = KFold(n_splits=SPLITS, shuffle=True, random_state=RANDOM_STATE)\n train_indices, validation_indices = list(folds.split(X))[-1][0], list(folds.split(X))[-1][1]\n\n X_train = X.iloc[train_indices]\n X_validation = X.iloc[validation_indices]\n\n y_train = y.iloc[train_indices]\n y_validation = y.iloc[validation_indices]\n\n return X_train, X_validation, y_train, y_validation", "def train_val_test_split(data):\n raise NotImplementedError", "def split_dataset(self, test_size=0.20):\n\t\t(self.training_data, self.test_data, self.training_labels, self.test_labels) = train_test_split(self.training_data, self.training_labels, test_size=test_size)", "def split_data(self, val_size=0.1, test_size=0.5):\n df = pd.read_csv(self.csv_path, delimiter=';')\n train, val = train_test_split(df, test_size=val_size)\n val, test = train_test_split(df, test_size=test_size)\n return train, val, test", "def split_data(data, test_size):\r\n ntest = int(round(len(data) * (1 - test_size)))+1\r\n \r\n train, test = data[:ntest], data[ntest:]\r\n \r\n return train,test", "def split_data(train, parameters):\n labels = train.labels\n train_indices, val_indices = train_test_split(range(len(labels)),\n stratify=labels,\n random_state=parameters['seed'],\n test_size=parameters['validation_size'])\n return train_indices, val_indices", "def split_data(X, y, test_size, random_state):\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=test_size, random_state=random_state)\n\n return X_train, X_test, y_train, y_test", "def splitData(X, y):\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25, random_state=0)\n print(X_train)\n print(y_train)\n print(X_test)\n print(y_test)\n return X_train, X_test, y_train, y_test", "def split_dataset(self, split):\n trunk_pos_size = math.ceil((1 - split) * len(self.Pos))\n trunk_neg_size = math.ceil((1 - split) * len(self.Neg))\n trunk_num = int(1 / (1 - split))\n pos_temp = list()\n neg_temp = list()\n for index in range(trunk_num):\n pos_temp.append(self.Pos[index * trunk_pos_size:(index + 1) *\n trunk_pos_size])\n neg_temp.append(self.Neg[index * trunk_neg_size:(index + 1) *\n trunk_neg_size])\n self.test = pos_temp.pop(2) + neg_temp.pop(2)\n # self.train = [i for item in pos_temp + neg_temp for i in item]\n self.train = []\n for item in pos_temp + neg_temp:\n for i in item:\n self.train.append(i)\n\n random.shuffle(self.train)\n random.shuffle(self.test)", "def split_train_and_validation(whole_train_data, whole_train_labels, validation_index, k_fold):\n dimension = whole_train_data.shape[1]\n train_data_chunks = np.array_split(whole_train_data, k_fold)\n train_label_chunks = np.array_split(whole_train_labels, k_fold)\n validation_data = train_data_chunks[validation_index]\n validation_labels = train_label_chunks[validation_index]\n train_data = np.delete(train_data_chunks, validation_index, 0)\n train_data = train_data.reshape((-1, 
dimension))\n train_labels = np.delete(train_label_chunks, validation_index, 0)\n train_labels = train_labels.flatten()\n return train_data, train_labels, validation_data, validation_labels", "def split_data(basedir, data_split=0.80):\n manip = data_manipulator(basedir)\n manip.train_test_split(data_split=data_split)", "def split_test_and_train_data(self, test_size=0.3, view=0):\n X_train, X_test, y_train, y_test = train_test_split(self.data[view], self.class_data, test_size=test_size)\n return X_train, X_test, y_train, y_test", "def split_data(data, labels):\r\n # Split the data into train and test\r\n X_train, X_test, y_train, y_test = train_test_split(data, labels, test_size=0.30, random_state = 42)\r\n return(X_train, y_train, X_test, y_test)", "def split_train_val(X, y, train_size):\n\n total_size = len(X)\n # shuffle data\n shuffle_indices = np.random.permutation(np.arange(total_size))\n X = X[shuffle_indices]\n y = y[shuffle_indices]\n\n # split training data\n train_indices = np.random.choice(total_size, train_size, replace=False)\n X_train = X[train_indices]\n y_train = y[train_indices]\n\n # split validation data\n val_indices = [i for i in xrange(total_size) if i not in train_indices]\n X_val = X[val_indices]\n y_val = y[val_indices]\n\n return X_train, y_train, X_val, y_val", "def split_data(self, model_data, tuning=True):\n pass", "def split_data(X_data, y_data):\n return cv.train_test_split(X_data, y_data, test_size=0.1, random_state=0)", "def data_split(data, labels, train_ratio=0.5, rand_seed=42):\n\n assert 0 <= train_ratio <= 1, \"Error: training set ratio must be between 0 and 1\"\n\n x_train, x_temp, y_train, y_temp = train_test_split(data,\n labels,\n train_size=train_ratio,\n random_state=rand_seed)\n\n x_val, x_test, y_val, y_test = train_test_split(x_temp,\n y_temp,\n train_size=0.5,\n random_state=rand_seed)\n\n return x_train, x_val, x_test, y_train, y_val, y_test", "def split_data(data, labels, val_size):\n # Shuffle index\n index = np.random.permutation(len(data))\n\n # Split into Datasets\n X_val = data[index][-val_size:]\n X_train = data[index][:-val_size]\n y_val = labels[index][-val_size:].ravel()\n y_train = labels[index][:-val_size].ravel()\n\n return X_train, X_val, y_train, y_val", "def TrainTestSplit(self,X, y):\n X_train, X_test, y_train, y_test = train_test_split(X, y, \n test_size=0.33,\n random_state=42)\n return X_train, X_test, y_train, y_test", "def perform_data_split(X, y, training_idxs, test_idxs, val_idxs):\n X_train = X[training_idxs]\n X_test = X[test_idxs]\n #X_val = X[val_idxs]\n\n y_train = y[training_idxs]\n y_test = y[test_idxs]\n #y_val = y[val_idxs]\n\n return X_train, X_test, y_train, y_test,", "def split_training_and_validation_data(x, y, sample_weights, validation_split):\n if has_symbolic_tensors(x):\n raise ValueError('If your data is in the form of symbolic tensors, '\n 'you cannot use `validation_split`.')\n if hasattr(x[0], 'shape'):\n split_at = int(x[0].shape[0] * (1. - validation_split))\n else:\n split_at = int(len(x[0]) * (1. 
- validation_split))\n x, val_x = (generic_utils.slice_arrays(x, 0, split_at),\n generic_utils.slice_arrays(x, split_at))\n y, val_y = (generic_utils.slice_arrays(y, 0, split_at),\n generic_utils.slice_arrays(y, split_at))\n if sample_weights:\n sample_weights, val_sample_weights = (\n generic_utils.slice_arrays(sample_weights, 0, split_at),\n generic_utils.slice_arrays(sample_weights, split_at),\n )\n else:\n val_sample_weights = None\n return x, y, sample_weights, val_x, val_y, val_sample_weights", "def split_data(dataset_x, dataset_y, split_ratio):\n num_examples = len(dataset_x)\n training_x = dataset_x[:int(num_examples*split_ratio)]\n training_y = dataset_y[:int(num_examples*split_ratio)]\n\n validation_x = dataset_x[int(num_examples*split_ratio): num_examples]\n validation_y = dataset_y[int(num_examples*split_ratio): num_examples]\n\n training_y = np.asarray(training_y, dtype='float32')\n validation_y = np.asarray(validation_y, dtype='float32')\n return training_x, training_y, validation_x, validation_y", "def split_data(X:np.ndarray, y:np.ndarray) -> (np.ndarray, np.ndarray, np.ndarray, np.ndarray):\n \n X_train, X_val, y_train, y_val = train_test_split(X, y, test_size=0.20, random_state=42, stratify=y)\n \n return X_train, X_val, y_train, y_val", "def train_val_split(self):\n idx = np.arange(self.num_data)\n np.random.shuffle(idx)\n val_num = int(self.ratio * self.num_data)\n dev_num = int(self.dev_ratio * self.num_data)\n self.num_train = self.num_data - val_num\n\n self.val_data = self.data[idx[:val_num]]\n self.val_label = self.label[idx[:val_num]]\n \n self.train_data = self.data[idx[val_num:]]\n self.train_label = self.label[idx[val_num:]]\n\n self.dev_data = self.data[idx[:dev_num]]\n self.dev_label = self.label[idx[:dev_num]]", "def split_data(self):\n np.random.seed(seed=self.seed)\n indices = np.random.permutation(self.predictor_vars.shape[0])\n split_row = round(self.predictor_vars.shape[0] * self.train_split)\n train_idx, test_idx = indices[:split_row], indices[split_row:]\n self.predictor_vars_train, self.predictor_vars_test = (\n self.predictor_vars[train_idx, :],\n self.predictor_vars[test_idx, :],\n )\n self.response_var_train, self.response_var_test = (\n self.response_var[train_idx],\n self.response_var[test_idx],\n )", "def split_data(dataset, test_size=0.5):\n shuffled_data = np.random.RandomState(seed=721).permutation(dataset)\n train_set = shuffled_data[: int(len(dataset) * (1 - test_size)), :]\n test_set = shuffled_data[int(len(dataset) * (1 - test_size)):, :]\n return train_set, test_set", "def train_test_split(dataset, split):\r\n train = list()\r\n train_size = split * len(dataset)\r\n dataset_copy = list(dataset) \r\n while len(train) < train_size:\r\n index = randrange(len(dataset_copy))\r\n train.append(dataset_copy.pop(index))\r\n return train, dataset_copy", "def prepare_train_validation(self) -> Tuple:\n Xt, Xv, Yt, Yv = self.dataset.train_test_split_representations()\n\n Xt = self.dataset.prepare_input_samples(Xt)\n Yt = self.dataset.prepare_output_samples(Yt)\n traindataset = tf.data.Dataset.from_tensor_slices((Xt, Yt))\n traindataset = traindataset.batch(\n self.batch_size,\n num_parallel_calls=tf.data.experimental.AUTOTUNE\n )\n\n Xv = self.dataset.prepare_input_samples(Xv)\n Yv = self.dataset.prepare_output_samples(Yv)\n validdataset = tf.data.Dataset.from_tensor_slices((Xv, Yv))\n validdataset = validdataset.batch(\n self.batch_size,\n num_parallel_calls=tf.data.experimental.AUTOTUNE\n )\n\n return traindataset, validdataset", "def 
split_data(self,test=False):\n shuffle_index = torch.randperm(self.train_target.shape[0])\n load = shuffle_index.shape[0]\n train_input_shuffle = self.train_input[shuffle_index]\n train_target_shuffle = self.train_target[shuffle_index]\n train_classes_shuffle = self.train_classes[shuffle_index]\n index_train = self.index_for_equal_class(train_target_shuffle[:load//2])\n train_input = train_input_shuffle[index_train]\n train_target = train_target_shuffle[index_train]\n train_classes = train_classes_shuffle[index_train]\n if not test:\n index_test = self.index_for_equal_class( train_target_shuffle[load//2:]) + load//2\n test_input = train_input_shuffle[index_test]\n test_target = train_target_shuffle[index_test]\n test_classes = train_classes_shuffle[index_test]\n else:\n index_test = self.index_for_equal_class(self.test_target)\n test_input = self.test_input[index_test]\n test_target = self.test_target[index_test]\n test_classes = self.test_classes[index_test]\n train_input, mean, std = normalize(train_input)\n test_input, _, _ = normalize(test_input,mean,std)\n return train_input, train_target, train_classes ,test_input ,test_target ,test_classes", "def split_data(test_data, split_ratio):\n split_index = int(split_ratio * len(test_data))\n \n # randomly permute the values in place\n random.shuffle(test_data)\n \n # take slices of the determined size\n training_set = copy.copy(test_data[:split_index])\n test_data = copy.copy(test_data[split_index:])\n\n return training_set, test_data", "def test_split_data():\n from parrot import process_input_data as pid\n\n data_file = os.path.abspath(\"../data/seq_class_dataset.tsv\")\n train, val, test = pid.split_data(data_file, datatype='sequence',\n problem_type='classification', num_classes=3)\n\n assert (len(train) == 210) and (len(val) == 45) and (len(test) == 45) and (len(train[0]) == 3)", "def split_dataset(dataset, test_size):\n train_data = dataset.skip(test_size).shuffle(SHUFFLE_BUFFER_SIZE)\n train_data = train_data.padded_batch(BATCH_SIZE)\n \n test_data = dataset.take(test_size)\n test_data = test_data.padded_batch(BATCH_SIZE)\n \n return train_data, test_data", "def train_validation_test_split(\n data: pd.DataFrame,\n target: str,\n val_partition: float = 0.2,\n test_partition: float = 0.15\n) -> list:\n\n assert val_partition + test_partition < 1.0\n\n val_samples = val_partition * data.shape[0]\n test_samples = test_partition * data.shape[0]\n\n train_validation, test = train_test_split(\n data, test_size=int(test_samples), stratify=data[target]\n )\n\n train, validation = train_test_split(\n train_validation, test_size=int(val_samples), stratify=train_validation[target]\n )\n\n return [train, validation, test]", "def split_dataset(X: np.array, y: np.array, ratio=0.8):\n '''split dataset to train data and valid data'''\n X_train = X[:int(X.shape[0] * ratio)]\n y_train = y[:int(y.shape[0] * ratio)]\n X_valid = X[int(X.shape[0] * ratio):]\n y_valid = y[int(y.shape[0] * ratio):]\n dataset = tuple([X_train, y_train, X_valid, y_valid])\n\n return dataset", "def split_dataset(dataset, train_percentage, valid_percentage):\n\n # Split dataset into train and test dataset\n train_x, test_x, train_y, test_y = train_test_split(dataset[:, :-1], dataset[:, -1],\n train_size=train_percentage + valid_percentage,\n test_size=1-(train_percentage + valid_percentage))\n\n valid_x = train_x[int(np.ceil(train_percentage * len(dataset))):]\n valid_y = train_y[int(np.ceil(train_percentage * len(dataset))):]\n\n return train_x, valid_x, test_x, train_y, 
valid_y, test_y", "def split_samples(data):\n\n training_samples = data[0:9497]\n test_samples = data[9497:11300]\n\n return training_samples, test_samples", "def split_data_crossvalid(data):\n X_trainfolder = []\n X_testfolder = []\n y_trainfolder = []\n y_testfolder = []\n data = data[data[:, 0].argsort()]\n number_one = np.count_nonzero(data[:, :1])\n data_one = data[np.where(data[:, 0] == 1)]\n data_zero = data[np.where(data[:, 0] == 0)]\n one_ratio = round(number_one / len(data), 1)\n one_zero_ratio = 1 - one_ratio\n batch_one = int(70 * one_ratio)\n batch_zero = int(70 * one_zero_ratio)\n batchs = len(data) // 70\n for i in range(batchs):\n test_one = data_one[i * batch_one:(i + 1) * batch_one, :]\n train_one = np.delete(data_one, test_one, axis = 0)\n test_zero = data_zero[i * batch_zero:(i + 1) * batch_zero, :]\n train_zero = np.delete(data_zero, test_zero, axis = 0)\n train_sets = np.concatenate((train_one, train_zero), axis=0)\n test_sets = np.concatenate((test_one, test_zero), axis=0)\n np.random.shuffle(train_sets)\n np.random.shuffle(test_sets)\n X_trainfolder.append(train_sets[:, 1:])\n y_trainfolder.append(train_sets[:, 0])\n X_testfolder.append(test_sets[:, 1:])\n y_testfolder.append(test_sets[:, 0])\n return X_trainfolder, y_trainfolder, X_testfolder, y_testfolder", "def split_dataset(dataset: tf.data.Dataset, validation_data_fraction: float):\n\n validation_data_percent = round(validation_data_fraction * 100)\n if not (0 <= validation_data_percent <= 100):\n raise ValueError(\"validation data fraction must be ∈ [0,1]\")\n\n dataset = dataset.enumerate()\n train_dataset = dataset.filter(lambda f, data: f % 100 > validation_data_percent)\n validation_dataset = dataset.filter(lambda f, data: f % 100 <= validation_data_percent)\n\n # remove enumeration\n train_dataset = train_dataset.map(lambda f, data: data)\n validation_dataset = validation_dataset.map(lambda f, data: data)\n\n return train_dataset, validation_dataset", "def split_dataset(dataset: tf.data.Dataset, validation_data_fraction: float):\n\n validation_data_percent = round(validation_data_fraction * 100)\n if not (0 <= validation_data_percent <= 100):\n raise ValueError(\"validation data fraction must be ∈ [0,1]\")\n\n dataset = dataset.enumerate()\n train_dataset = dataset.filter(lambda f, data: f % 100 > validation_data_percent)\n validation_dataset = dataset.filter(lambda f, data: f % 100 <= validation_data_percent)\n\n # remove enumeration\n train_dataset = train_dataset.map(lambda f, data: data)\n validation_dataset = validation_dataset.map(lambda f, data: data)\n\n return train_dataset, validation_dataset", "def split_dataset(dataset: tf.data.Dataset, validation_data_fraction: float):\n\n validation_data_percent = round(validation_data_fraction * 100)\n if not (0 <= validation_data_percent <= 100):\n raise ValueError(\"validation data fraction must be ∈ [0,1]\")\n\n dataset = dataset.enumerate()\n train_dataset = dataset.filter(lambda f, data: f % 100 > validation_data_percent)\n validation_dataset = dataset.filter(lambda f, data: f % 100 <= validation_data_percent)\n\n # remove enumeration\n train_dataset = train_dataset.map(lambda f, data: data)\n validation_dataset = validation_dataset.map(lambda f, data: data)\n\n return train_dataset, validation_dataset", "def split_data(train_split, src_dir, train_dir, test_dir, classes):\n for cls in classes:\n # get all dat files of this class\n data = get_instances_of_class(cls, src_dir)\n \n # how many of the data points are for training?\n train_count = 
round(len(data) * train_split / 100)\n \n # randomly choose indexes\n train_indexes = set()\n while len(train_indexes) < train_count:\n train_indexes.add(random.randrange(len(data)))\n \n # move all train_indexes to train_dir, others to test_dir\n COPY = lambda src, dst, filename:\\\n shutil.copy2(\n \"{}/{}\".format(src, data[i]),\n \"{}/{}\".format(dst, data[i])\n )\n \n for i in range(len(data)):\n if i in train_indexes:\n COPY(src_dir, train_dir, data[i])\n else:\n COPY(src_dir, test_dir, data[i])", "def split(self, X):\n # Make sure it's a sparse array...\n X = check_sparse_array(X)\n\n # Use np.linspace to evenly partition the space between 0 and 1 into\n # k + 1 pieces so we can use them as \"training_sizes\"\n train_sizes = np.linspace(0, 1, self.n_splits + 1)\n\n # We use a series of \"permuted values\" to mask out the training/testing\n # folds.\n random_state = check_random_state(self.random_state)\n values = _get_train_mask_linspace(X.nnz, random_state,\n shuffle=self.shuffle)\n\n # Iterate the fold space bounds in a generator, returning train/test\n for lower, upper in zip(train_sizes[:-1], train_sizes[1:]):\n test, train = _split_between_values(X, values, lower, upper)\n yield train, test", "def split(self,X,y=None):\n all_idx = pd.Series(np.arange(X.shape[0])) \n mbrg = int(X.shape[0]*self.embargo_pct)\n test_starts=[(i[0],i[-1]+1) for i in np.array_split(all_idx.values,self.n_splits)]\n for i, j in test_starts:\n t0 = all_idx.index[i] # start of test set\n test_indices = all_idx.values[i:j]\n maxT1Idx = all_idx.index.searchsorted(all_idx[test_indices].max())\n train_indices = all_idx.index.searchsorted(all_idx[all_idx<=t0].index)\n if maxT1Idx < X.shape[0]: \n train_indices=np.concatenate((train_indices,all_idx[maxT1Idx+mbrg:]))\n yield train_indices,test_indices", "def __train_test_splits(self):\n # By default, our indices are just 0-n\n split_indices = list(range(len(self.data)))\n # If shuffling, use our shared Random instance to shuffle our indices before slicing\n if self.shuffle:\n np.random.shuffle(split_indices)\n # Regardless of shuffle, take the first self.train_proportion for training, and the last\n # 1 - self.train_proportion records as test\n train_n = int(self.train_proportion * len(self.data))\n training_indices = split_indices[:train_n]\n test_indices = split_indices[train_n:]\n return training_indices, test_indices", "def data_splitting(data_features, data_targets, test_size):\n data_features_train, data_features_test, data_targets_train, data_targets_test = \\\n train_test_split(data_features,\n data_targets,\n test_size = test_size)\n\n return data_features_train, data_features_test, data_targets_train, data_targets_test", "def data_splitting(data_features, data_targets, test_size):\n data_features_train, data_features_test, data_targets_train, data_targets_test = \\\n train_test_split(data_features,\n data_targets,\n test_size = test_size)\n\n return data_features_train, data_features_test, data_targets_train, data_targets_test", "def split_data_set_splitted(X, Y):\n\n # Uso la funcion de scikitlearn para separar el data_set\n # Esta funcion por defecto mezcla los datos para asegurar la representacion\n # de los datos en los dos subconjuntos\n #\n # Blanca Cano Camarero me comenta que ponga el stratify = Y porque asi se lo\n # indica el profesor Pablo Mesejo en una consulta realizada. 
En la referencia\n # que indico de scikitlearn tambien viene documentado este parametro\n # Lo que hace es evitar que haya clases que queden infrarepresentadas\n X_training, X_test, Y_training, Y_test= train_test_split(X, Y, train_size = 0.75, test_size = 0.25, stratify = Y)\n return X_training, X_test, Y_training, Y_test", "def train_dev_test_split(data, train_pct=0.7):\n train_len, dev_len, test_len = create_split_bounds(len(data), train_pct)\n\n # Train (70%)\n train = data[0:train_len]\n\n # Dev (15%)\n dev_ub = (train_len + dev_len)\n dev = data[train_len:dev_ub]\n\n # Test (15%)\n test = data[dev_ub:]\n\n assert \"One of the sets contains an unexpected number of elements\", \\\n (len(train) == train_len and len(dev) == dev_len and len(test) == test_len)\n\n return train, dev, test", "def train_test_split_soy(soy_data):\n\n print('[ INFO ]: Creating training and testing set for soy data...')\n\n train_set_size = round(0.67*len(soy_data))\n soy_data['index'] = soy_data.index.tolist()\n\n # Set any record with an index less than 2/3 of the number of records\n # in the data frame to the training set\n train_set = soy_data[soy_data['index'] < train_set_size]\n train_set = train_set.drop('index', axis=1)\n\n # Assign the next 1/3 to the testing set\n test_set = soy_data[soy_data['index'] >= train_set_size]\n test_set = test_set.drop('index', axis=1)\n\n return train_set, test_set", "def _divide_into_test_train(\n self, test_size: int, train_size: int\n ) -> Tuple[pd.DataFrame, pd.DataFrame, pd.DataFrame, pd.DataFrame]:\n X_train, X_test, y_train, y_test = train_test_split(\n self.df.iloc[:, :-1],\n self.df.iloc[:, -1],\n test_size=test_size,\n train_size=train_size,\n )\n return X_train, X_test, y_train, y_test", "def split_dataset(dset: tf.data.Dataset, validation_data_fraction: float):\n\n validation_data_percent = round(validation_data_fraction * 100)\n if not (0 <= validation_data_percent <= 100):\n raise ValueError(\"validation data fraction must be ∈ [0,1]\")\n\n dset = dset.enumerate()\n train_dataset = dset.filter(lambda f, data: f % 100 > validation_data_percent)\n validation_dataset = dset.filter(lambda f, data: f % 100 <= validation_data_percent)\n\n # remove enumeration\n train_dataset = train_dataset.map(lambda f, data: data)\n validation_dataset = validation_dataset.map(lambda f, data: data)\n\n return train_dataset, validation_dataset", "def split(self, X, y=None, group=None):\n\n # Initiate loop variables\n trainset = []\n testset = []\n train_index = 0\n test_index = 0\n tsplit = self.startdate + self.traindur\n\n # Adjust start index to correspond to start date\n while self.dates[train_index] < self.startdate:\n train_index += 1\n\n n_pos = 0\n while tsplit + self.gap + self.testdur < self.enddate:\n # Set test index to correspond to appropriate date\n test_index = train_index\n while self.dates[test_index] < tsplit + self.gap:\n test_index += 1\n\n # Build training set\n while self.dates[train_index] < tsplit:\n trainset.append(train_index)\n train_index += 1\n\n # Build test set\n testset = []\n while self.dates[test_index] < tsplit + self.gap + self.testdur:\n testset.append(test_index)\n test_index += 1\n if y[test_index] == 1:\n n_pos += 1\n\n if self.debug:\n print(str(len(trainset)) + ' ' + str(len(testset)) + ' ' \\\n + str(n_pos) + ' ' + str(self.dates[test_index]))\n n_pos = 0\n\n # Loop update\n tsplit += self.update\n\n yield trainset, testset", "def split_dev(self):\n\t\tprint(\"Splitting test set into dev and test set\")\n\n\t\told_length = 
len(self.X[\"test\"])\n\t\tindices = list(range(old_length))\n\n\t\tnp.random.seed(0)\n\t\tnp.random.shuffle(indices)\n\t\t\n\t\tsplit = int(len(indices) * 0.5)\n\n\t\tsplit_indices = {\"test\": indices[:split], \"dev\": indices[split:]}\n\t\n\t\tfor dataset in (\"dev\", \"test\"):\n\t\t\tself.X[dataset] = [self.X[\"test\"][idx] for idx in split_indices[dataset]]\n\t\t\tself.Y[dataset] = [self.Y[\"test\"][idx] for idx in split_indices[dataset]]\n\t\t\tself.raw_documents[dataset] = [self.raw_documents[\"test\"][idx] for idx in split_indices[dataset]]\n\t\t\tself.tokenized_documents[dataset] = [self.tokenized_documents[\"test\"][idx] for idx in split_indices[dataset]]\n\t\t\n\t\tprint(\"Split test set with\", old_length, \"samples into\", len(self.X[\"test\"]), \"/\", len(self.X[\"dev\"]), \"samples\")", "def train_test_split(x, y, test_pct):\n data = zip(x, y)\n train, test = split_data(data, 1 - test_pct)\n x_train, y_train = zip(*train)\n x_test, y_test = zip(*test)\n return x_train, y_train, x_test, y_test", "def split(self, X=None, y=None, groups=None):\n\n for train_index in [0,1]:\n train_indices=np.where(self.test_fold==train_index)[0]\n test_indices=np.where(self.test_fold==(train_index+1)%2)[0]\n if self.shuffle:\n self.rng.shuffle(train_indices)\n self.rng.shuffle(test_indices)\n yield train_indices, test_indices", "def split_train_test_dev(self):\n for dir_name in (self.config.train_dir, self.config.dev_dir,\n self.config.test_dir):\n create_dir(dir_name)\n\n self.split_helper(self.config.parsed_train_file_pos, 'pos')\n self.split_helper(self.config.parsed_train_file_neg, 'neg')", "def split_train_validation_test(x, y, split):\n\n nsamples = x.shape[0]\n\n if y.shape[0] != nsamples:\n raise Exception('in split_train_validation_test, x has shape {}'.format(x.shape) +\n 'but y has shape {}'.format(y.shape) +\n 'First dimensions do not match')\n\n # make sure split array sums to 1\n split = np.asarray(split)\n split = split / split.sum()\n\n nsamples_train = int(split[0] * nsamples)\n nsamples_valid = int(split[1] * nsamples)\n\n # create a set of randomly shuffled indices\n indices = np.random.permutation(nsamples)\n\n idx_train = indices[:nsamples_train]\n idx_valid = indices[nsamples_train:nsamples_train+nsamples_valid]\n idx_test = indices[nsamples_train+nsamples_valid:]\n\n x_train = x[idx_train]\n y_train = y[idx_train]\n\n x_valid = x[idx_valid]\n y_valid = y[idx_valid]\n\n x_test = x[idx_test]\n y_test = y[idx_test]\n\n return x_train, y_train, x_valid, y_valid, x_test, y_test", "def split_data(paragraphs):\n para_count = len(paragraphs)\n training_index = int(para_count * 0.7)\n validation_index = int(para_count * 0.9)\n training_data = paragraphs[:training_index]\n validation_data = paragraphs[training_index:validation_index]\n test_data = paragraphs[validation_index:]\n return training_data, validation_data, test_data", "def split_data(x: np.array, y: np.array, test_size: float = 0.2,\n val_size: float = 0.2, random_state: int = None\n ) -> List[Tuple[np.array]]:\n\n train_x, test_x, train_y, test_y = \\\n train_test_split(x, y, test_size=test_size, random_state=random_state)\n\n train_x, val_x, train_y, val_y = \\\n train_test_split(train_x, train_y, test_size=val_size, random_state=random_state)\n\n return (train_x, train_y), (val_x, val_y), (test_x, test_y)", "def split_test_training(data_path, sequence_length):\n\n # logic for loading the CSV, using 'result' (2nd) column as basis for prediction\n with open(data_path) as f:\n record = csv.reader(f, delimiter=\",\")\n 
next(record, None)\n spat = []\n nb_of_values = 0\n for line in record:\n spat.append(float(line[2]))\n nb_of_values += 1\n\n # break file into chunks based on sequence length\n result = []\n for index in range(len(spat) - sequence_length):\n result.append(spat[index: index + sequence_length])\n result = np.array(result)\n\n # divide set into 20% for test, 80% for training\n row = int(round(0.8 * result.shape[0]))\n train = result[:row, :]\n np.random.shuffle(train)\n X_train = train[:, :-1]\n y_train = train[:, -1]\n X_test = result[row:, :-1]\n y_test = result[row:, -1]\n X_train = np.reshape(X_train, (X_train.shape[0], X_train.shape[1], 1))\n X_test = np.reshape(X_test, (X_test.shape[0], X_test.shape[1], 1))\n\n return [X_train, y_train, X_test, y_test]", "def make_split(data, target, test_size=0.3):\n train, test = train_test_split(data, test_size=test_size)\n x_train = train.drop(target, axis=1)\n y_train = train[target]\n x_test = test.drop(target, axis=1)\n y_test = test[target]\n return x_train, y_train, x_test, y_test", "def split_data(data,param):\n all_x = data[['GDP_scaled','CLPRB_scaled','EMFDB_scaled','ENPRP_scaled','NGMPB_scaled','PAPRB_scaled','PCP_scaled','ZNDX_scaled','OP_scaled', 'OP2_scaled']][:55]\n all_y = data[[param]][:55]\n return cross_validation.train_test_split(all_x, all_y, test_size=0.2, random_state=0)", "def split_data(data):\n testing_set = data.applymap(lambda x: 0)\n\n taken_courses_flat = data.stack().to_frame()\n taken_courses_flat = taken_courses_flat[taken_courses_flat[0] == 1]\n\n for student in taken_courses_flat.index.get_level_values('PersonID').unique():\n courses = taken_courses_flat.loc[student]\n for course in courses.sample(frac=0.2, replace=False).index:\n testing_set.loc[student, course] = 1\n training_set = data - testing_set\n\n # Numpifies the data\n train_np = training_set.apply(axis=1, func=lambda x: x.astype(int)).as_matrix()\n test_np = testing_set.apply(axis=1, func=lambda x: x.astype(int)).as_matrix()\n\n # the indices of each user\n users = np.array(np.arange(data.shape[0])[np.newaxis].T, dtype=np.int32)\n\n return train_np, test_np, users", "def split_train_val(self,ratio=.1):\n lim = int(np.ceil(len(self.train) * ratio))\n order = list(range(len(self.train)))\n np.random.shuffle(order)\n self.train_train = self.train.ix[order[lim:]]\n self.train_val = self.train.ix[order[:lim]]\n log(\"Split data into training/val: {} -> {} {}\".format(\n len(self.train),len(self.train_train),lim))", "def train_test_split(x, y, test_pct):\n\tdata = zip(x,y)\n\ttrain, test = split_data(data, 1 - test_pct)\n\tx_train, y_train = zip(*train)\n\tx_test, y_test = zip(*test)\n\treturn x_train, y_train, x_test, y_test", "def split_data(x, y, ratio, seed=1):\n # set seed\n np.random.seed(seed)\n # ***************************************************\n # INSERT YOUR CODE HERE\n # split the data based on the given ratio: TODO\n # ***************************************************\n \n def split_data(x, y, ratio, seed=1):\n \"\"\"split the dataset based on the split ratio.\"\"\"\n # set seed\n np.random.seed(seed)\n # ***************************************************\n # INSERT YOUR CODE HERE\n # split the data based on the given ratio: TODO\n # ***************************************************\n trainDataLen = round(len(y)*ratio)\n \n trainDataID = random.sample(range(len(y)), trainDataLen)\n \n # USing bool value to obtaint he remainling data for validation data set\n validDataID = np.array(range(len(y))) + 1\n validDataID[trainDataID] = 0\n 
validDataID = validDataID >0\n \n \n # obtain the trainning data\n trainDataX = x[trainDataID]\n trainDataY = y[trainDataID]\n \n # obtain the validation data\n validDataX = x[validDataID]\n validDataY = y[validDataID] \n \n return trainDataX,trainDataY, validDataX, validDataY\n \n #raise NotImplementedError", "def split_dataset(data_set, train_size, test_size):\n # Generate random indices without replacement, to make train and test sets disjoint\n rand_indices = np.random.choice(data_set.shape[0], train_size+test_size, replace=False)\n feature_end = data_set.shape[1] - 1\n output_location = feature_end\n feature_offset = var.ALGORITHM_INFO['feature_offset']\n\n # Define the training and testing matrices\n x_train = data_set[rand_indices[0:train_size], feature_offset:feature_end]\n y_train = data_set[rand_indices[0:train_size], output_location]\n x_test = data_set[rand_indices[train_size:train_size+test_size], feature_offset:feature_end]\n y_test = data_set[rand_indices[train_size:train_size+test_size], output_location]\n favorite_test = data_set[rand_indices[train_size:train_size+test_size], 0]\n\n # Normalize features, with maximum value in training set\n # as realistically, this would be the only possibility\n\n for ii in range(x_train.shape[1]):\n maxval = np.max(np.abs(x_train[:, ii]))\n if maxval > 0:\n x_train[:, ii] = np.divide(x_train[:, ii], maxval)\n x_test[:, ii] = np.divide(x_test[:, ii], maxval)\n\n\n # Add a column of ones; done after to avoid modifying entire data_set\n x_train = np.hstack((x_train, np.ones((x_train.shape[0], 1))))\n x_test = np.hstack((x_test, np.ones((x_test.shape[0], 1))))\n\n return (x_train, y_train), (x_test, y_test), favorite_test", "def split_dataset(dataset, train_percentage, feature_headers, target_header):\r\n\r\n train_x, test_x, train_y, test_y = train_test_split(dataset[feature_headers], dataset[target_header],\r\n train_size=train_percentage, random_state=42)\r\n return train_x, test_x, train_y, test_y", "def create_train_valid_set(self):\n\n if not self.eq_train:\n X_train_high_level, X_valid_high_level, X_train_low_level, X_valid_low_level, train_w, valid_w, y_train, y_valid = train_test_split(self.X_train_high_level, self.X_train_low_level, self.train_weights, self.y_train,\n train_size=0.7, test_size=0.3\n )\n else:\n X_train_high_level, X_valid_high_level, X_train_low_level, X_valid_low_level, train_w, valid_w, w_train_eq, w_valid_eq, y_train, y_valid = train_test_split(self.X_train_high_level, self.X_train_low_level,\n self.train_weights, self.train_weights_eq, self.y_train,\n train_size=0.7, test_size=0.3\n )\n self.train_weights_eq = w_train_eq\n\n #NOTE: might need to re-equalise weights in each folds as sumW_sig != sumW_bkg anymroe!\n self.train_weights = train_w\n self.valid_weights = valid_w #validation weights should never be equalised weights!\n\n print 'creating validation dataset'\n self.X_train_high_level = X_train_high_level\n self.X_train_low_level = self.join_objects(X_train_low_level)\n\n self.X_valid_high_level = X_valid_high_level\n self.X_valid_low_level = self.join_objects(X_valid_low_level)\n print 'finished creating validation dataset'\n\n self.y_train = y_train\n self.y_valid = y_valid", "def load_data(n_folds=10):\n\n sss = sssplit(n_splits=n_folds, test_size=.1, random_state=np.random.RandomState(830452))\n X, y = get_transformed_data()\n (X_train, y_train), (X_test, y_test) = ([],[]),([],[])\n\n for train_idx, test_idx in sss.split(X, y):\n X_train.append(X[train_idx])\n y_train.append(y[train_idx])\n 
X_test.append(X[test_idx])\n y_test.append(y[test_idx])\n\n return (X_train, y_train), (X_test, y_test)", "def make_splits(self):\n # produce fold/portion splits of the training indexes: these output indexes to the tr. indexes themselves\n if self.folds is not None:\n meta_trainval_idx = kfold_split(self.train_idx, self.folds, self.seed, self.labels, self.label_info)\n elif self.portion is not None:\n meta_trainval_idx = portion_split(self.train_idx, self.portion, self.seed, self.labels, self.label_info)\n else:\n meta_trainval_idx = [(np.arange(len(self.train_idx)), np.arange(0, dtype=np.int32))]\n # \"dereference\" the metaindexes to point to the data themselves\n self.trainval_idx = []\n for (tidx, vidx) in meta_trainval_idx:\n self.trainval_idx.append((self.train_idx[tidx], self.train_idx[vidx]))", "def train_test_split_data(data, test_size=0.25):\n \n y = data['fraud'].values\n X = data.drop(columns=['fraud','description','name','org_desc','org_name','payee_name']).values\n \n return train_test_split(X, y, test_size=test_size, stratify=y, shuffle=True)", "def train_test_split(df):\n training_size = int(len(df) * .67)\n test_size = int(len(df) - training_size)\n train, test = df[0:training_size], df[training_size:len(df)]\n return train, test", "def split_data(df):\n\trandom_seed = 1\n\tdf_train = df.sample(frac=0.8, random_state=random_seed)\n\tdf_rem = df.loc[~df.index.isin(df_train.index)]\n\tdf_valid = df_rem.sample(frac=0.5, random_state=random_seed)\n\tdf_test = df_rem.loc[~df_rem.index.isin(df_valid.index)]\n\tlogger.info(\"Shape of training dataframe: \" + str(df_train.shape))\n\tlogger.info(\"Shape of validation dataframe: \" + str(df_valid.shape))\n\tlogger.info(\"Sahpe of test dataframe: \" + str(df_test.shape))\n\n\treturn df_train, df_valid, df_test", "def split_data(images, labels):\n images, labels = shuffle_data_pair(images, labels)\n\n num_covid_points = sum(map(lambda label: label == 0, labels))\n\n # Calculate split\n num_test = int(num_covid_points * 0.1)\n num_covid_train = num_covid_points - num_test * 2\n num_other_train = int(num_covid_train * 1.1)\n\n # (train, validate, test) points added\n num_points_added = [\n [0, 0, 0], # COVID-19\n [0, 0, 0], # Viral pneumonia\n [0, 0, 0] # Normal\n ]\n\n # Datasets\n images_train = []\n labels_train = []\n images_validate = []\n labels_validate = []\n images_test = []\n labels_test = []\n\n # Add images and labels to datasets\n notifier.send(\" Adding images and labels to dataset...\")\n for i, label in enumerate(labels):\n print(f\" Point: {i} / {len(labels)}\")\n completed_labels = [False, False, False] # Enough of label added\n if all(completed_labels):\n break\n for j in range(3): # 0: COVID-19, 1: Viral pneumonia, 2: Normal\n if completed_labels[j]:\n continue\n if label == j:\n # Add training data\n can_add_training = False\n if j == 0: # COVID-19\n if num_points_added[j][0] < num_covid_train:\n can_add_training = True\n num_points_added[j][0] += 1\n elif num_points_added[j][0] < num_other_train: # Not COVID-19\n can_add_training = True\n num_points_added[j][0] += 1\n if can_add_training:\n images_train.append(images[i])\n labels_train.append(labels[i])\n break\n\n # Add validation data\n if num_points_added[j][1] < num_test:\n num_points_added[j][1] += 1\n images_validate.append(images[i])\n labels_validate.append(labels[i])\n break\n\n # Add testing data\n if num_points_added[j][2] < num_test:\n num_points_added[j][2] += 1\n images_test.append(images[i])\n labels_test.append(labels[i])\n break\n\n # Point 
couldn't be added anywhere: label is complete\n completed_labels[j] = True\n break\n\n # Shuffle all data\n notifier.send(\" Shuffling data...\")\n images_train, labels_train = shuffle_data_pair(\n images_train, labels_train\n )\n images_validate, labels_validate = shuffle_data_pair(\n images_validate, labels_validate\n )\n images_test, labels_test = shuffle_data_pair(\n images_test, labels_test\n )\n\n if PLOT_LABELS:\n # Plot data frequencies\n plt.hist(labels, bins=3)\n plt.title(\"Labels\")\n\n plt.hist(labels_train, bins=3)\n plt.title(\"Train Labels\")\n\n plt.hist(labels_validate, bins=3)\n plt.title(\"Validate Labels\")\n\n plt.hist(labels_test, bins=3)\n plt.title(\"Test Labels\")\n\n plt.show()\n\n # Make labels categorical\n notifier.send(\" Making labels categorical: train...\")\n labels_train = tf.keras.utils.to_categorical(labels_train)\n notifier.send(\" Making labels categorical: validate...\")\n labels_validate = tf.keras.utils.to_categorical(labels_validate)\n notifier.send(\" Making labels categorical: test...\")\n labels_test = tf.keras.utils.to_categorical(labels_test)\n\n notifier.send(\" Converting data to NumPy arrays...\")\n return \\\n np.array(images_train), np.array(images_validate), np.array(images_test), \\\n np.array(labels_train), np.array(labels_validate), np.array(labels_test)", "def splitting(X, y, TrainingShare=0.5, seed=0):\n\n\t#target_name = 'default'\n\t#X = default.drop('default', axis=1)\n\t#robust_scaler = RobustScaler()\n\t#sc = StandardScaler()\n\t#X = robust_scaler.fit_transform(X)\n\t#y = default[target_name]\n\n\t# Train-test split\n\tX_train, X_test, y_train, y_test=train_test_split(X, y, train_size=TrainingShare, test_size = 1-TrainingShare, random_state=seed)\n\treturn X_train, X_test, y_train, y_test", "def splits(cls, root='.data', train=None, validation=None, test=None, **kwargs):\n path = cls.download(root)\n train_data = None if train is None else cls(os.path.join(path, train), **kwargs)\n val_data = None if validation is None else cls(os.path.join(path, validation), **kwargs)\n test_data = None if test is None else cls(os.path.join(path, test), **kwargs)\n return tuple(d for d in (train_data, val_data, test_data) if d is not None)", "def split(self, X_train, Y_train, run_train=None, run_test=None):\n result = []\n logo = LeavePOut(self.out_per_fold)\n for train, test in logo.split(X_train):\n y_train = [Y_train[i] for i in train]\n x_train = [X_train[i] for i in train]\n y_test = [Y_train[i] for i in test]\n x_test = [X_train[i] for i in test]\n result.append({'X_train': x_train,\n 'Y_train': y_train,\n 'X_test': x_test,\n 'Y_test': y_test,\n 'run_train': [run_train[index] for index in train] if run_train is not None else train,\n 'run_test': [run_train[index] for index in test] if run_train is not None else test\n })\n return result", "def split_dev_data(input_data: List[Tuple[str, int]]) -> Tuple[List[Tuple[str, int]],\n List[Tuple[str, int]],\n List[Tuple[str, int]]]:\n training_data, test_data = split_data(input_data)\n\n # split test data in half to test on\n dev_data = set(random.sample(test_data, int(len(test_data) / 2)))\n test_data = set(test_data) - set(dev_data)\n\n return list(training_data), list(test_data), list(test_data)", "def train_test_split(df, test_size=0.3):\r\n # split df here\r\n train_size = int(df.shape[0] * (1 - test_size))\r\n test_size = df.shape[0] - train_size\r\n train = df[:train_size]\r\n test = df[train_size:]\r\n\r\n return train, test # return the train and test datasets\r", "def 
_split_train_tst(self):\n num_samples = self.Y.shape[0]\n mapper_file = self.checkpointer.get_mapper_file_location()\n if not self.checkpointer.is_mapper_checkpointed():\n print 'No mapper checkpoint found. Fresh loading in progress ...'\n # Now shuffle the data\n sample_id = range(num_samples)\n random.shuffle(sample_id)\n print 'Dumping the mapper shuffle for reuse.'\n Pickle.dump(sample_id, open(mapper_file, 'wb'))\n print 'Dump complete. Moving Forward...'\n else:\n print 'Mapper Checkpoint found... Reading from mapper dump'\n sample_id = Pickle.load(open(mapper_file, 'rb'))\n print 'Mapping unpickling complete.. Moving forward...'\n\n self.X_fwd = self.X_fwd[sample_id]\n self.X_bwd = self.X_bwd[sample_id]\n self.Y = self.Y[sample_id]\n # Now divide the data into test ans train set\n test_fraction = 0.01\n self.test_size = int(test_fraction * num_samples)\n self.train_size = num_samples - self.test_size\n # Forward review\n self.X_trn_fwd = self.X_fwd[0:self.train_size]\n self.X_tst_fwd = self.X_fwd[self.train_size:num_samples]\n # Backward review\n self.X_trn_bwd = self.X_bwd[0:self.train_size]\n self.X_tst_bwd = self.X_bwd[self.train_size:num_samples]\n # Summary\n self.Y_trn = self.Y[0:self.train_size]\n self.Y_tst = self.Y[self.train_size:num_samples]", "def split_to_batches(self, train_data, batch_size):\n num_of_training_examples = len(train_data)\n for i in range(0, num_of_training_examples, batch_size):\n x, y = zip(*train_data[i: i+batch_size])\n yield np.vstack(x), np.vstack(y)", "def split_dataset(dataset, train_percentage, feature_headers, target_header):\r\n\r\n # Split dataset into train and test dataset\r\n train_x, test_x, train_y, test_y = train_test_split(dataset[feature_headers], dataset[target_header],train_size=train_percentage)\r\n return train_x, test_x, train_y, test_y", "def partition_data(data, folds, fold_n, fold_size):\n\n # Training Data Partition\n train_1 = data[:(folds - fold_n) * fold_size]\n train_2 = data[(folds - fold_n + 1) * fold_size:]\n train = np.concatenate((train_1, train_2), axis=0)\n\n # Validation Data Partition\n val = data[(folds - fold_n) * fold_size:(folds - fold_n + 1) * fold_size]\n\n return train, val", "def train_test_split(X, y, split = 0.20):\n \n import random\n \n lst = [i for i in range(len(X))]\n random.shuffle(lst)\n train_amount = int((1 - split) * len(X))\n\n train_samples_idx = lst[:train_amount]\n test_samples_idx = lst[train_amount:]\n \n X_train = [X[i] for i in train_samples_idx]\n X_test = [X[i] for i in test_samples_idx]\n \n y_train = [y[i] for i in train_samples_idx]\n y_test = [y[i] for i in test_samples_idx]\n \n return X_train, X_test, y_train, y_test", "def get_split_data(self):\n X, y, _, _ = self.get_subsets()\n return train_test_split(X, y, test_size=0.3, random_state=42)", "def split_data(train_percentage, *data):\n train = [entry[0:int(train_percentage * len(entry))] for entry in data]\n val = [entry[int(train_percentage * len(entry)):] for entry in data]\n return train, val", "def data_split(dataset, val_ratio=0.1, test_ratio=0.1, seed=1234):\n\n\t# How you grab the labels will depend on what type of Pytorch Dataset object 'dataset' is\n\t# (i.e. 
ImageFolder/DatasetFolder or not)\n\n\t# For fun, check the method resolution order (MRO) of 'dataset'\n\tprint('Dataset object\\'s inheritance: ', type(dataset).__mro__)\n\n\t# Determine what kind of Dataset object it is, then grab labels\n\t# Warning: currently this will break for anything other than an ImageFolder or CIFAR10 train set\n\tif isinstance(dataset, datasets.CIFAR10):\n\t\tlabels = dataset.train_labels\n\telif isinstance(dataset, datasets.ImageFolder):\n\t\tlabels = [img[1] for img in dataset.imgs]\n\telse:\n\t\terror('Dataset not supported yet')\n\n\t# Calculate class priors, (number in class)/(size of dataset)\n\tidcs = [i for i in range(len(dataset))]\n\tsamples_per_class = np.bincount(np.array(labels))\n\tpriors = samples_per_class/len(labels)\n\n\t# Number of samples in each class for val and test set \n\tval_per_class = np.ceil(samples_per_class*val_ratio).astype(np.int)\n\ttest_per_class = np.ceil(samples_per_class*test_ratio).astype(np.int)\n\n\t# Copy and shuffle the labels and corresponding indices to randomize before splitting\n\tshuffled_labels = list(labels)\n\tshuffled_idcs = list(idcs)\n\trandom.Random(seed).shuffle(shuffled_labels)\n\trandom.Random(seed).shuffle(shuffled_idcs)\n\n\t# Iterate through, grabbing indices for each class to place in validation set\n\t# until the desired number is reached\n\tval_idcs = []\n\tval_counts = np.zeros(val_per_class.shape)\n\n\tfor i, l in zip(shuffled_idcs, shuffled_labels):\n\t\t# Check if validation set quota has been reached yet for this class\n\t\tif val_counts[l] < val_per_class[l]:\n\t\t\tval_idcs.append(i)\n\t\t\tval_counts[l] += 1\n\n\t\t# Check if stopping point is reached\n\t\tif (val_counts == val_per_class).all():\n\t\t\tbreak\n\n\t# Repeat for test set\n\ttest_idcs = []\n\ttest_counts = np.zeros(test_per_class.shape)\n\tfor i, l in zip(shuffled_idcs, shuffled_labels):\n\t\t# Check if this index is already in val set\n\t\tif i in val_idcs:\n\t\t\tcontinue\n\n\t\t# Check if test set quota has been reached yet for this class\n\t\tif test_counts[l] < test_per_class[l]:\n\t\t\ttest_idcs.append(i)\n\t\t\ttest_counts[l] += 1\n\n\t\t# Check if stopping point is reached\n\t\tif (test_counts == test_per_class).all():\n\t\t\tbreak\n\n\t# Get train indices too (all the remaining samples not in val or test)\n\ttrain_idcs = [j for j in idcs if j not in val_idcs+test_idcs]\n\n\t# Split the data\n\ttrain = Subset(dataset, train_idcs)\n\tval = Subset(dataset, val_idcs)\n\ttest = Subset(dataset, test_idcs)\n\n\treturn train, val, test", "def train_val_test_split(self, p1=0.2, p2=0.2):\n X_train, X_test = train_test_split(self._data, test_size=p1)\n X_train, X_val = train_test_split(X_train, test_size=p2)\n train = KerasDataset(X_train, self.gene_meta.copy(), self.n_genes, \"train\")\n val = KerasDataset(X_val, self.gene_meta.copy(), self.n_genes, \"validation\")\n test = KerasDataset(X_test, self.gene_meta.copy(), self.n_genes, \"test\")\n return train, val, test", "def generate_training_validation_split(X: np.ndarray, y: Union[np.ndarray, np.array]):\n # an 80/20 split by default seems reasonable\n PORTION_ASSIGNED_TRAINING = 0.8\n num_data_points = X.shape[0]\n assert num_data_points == y.shape[0]\n # seed the RNG so that we get consistent results across multiple executions\n np.random.seed(1)\n training_indices = np.random.choice(\n range(X.shape[0]), size=int(PORTION_ASSIGNED_TRAINING * num_data_points), replace=False\n )\n validation_indices = np.setdiff1d(np.arange(X.shape[0]), training_indices)\n training_design_matrix 
= X[training_indices]\n training_targets_array = y[training_indices]\n validation_design_matrix = X[validation_indices]\n validation_targets_array = y[validation_indices]\n return training_design_matrix, training_targets_array, validation_design_matrix, validation_targets_array", "def train_validation_split(self, threshold=None):\n for train, validation in self._get_k_folds(5, threshold):\n train_provider = train\n validation_provider = validation\n break\n return train_provider, validation_provider" ]
[ "0.8642924", "0.83752245", "0.8353596", "0.8348435", "0.81380725", "0.80903614", "0.8082711", "0.7984082", "0.7881793", "0.78108215", "0.7794041", "0.76179", "0.761665", "0.7565056", "0.75570375", "0.7532845", "0.75105983", "0.75036824", "0.7495741", "0.7445284", "0.7443028", "0.7436937", "0.742345", "0.7417998", "0.7413789", "0.73517054", "0.73378944", "0.7337047", "0.7335494", "0.72858447", "0.72761214", "0.72714216", "0.72158265", "0.71979016", "0.71957475", "0.71938884", "0.7188611", "0.71642536", "0.7152531", "0.7139691", "0.71392107", "0.7134662", "0.71268606", "0.711716", "0.7115178", "0.70953643", "0.70936614", "0.70936614", "0.70936614", "0.70883536", "0.7087003", "0.7074244", "0.7070541", "0.706459", "0.706459", "0.70586854", "0.7057152", "0.705151", "0.70420474", "0.7038895", "0.7029331", "0.70251566", "0.7023936", "0.7021513", "0.70198107", "0.70178586", "0.6992514", "0.6975671", "0.6973761", "0.6972638", "0.69633645", "0.69628173", "0.695026", "0.69428205", "0.693619", "0.6935131", "0.6928185", "0.6927643", "0.6920025", "0.69179505", "0.69163346", "0.69141966", "0.69129705", "0.69128376", "0.69120723", "0.690622", "0.6906064", "0.69032633", "0.6900236", "0.6898305", "0.68979734", "0.68907225", "0.68894625", "0.688286", "0.6880951", "0.68760544", "0.6868785", "0.6863323", "0.6855797", "0.68533766" ]
0.7568678
13
Return shape of data i.e. image size.
def _get_image_size(self): return (3, 224, 224)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def image_shape(self):\n return self.data.shape[:2]", "def image_shape(self):\n return self.data.shape[:2]", "def shape(self):\n return self.data.shape", "def shape(self):\n return self.data.shape", "def shape(self):\n return self.data.shape", "def shape(self):\n return self.data.shape", "def shape(self):\n return self.data.shape", "def get_shape(self):\n if self.__data is None:\n return None\n return self.__data.shape", "def shape(self):\n\n return self.data.shape", "def shape(self):\n return self._data.shape", "def shape(self):\n return self._data.shape", "def shape(self):\n return self._data.shape", "def shape(self):\n return self.dataset.shape", "def data_shape():\n return DATA_SHAPE", "def size(self):\n if type(self._shape).__name__ == 'tuple':\n return self._shape[-1]\n else:\n return self._shape", "def get_shape(self):\n return shape(self._data)", "def shape(self):\n return self.image.shape", "def shape(self):\n return self._img.shape", "def shape(self):\n return () if self.data is None else self.data.shape", "def shape(self):\n return self._fl.h5[\"raw\"].shape", "def get_num_dimensions(self):\n dimensions = self.data.shape\n return dimensions[1]", "def getDimension(data):\r\n # open image for reading in binary mode\r\n\r\n # read the 2 bytes\r\n a = data[163:165]\r\n\r\n # calculate height\r\n height = (a[0] << 8) + a[1]\r\n\r\n # next 2 bytes is width\r\n a = data[165:167]\r\n\r\n # calculate width\r\n width = (a[0] << 8) + a[1]\r\n\r\n return (width, height)", "def shape(self):\n return self.array.shape", "def size(self):\n\t\treturn self.dims", "def get_layer_shape(self,layer_id):\n return self.net.blobs[layer_id].data.shape[1:] # Chop off batch size", "def shape(self,squeeze=True):\n return np.shape(self.getData(squeeze=squeeze))", "def get_data_dimensions(self):\n return image_utils.convert_shape_indexing(self._get_data_dimensions_rc(),\"rc\",self.image_indexing)", "def shape(self):\n return self.to_array().shape", "def shape_of(data, dtype=\"int32\"):\n return _make.shape_of(data, dtype)", "def get_size(self, shape_info):\r\n if shape_info:\r\n return numpy.prod(shape_info) * numpy.dtype(self.dtype).itemsize\r\n else: # a scalar\r\n return numpy.dtype(self.dtype).itemsize", "def size(self):\n return int(misc.intprod(self.shape))", "def size(img):\n\treturn img.size", "def image_shape(self) -> np.ndarray:\n return self.__image_shape", "def get_data_size(self):\n if self.doc_ftrs is not None:\n data = self.doc_ftrs\n elif self.query_ftrs:\n data = self.query_ftrs\n elif self.usr_ftrs:\n data = self.usr_ftrs\n else:\n raise ValueError('Cannot infer data size.')\n data_shape = tf.shape(data)\n return data_shape[0], data_shape[1]", "def shape(self):\n return self.__value.shape", "def size(self):\n\n frame = self.get_frame()\n\n # Unpack array dimensions\n height, width, layers = np.array(frame).shape\n\n return width, height", "def shape(self):\n return np.array([self.w, self.h])", "def get_data_shape(self):\n return self._header.get_data_shape()", "def data_shape(self):\n raise NotImplementedError", "def data_shape(self):\n raise NotImplementedError", "def shape(self):\n # type: () -> Tuple[int]\n # This may be patched by pandas to support pseudo-2D operations.\n return (self.size,)", "def batch_size(self):\n self.validate_shape_and_dtype()\n return self.rgb.shape[0]", "def shape_for_keras(data):\n raise NotImplementedError", "def get_size(self):\n return self.get_params().shape[0]", "def ndarray_size(data, dtype=\"int32\"):\n return _make.ndarray_size(data, dtype)", "def 
__len__(self):\n ret = self.data.shape[0]\n return ret", "def read_image_dims(self, image_data):\n image = self.decode_image(image_data)\n return image.shape[:3]", "def shape(self):\n return (self.numberOfImages,) + self.imageList[0].shape", "def shape(self):\n return self._image_shape", "def shape(self):\n return self.__shape", "def shape(self):\n return self.__shape", "def shape(data):\n if hasattr(data, \"shape\"):\n return list(data.shape)\n else:\n try:\n length = len(data)\n return [length] + shape(data[0])\n except TypeError:\n return []", "def length(self):\n mmap = self.memmap;\n self.memmap = 'r';\n data = self.load();\n self.memmap = mmap;\n return data.shape[0];", "def get_shape(data):\n\n def _get_shape(tensor):\n return list(tensor.shape)\n\n return recursively_apply(_get_shape, data)", "def shape(self):\n return self[0].shape", "def shape(self):\n return self.axes.size", "def get_dimensions(self):\n return self.lon_arr.shape", "def shape(self):\n return self.X.shape", "def ndim(self):\n return len(self._shape)", "def get_size(img):\n ih, iw = img.shape[:2]\n return iw * ih", "def __len__(self) -> int:\n\n return self.shape.shape[1]", "def dimension_size(self):\n return self._dim", "def get_width(self):\n width = np.size(self.img, 0)\n return width", "def ndim(self):\n # type: () -> int\n return len(self.shape)", "def get_image_shape(self) -> Tuple[int, int]:\n x = self.header.get(\"Rows\")\n y = self.header.get(\"Columns\")\n if x is not None and y is not None:\n return (x // self.size, y // self.size)", "def dim(self):\n return len(self.shape)", "def dim(self):\n return len(self.shape)", "def tensor_length(data):\n if hasattr(data, \"shape\"):\n return data.shape[0]\n else:\n try:\n length = len(data)\n except TypeError:\n length = 0\n return length", "def ndim(self):\n return len(self.shape)", "def ndim(self):\n return len(self.shape)", "def ndim(self):\n return len(self.shape)", "def ndim(self):\n return len(self.shape)", "def ndim(self):\n return len(self.shape)", "def ndim(self):\n return len(self.shape)", "def _get_image_size(img: Any) -> List[int]:\n if _is_numpy_image(img):\n return img.shape[1::-1]\n raise TypeError(\"Unexpected type {}\".format(type(img)))", "def get_input_shape(self):\n return self.network.inputs[self.input_blob].shape", "def getShape(self):\n if self.initDone:\n return self.pixelHeight,self.pixelWidth\n\n self._waitForInit()\n\n return self.pixelHeight,self.pixelWidth", "def width(self) -> int:\n return self._image_data.width", "def shape(self):\n return self.initial_value.shape", "def __len__(self):\n # type: () -> int\n return self.shape[0]", "def output_shape(self) ->torch.Size:\n return self._computed_output_shape()", "def input_image_size(interpreter):\n _, height, width, channels = interpreter.get_input_details()[0]['shape']\n return width, height, channels", "def dimensions( cls, value, typeCode=None ):\n return value.shape", "def get_data_dims(self):\n return [len(self.tps)] + self.get_shape()[1:]", "def get_value_size(self):\n size = 0\n if self._data is not None:\n size = shape(self._data)[self.value_dimension]\n\n return size", "def size(self):\n return self.__image.size", "def sizeof(shape, dtype=\"uint8\"):\n itemsize = numpy.dtype(dtype).itemsize\n cnt = 1\n if \"__len__\" in dir(shape):\n for dim in shape:\n cnt *= dim\n else:\n cnt = int(shape)\n return cnt * itemsize", "def shape_from_args(self):\n return self.args[0].shape", "def input_shape(self) ->torch.Size:\n pass", "def shape(self):\n\n self._check_assigned()\n\n if (\n 
self.lazy\n and self.transformer is not None\n and hasattr(self.transformer, \"get_transformed_shape\")\n ):\n return self.transformer.get_transformed_shape(self.values)\n else:\n return self.__array__().shape", "def __len__(self):\n return self.flat_image.size", "def ndims(x):\n return len(x.get_shape())", "def ndim(self):\n return self.data.ndim", "def __len__(self):\n return self.dataset.shape[0]", "def get_input_shape(self):\n return self.__x.shape", "def get_size(self):\n # return the size along the index dimension\n size = 0\n if self._data is not None:\n size = shape(self._data)[self.index_dimension]\n\n return size", "def image_shape(self):\n return tuple(self._img_shape)", "def get_image_size(self, **kwargs):\n points = kwargs['points']\n max_val = points.max(0)\n min_val = points.min(0)\n height = np.ceil((max_val[0] - min_val[0]) * self.res_x).astype(int)\n width = np.ceil((max_val[1] - min_val[1]) * self.res_y).astype(int)\n\n return height, width", "def ndarray_size(self) -> int:\n pass", "def __len__(self):\r\n return self.data.shape[-1]", "def get_image_size(path, width, type_name):\n fc = _os.path.getsize(path) / type_mapping[type_name].itemsize\n shape = [width, int(fc / width)]\n computed_size = shape[0] * shape[1] * type_mapping[type_name].itemsize\n measured_size = _os.path.getsize(path)\n return shape" ]
[ "0.82124084", "0.82124084", "0.8147183", "0.8147183", "0.8147183", "0.8147183", "0.8147183", "0.80937505", "0.8010233", "0.8006064", "0.8006064", "0.8006064", "0.80019945", "0.79247403", "0.79092556", "0.78679425", "0.786282", "0.7814866", "0.7725889", "0.76113486", "0.75898", "0.75542593", "0.75402063", "0.75401044", "0.7533746", "0.7514471", "0.7483948", "0.7467519", "0.7459952", "0.74306667", "0.7425545", "0.7397945", "0.73844516", "0.73783296", "0.7373956", "0.7370754", "0.7351399", "0.7344849", "0.7329388", "0.7329388", "0.73232275", "0.7310361", "0.7304852", "0.7299279", "0.7293622", "0.72876865", "0.7283168", "0.72561735", "0.7248334", "0.72393227", "0.72393227", "0.71899706", "0.71791726", "0.715908", "0.7158376", "0.71563023", "0.71470845", "0.71367127", "0.7116653", "0.7110866", "0.71107966", "0.7108274", "0.71076566", "0.7080282", "0.70786023", "0.7068122", "0.7068122", "0.7065524", "0.70564526", "0.70564526", "0.70564526", "0.70564526", "0.70564526", "0.70564526", "0.70514107", "0.70389074", "0.70333725", "0.7015813", "0.7012563", "0.7003152", "0.6996766", "0.6995038", "0.6977338", "0.69573003", "0.6955594", "0.6955067", "0.69385815", "0.693204", "0.6929297", "0.69215196", "0.69208926", "0.69066375", "0.68992186", "0.6891699", "0.68904006", "0.6889557", "0.6887211", "0.6879573", "0.6877291", "0.68770766", "0.6874067" ]
0.0
-1
Returns mean of the entire dataset.
def _get_mean(self): return (0.485, 0.456, 0.406)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def mean(self):\r\n return np.mean(self.data_array)", "def mean(self):\n return self.data.mean(axis=-1, keepdims=True)", "def mean(self):\n return self.aggregate(np.mean)", "def Mean(data):\n return data.mean()", "def mean(self):\n mean = sum(self.data)/self.size\n return mean", "def mean(self):\n\n\t\tif not self._masked:\n\t\t\t\n\t\t\treturn self.data.mean()\n\t\t\n\t\telse:\n\t\t\t\n\t\t\tif not hasattr(self,\"_full_mask\"):\n\t\t\t\tself.maskBoundaries()\n\t\t\t\n\t\t\treturn self.data[self._full_mask].mean()", "def mean(self):\n\n return self._reduce_for_stat_function(F.mean, only_numeric=True)", "def mean(self):\n return self._summarize(lambda c: c.mean)", "def mean(self):\n return self._lift(\"mean\")", "def mean(self) -> float:\n return self._data.mean()", "def meanOf(classObj):\r\n return np.mean(classObj.dataSet, axis=0)", "def mean(data):\n n = len(data)\n return sum(data)/float(n)", "def mean(self):\n return self._mean", "def mean(self):\n return self._mean", "def get_mean(self):\n self.meanval = np.mean(self.adulist)", "def mean(data):\n if iter(data) is data:\n data = list(data)\n data_len = len(data)\n if data_len < 1:\n raise StatisticsError('mean requires at least one data point')\n return sum(data) / data_len", "def mean(self):\n return self._mean_func", "def mean(data):\n n = len(data)\n if n < 1:\n raise ValueError('mean requires at least one data point')\n return sum(data)/n # in Python 2 use sum(data)/float(n)", "def mean(data):\n n = len(data)\n if n < 1:\n raise ValueError('mean requires at least one data point')\n return sum(data)/n # in Python 2 use sum(data)/float(n)", "def mean(data):\n n = len(data)\n if n < 1:\n raise ValueError('mean requires at least one data point')\n return sum(data)/n # in Python 2 use sum(data)/float(n)", "def getMean(self):\n return self.mean", "def ensemble_mean(self):\n return self.mean(dim='mem')", "def normalize_mean(dataset):\n normalized_dataset = np.array(dataset)\n return normalized_dataset - np.mean(normalized_dataset)", "def mean(self, axis=0, **kwargs) -> \"Dataset\":\n return self.aggregate(axis=axis, func=np.mean, **kwargs)", "def get_mean(self, X):\n raise NotImplementedError", "def mean(self):\r\n\t\treturn sum(self.sample)/len(self.sample)", "def average(self):\n return np.mean(self.buf[:self._size], axis=0)", "def mean(data):\n n = len(data)\n if n < 1:\n return 0\n return sum(data)/float(n)", "def get_mean(self):\r\n for i in range(1,len(self.data[0])):\r\n self.prom.append(np.mean(self.data[:,i]))", "def calculate_mean(self) -> float:\n\n if self.data:\n return np.mean(self.data)\n else:\n return self.mu", "def mean(self):\n return self.sum / self.sum_weights", "def mean(self):\n return self.vmean", "def get_mean(self):\n return self.serie.mean()", "def get_mean_of_dataset(train_data_loader, args, idx=0):\n meter = AverageMeter()\n for i in train_data_loader:\n if isinstance(i, list):\n meter.update(i[idx])\n else:\n meter.update(i)\n data_mean = meter.mean\n if data_mean.ndim == 2: data_mean = data_mean.mean(0)\n return tensor(data_mean, args)", "def mean(self, indices = None):\n\n # make sure that data is well-formed\n for j in range(len(self.data)-1):\n assert len(self.data[j]) == len(self.data[j+1])\n\n # populate indices, if not given:\n if (indices == None):\n indices = range(len(self.data[0]))\n \n # __average__() need to be defined in child classes\n # the child class also knows what needs to be averaged\n # and what needs to be sanity checked.\n return self.__average__(indices)", "def get_means(self):\n 
if self.metadata is None:\n self.get_metadata()\n\n # we want only the numerical features\n df = self.metadata.select_dtypes(include=['int64', 'float64'])\n return df.mean()", "def average(data):\n return np.average(data)", "def mean(\n self,\n dim: Dims = None,\n *,\n skipna: bool | None = None,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> DataArray:\n return self.reduce(\n duck_array_ops.mean,\n dim=dim,\n skipna=skipna,\n keep_attrs=keep_attrs,\n **kwargs,\n )", "def mean(self):\n return np.average(self.particles, weights=self.weights, axis=0)", "def mean(\n self,\n dim: Dims = None,\n *,\n skipna: bool | None = None,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> Dataset:\n return self.reduce(\n duck_array_ops.mean,\n dim=dim,\n skipna=skipna,\n numeric_only=True,\n keep_attrs=keep_attrs,\n **kwargs,\n )", "def get_mean(self):\n average = self.df[self.col_name].mean()\n return average", "def mean(self) -> \"Stream[float]\":\n return self.agg(np.mean).astype(\"float\")", "def mean(data):\n n, total = _generalised_sum(data, None)\n if n == 0:\n raise ValueError('mean of empty sequence is not defined')\n return total/n", "def get_mean(self):\n raise NotImplementedError(\"This is an abstract method and needs to be implemented in derived classes.\")", "def mean_value(self):\n\n return self._system.mean()", "def mean(self):\n clean, total = self._prepare_for_stats()\n if not total:\n return None\n\n weighted_sum = sum(key * value for key, value in clean.items())\n return weighted_sum / total", "def mean(data_matrix):\n return np.asmatrix(np.mean(data_matrix, axis=0))", "def get_average(data):\n average = sum(data) / len(data)\n\n return average", "def mean(self) -> FrameLike:\n return super().mean()", "def mean(self) -> FrameLike:\n return super().mean()", "def mean(self) -> FrameLike:\n return super().mean()", "def mean(self) -> FrameLike:\n return super().mean()", "def mean(self) -> FrameLike:\n return super().mean()", "def mean(self) -> FrameLike:\n return super().mean()", "def _compute_global_mean(self, dataset, session, limit=None):\n _dataset = dataset\n mean = 0.\n if isinstance(limit, int):\n _dataset = _dataset[:limit]\n if isinstance(_dataset, np.ndarray):\n mean = np.mean(_dataset)\n else:\n # Iterate in case of non numpy data\n for i in range(len(dataset)):\n mean += np.mean(dataset[i]) / len(dataset)\n self.global_mean.assign(mean, session)\n return mean", "def get_mean(self):\n mean = np.array(np.zeros((4,8)))\n for i,c in enumerate(self.cellLines):\n for j,l in enumerate(self.ligands):\n mean[i][j] = self.aucs[c][l]['mean']\n return mean", "def _get_mean(self):\n return [layer._get_mean() for layer in self.layers]", "def mean(self, avg=True):\n if not self.fp_init:\n if not avg:\n return self._calc_mean(self.f, self.a, self.b, self.Z)\n else:\n return self._calc_mean(self.f_avg, self.a_avg, self.b_avg,\n self.Z_avg)\n return self._mean if not avg else self._mean_avg", "def my_mean(x):\n return my_sum(x) / my_len(x)", "def mean_list(data):\n return sum(data) / len(data)", "def calculate_mean(weather_data):\n sum_value=0\n\n for value in weather_data:\n sum_value += float(value)\n \n mean = sum_value/len(weather_data)\n\n return (mean)", "def avgX(self):\n return np.mean(self.getx())", "def mean(self):\n mean=np.zeros(self.shape)\n if self.Fourier:\n ind=self.mean_index()\n for di in np.ndindex(*self.shape):\n mean[di]=np.real(self.val[di][ind])/self.fft_coef\n else:\n for di in np.ndindex(*self.shape):\n mean[di]=np.mean(self.val[di])\n return mean", 
"def mean(self):\n raise RuntimeError(\"Needs to be implemented in base class\")", "def mean(x):\n return sum(x) / len(x)", "def mean(self, values):\n return self.aggregate(values, \"mean\")", "def _compute_global_mean(self, dataset, session, limit=None):\n _dataset = dataset\n mean = 0.\n if isinstance(limit, int):\n _dataset = _dataset[:limit]\n if isinstance(_dataset, np.ndarray) and not self.global_mean_pc:\n mean = np.mean(_dataset)\n else:\n # Iterate in case of non numpy data\n for i in range(len(dataset)):\n if not self.global_mean_pc:\n mean += np.mean(dataset[i]) / len(dataset)\n else:\n mean += (np.mean(dataset[i], axis=(0, 1),\n keepdims=True) / len(dataset))[0][0]\n self.global_mean.assign(mean, session)\n return mean", "def getMean(self, windowSize=0):\r\n try:\r\n if self._data.size == 0:\r\n raise RuntimeError(\"Filter1D data is empty. Call Filter1D.addDataPoint() to add data prior calling Filter1D.getMean().\")\r\n if type(windowSize) is int:\r\n if windowSize <= 0 or windowSize > self._maxSize:\r\n windowSize = self._maxSize\r\n return np.mean(self._data[-windowSize:])\r\n else:\r\n raise TypeError(\"windowSize must be an integer\")\r\n except TypeError or RuntimeError:\r\n raise", "def mean(self):\n return self.mu", "def average(self):\n s = self.sum()\n flat_shape = self.flatten_shape(self.shape)\n num_of_elements = fct.reduce(opr.mul, flat_shape, 1)\n average = s / num_of_elements\n return average", "def get_mean(data, n=-1):\n \n return round((sum(data)/n),1)", "def average(self):\n if self._average is None:\n self._average = sum([df.df for df in self])/len(self)\n return self._average", "def average(self):\n return self.summation() / self.count()", "def mean(self):\n\n return time_stat(self, stat=\"mean\")", "def average(data, number=None):\n if number is None:\n return numpy.mean(data)\n return numpy.sum(data) / number", "def calcAverage(dat):\n return sum(dat)/len(dat)", "def mean(x):\n return sum(x)/len(x)", "def mean(self) -> Dict:\n raise NotImplementedError", "def _mean(items):\n return sum(items) / len(items)", "def mean(values):\n # Write the mean() function\n mean = sum(values) / len(values)\n return mean", "def mean(self, mean):\n\n self._mean = mean", "def mean(values):\n # Write the mean() function\n mean = sum(values) / len(values)\n return mean", "def mean(inputs):\n # pylint disable necessary for numpy and pandas\n if len(inputs) == 0: # pylint: disable=g-explicit-length-test\n return 0\n else:\n return np.mean(inputs)", "def mean(x):\n length = len(x)\n\n return sum(x) / length if length else None", "def mean(dfs):\n df_mean = sum(dfs)\n return df_mean/len(dfs)", "def calculate_mean(data_dir):\n data = ([each for each in os.listdir(data_dir)\n if each.endswith('.h5')])\n all_data = []\n for num_data in data:\n processed_data = os.path.join(data_dir, num_data)\n file = h5py.File(processed_data, 'r') \n data = file.get('Processed_data') \n all_data.append(data)\n all_data = np.array(all_data)\n all_data = np.mean(all_data, axis=0)\n return all_data", "def mean(\n self,\n dim: Dims = None,\n *,\n skipna: bool | None = None,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> Dataset:\n if (\n flox_available\n and OPTIONS[\"use_flox\"]\n and contains_only_chunked_or_numpy(self._obj)\n ):\n return self._flox_reduce(\n func=\"mean\",\n dim=dim,\n skipna=skipna,\n numeric_only=True,\n # fill_value=fill_value,\n keep_attrs=keep_attrs,\n **kwargs,\n )\n else:\n return self.reduce(\n duck_array_ops.mean,\n dim=dim,\n skipna=skipna,\n numeric_only=True,\n 
keep_attrs=keep_attrs,\n **kwargs,\n )", "def mean(\n self,\n dim: Dims = None,\n *,\n skipna: bool | None = None,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> Dataset:\n if (\n flox_available\n and OPTIONS[\"use_flox\"]\n and contains_only_chunked_or_numpy(self._obj)\n ):\n return self._flox_reduce(\n func=\"mean\",\n dim=dim,\n skipna=skipna,\n numeric_only=True,\n # fill_value=fill_value,\n keep_attrs=keep_attrs,\n **kwargs,\n )\n else:\n return self.reduce(\n duck_array_ops.mean,\n dim=dim,\n skipna=skipna,\n numeric_only=True,\n keep_attrs=keep_attrs,\n **kwargs,\n )", "def mean(\n self,\n dim: Dims = None,\n *,\n skipna: bool | None = None,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> DataArray:\n if (\n flox_available\n and OPTIONS[\"use_flox\"]\n and contains_only_chunked_or_numpy(self._obj)\n ):\n return self._flox_reduce(\n func=\"mean\",\n dim=dim,\n skipna=skipna,\n # fill_value=fill_value,\n keep_attrs=keep_attrs,\n **kwargs,\n )\n else:\n return self.reduce(\n duck_array_ops.mean,\n dim=dim,\n skipna=skipna,\n keep_attrs=keep_attrs,\n **kwargs,\n )", "def mean(\n self,\n dim: Dims = None,\n *,\n skipna: bool | None = None,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> DataArray:\n if (\n flox_available\n and OPTIONS[\"use_flox\"]\n and contains_only_chunked_or_numpy(self._obj)\n ):\n return self._flox_reduce(\n func=\"mean\",\n dim=dim,\n skipna=skipna,\n # fill_value=fill_value,\n keep_attrs=keep_attrs,\n **kwargs,\n )\n else:\n return self.reduce(\n duck_array_ops.mean,\n dim=dim,\n skipna=skipna,\n keep_attrs=keep_attrs,\n **kwargs,\n )", "def compute_mean(self):\n # load_in_all_parameters(self.save_directory, self.auto_encoder)\n for i, data_row in enumerate(self.X_train_naive):\n input_nn = data_row\n if torch.cuda.is_available():\n input_nn = Variable(torch.Tensor(np.asarray(input_nn).reshape(1, 242)).cuda())\n else:\n input_nn = Variable(torch.Tensor(np.asarray(input_nn).reshape(1, 242)))\n\n prediction_embedding = self.model.forward_only_encoding(input_nn)\n print(prediction_embedding)\n if i == 0:\n self.embedding_np = prediction_embedding.data.clone().cpu().numpy()[0]\n else:\n self.embedding_np = np.vstack((self.embedding_np, prediction_embedding.data.clone().cpu().numpy()[0]))\n self.mean_embedding = np.average(self.embedding_np, axis=0)\n print('mean embedding is ', self.mean_embedding)", "def mean(self):\n return sum(p * x for x, p in self.items())", "def get_mean(cls, data: tuple or list) -> float:\n cls._data_validation(data)\n sum_ = fsum(data)\n n = cls.get_n(data)\n try:\n return float(sum_ / n)\n except ZeroDivisionError as exc:\n # for hyp score calculation, n = 0 for empty set is useful\n return 0", "def mean(values):\r\n return sum(values) / float(len(values))", "def nanmean(self, axis=0, **kwargs) -> \"Dataset\":\n return self.aggregate(axis=axis, func=np.nanmean, **kwargs)", "def mean(headers, data):\n\tcolumn_matrix=data.get_data(headers)\n\tmean_values=column_matrix.mean(0)\n\t\n\treturn mean_values", "def get_mean(iterable):\n return sum(iterable) / len(iterable)", "def hyper_mean(udf_data: UdfData):\n # Iterate over each tile\n cube_list = []\n for cube in udf_data.get_datacube_list():\n mean = cube.array.mean(dim=\"t\")\n mean.name = cube.id + \"_mean\"\n cube_list.append(XarrayDataCube(array=mean))\n udf_data.set_datacube_list(cube_list)", "def getMean(self, field):\n\n return np.mean([self.fitnesses[i][field] for i in range(len(self.fitnesses))])", "def mean(points):\r\n\t\treturn sum(points)/len(points)", 
"def global_mean(self):\n if self._global_mean is None:\n self._global_mean = np.mean([r for (_, _, r) in\n self.all_ratings()])\n\n return self._global_mean" ]
[ "0.82411206", "0.8131355", "0.8031648", "0.8015793", "0.79474837", "0.7917345", "0.7879034", "0.78769094", "0.7875092", "0.78153133", "0.76866186", "0.767841", "0.7648848", "0.7648848", "0.75604665", "0.7517856", "0.74952906", "0.7446794", "0.7446794", "0.7446794", "0.7443946", "0.7415502", "0.73920345", "0.7389664", "0.73764", "0.7353897", "0.73479265", "0.7333165", "0.7318242", "0.73139375", "0.7307442", "0.72832215", "0.7270336", "0.7268258", "0.7229411", "0.7209308", "0.7205407", "0.7174575", "0.71558887", "0.7148965", "0.71215004", "0.7113961", "0.70970905", "0.70839506", "0.70573217", "0.70511997", "0.70207834", "0.7017219", "0.7015154", "0.7015154", "0.7015154", "0.7015154", "0.7015154", "0.7015154", "0.7004615", "0.69999087", "0.69986236", "0.69853663", "0.69748664", "0.6963187", "0.6947705", "0.6945113", "0.69424003", "0.692531", "0.6910596", "0.6909641", "0.6908464", "0.69043183", "0.6893314", "0.6888224", "0.68860924", "0.68802506", "0.68678766", "0.68612605", "0.6856549", "0.6849033", "0.6842512", "0.68209386", "0.68056184", "0.6804452", "0.6799107", "0.67986614", "0.6795001", "0.67789334", "0.67764914", "0.67669827", "0.67609817", "0.67609817", "0.67553383", "0.67553383", "0.6755224", "0.67480576", "0.6742922", "0.6742271", "0.6740069", "0.67322713", "0.671411", "0.67050683", "0.6694809", "0.66947114", "0.66932225" ]
0.0
-1
Returns standard deviation of the entire dataset.
def _get_std(self): return (0.229, 0.224, 0.225)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_std_dev(self, data):\n mean = 0\n data_arr = []\n for i in data:\n data_arr.append(i[1])\n return statistics.stdev(data_arr)", "def get_stddev(self):\r\n for i in range(1,len(self.data[0])):\r\n self.stddev.append(np.std(self.data[:,i]))", "def stdDev(data):\r\n sum = 0\r\n ave = average(data)\r\n for i in data:\r\n sum += (i-ave)**2\r\n return math.sqrt(sum/len(data))", "def _std(self, data):\n var = stats.var(data)\n if var>0.0:\n sd = math.sqrt(var)\n else:\n sd = 0.0\n return sd", "def stdev(headers, data):\n\tcolumn_matrix=data.get_data(headers)\n\tmean_values=column_matrix.std(0)\n\tstd_values=mean_values.tolist()\n\treturn std_values", "def stddev(self, sample=True):\n distance_squared = list(map(lambda x: (x - sum(self.data)/self.size)**2, self.data))\n\n if sample == True:\n variance = sum(distance_squared)/(self.size - 1)\n stddev = variance**(1/2)\n if sample == False:\n variance = sum(distance_squared)/(self.size)\n stddev = variance**(1/2)\n return stddev", "def std(self):\n\n return self._reduce_for_stat_function(F.stddev, only_numeric=True)", "def stdev(items):\n return Series.std(Series(items))", "def standard_dev(self):\n return self.variance()**0.5", "def standard_deviation(data):\n\n return np.sqrt(variance(data))", "def standard_deviation(data):\n\n return np.sqrt(variance(data))", "def std(self):\r\n return np.std(self.data_array)", "def std_dev(self) -> float:\n return math.sqrt(self.variance())", "def get_stdev(cls, data: tuple or list, is_population=False) -> float:\n cls._data_validation(data)\n from math import sqrt\n return sqrt(cls.get_var(data, is_population))", "def get_std_dev(data, n = -1):\n mean = get_mean(data, n =n)\n\n deviations = []\n\n for i in range(0,n):\n deviations.append( (data[i] - mean)**2 )\n\n std_dev = sqrt( sum(deviations)/n )\n\n return std_dev", "def std(self, dset):\n avg = self.mean(dset)\n variance = sum([math.pow(x - avg, 2) for x in dset])\n std = math.sqrt(variance)\n return std", "def std(self):\n return self.data.std(axis=-1, keepdims=True)", "def stddev(data, ddof=0):\n n = len(data)\n if n < 2:\n return 0\n ss = _ss(data)\n pvar = ss/(n-ddof)\n return pvar**0.5", "def calculate_std(self) -> float:\n\n if self.data:\n return np.std(self.data)\n else:\n return self.sigma", "def calculate_std_dev(X):\n\tstd_dev = np.sqrt(calculate_variance(X))\n\treturn std_dev", "def standard_deviation(self):\n clean, total = self._prepare_for_stats()\n if not total:\n return None\n\n return math.sqrt(clean.variance())", "def stddev(r):\n avg = average(r)\n sdsq = sum([(i - avg) ** 2 for i in r])\n return (sdsq / (len(r) - 1 or 1)) ** 0.5", "def stdev(data, xbar=None):\n return math.sqrt(variance(data, xbar))", "def std(self) -> float:\n return self._data.std()", "def standard_deviation(self):\r\n\t\treturn self.variance()**(1/2)", "def stddev(data, ddof=0):\n n = len(data)\n if n < 2:\n raise ValueError('variance requires at least two data points')\n ss = _ss(data)\n pvar = ss/(n-ddof)\n return pvar**0.5", "def standard_deviation( values, sample=False ):\n return ma.sqrt( variance( values, sample ) )", "def stdev(values):\n mean = avg(values)\n diffs = [(value - mean) ** 2 for value in values]\n return avg(diffs) ** 0.5", "def calc_standard_deviation(data: list) -> float:\n mean = calc_mean(data)\n acc = 0.0\n for n in data:\n acc += (n - mean) ** 2\n acc /= len(data) - 1\n return math.sqrt(acc)", "def std(self):\n variance, mean = self.variance()\n standard_deviation = variance**0.5\n print(f\"Standard Deviation is: 
{standard_deviation}\")\n return standard_deviation, mean", "def stddev(self, num_list):\n try:\n mean = self.average(num_list)\n\n minus_mean = []\n\n for number in num_list:\n try:\n minus_mean.append((number - mean) ** 2)\n except Exception as e:\n print(\"Error: \", e)\n\n meany_mean = self.average(minus_mean)\n\n meany_mean = meany_mean ** .5\n\n except Exception as e:\n print(\"Error: \", e)\n\n return meany_mean", "def StandardDeviation(numlist):\n\tv = Variance(numlist)\n\t#print v\n\treturn math.sqrt(v)", "def get_std(self):\r\n cov = self.to_sparse().diagonal()\r\n std = np.sqrt(cov)\r\n return pd.Series(std, index=self.data.index, name=\"STD\")", "def global_std_deviation(self):\n\n return np.std(self.average_scores_all_subjects(), axis=0)", "def std_deviation(array):\n if not array or len(array) == 1:\n return 0\n\n average = AGGREGATES['mean_arithmetic'](array)\n variance = map(lambda x: (x-average)**2,array)\n stdev = AGGREGATES['mean_arithmetic'](variance)\n return math.sqrt(stdev)", "def std_dev(l):\n return variance(l)**.5", "def std_dev(L, is_sample=0):\n\treturn math.sqrt(variance(L, is_sample))", "def stdev_from_mean(x):\r\n x = array(x)\r\n return (x - mean(x)) / std(x)", "def test_stddev(self):\n self.assertEqual(stddev(list1, sample=False), np.std(list1))\n self.assertEqual(stddev(list1), np.std(list1, ddof=1))", "def get_stdev(self):\n var_x = numpy.var(self._x)\n var_y = numpy.var(self._y)\n return numpy.sqrt(var_x + var_y)", "def std(self):\n stds = [(x.m-self.mean)**2 + x.std**2 for x in self.xs]\n return np.sqrt(np.dot(self.a, np.array(stds)))", "def standard_deviation(xs: List[float]) -> float:\n return math.sqrt(variance(xs))", "def standard_deviation(xs: List[float]) -> float:\n return math.sqrt(variance(xs))", "def normalize_standard_deviation(dataset):\n return dataset*(1/np.std(dataset))", "def MeanAndStandardDeviation(data):\n n = len(data)\n if n == 0:\n return 0.0, 0.0\n mean = float(sum(data)) / n\n variance = sum([(element - mean)**2 for element in data]) / n\n return mean, math.sqrt(variance)", "def std(self):\n return self._summarize(lambda c: c.std)", "def get_mean_stddev(self):\n return self.get_mean(), self.get_std_dev()", "def stddev(std_numbers):\n mean = sum(std_numbers) / float(len(std_numbers))\n sum_std = 0.0\n\n for x in std_numbers:\n sum_std += (mean - x) * (mean - x)\n\n variance = sum_std / float(len(std_numbers))\n stddev = math.sqrt(variance)\n\n return stddev", "def mean_stddev(self):\n if len(self.vs) == 0:\n raise StdDevFilterException\n\n mx = self.mean()\n # compute variance\n variance = sum([(x - mx)**2 for x in self.vs])/len(self.vs)\n # return mean value and standard deviation (square root of variance)\n return mx,math.sqrt(variance)", "def pstdev(data):\n n = len(data)\n c = mean(data)\n ss = sum((x-c)**2 for x in data)\n if n < 2:\n raise ValueError('variance requires at least two data points')\n pvar = ss/n # the population variance\n return round(pvar**0.5, 1)", "def test_stdev_from_mean(self):\r\n x = [2.1, 4.2, 5.9, 8.4, 9.6]\r\n result = stdev_from_mean(x)\r\n self.assertFloatEqual(\r\n result,\r\n [-1.292463399014413,\r\n -0.60358696806764478,\r\n -0.045925095396451399,\r\n 0.77416589382589174,\r\n 1.1678095686526162])", "def get_std(self):\n std_value = self.df[self.col_name].std()\n return std_value", "def std(self):\n return np.sqrt(self.var)", "def sampleStandardDeviation(numlist):\n\tv = sampleVariance(numlist)\n\t#print v\n\treturn math.sqrt(v)", "def calc_std_deviation(average):\r\n sqr_sum = 0\r\n count = 
len(records)\r\n for i in records:\r\n value = int(i[i.find(',')+1:])\r\n sqr_sum+=(value-average)**2 \r\n std_deviation = math.sqrt(sqr_sum/count)\r\n return std_deviation", "def overall_standard_deviation(individual, test_data, truth_data, name=None):\r\n test_data = np.array(test_data)\r\n truth_data = np.array(truth_data)\r\n return np.std(test_data - truth_data)", "def std(self, ddof=0, weight_by_area=True):\n return numpy.sqrt(self.var(ddof=ddof, weight_by_area=weight_by_area))", "def stddev(x: pd.Series, d: int or float) -> pd.Series:\n if isinstance(d, float):\n d = math.floor(d)\n\n if isinstance(x.index, pd.MultiIndex):\n return x.groupby(level=1).rolling(d).std()\n else:\n return x.rolling(d).std()", "def get_stdev(self):\n if self._y.mean() == 0:\n raise ValueError('invalid value of mean of y, the ratio is not computable')\n\n var = numpy.mean(self._x ** 2) * numpy.mean(1.0 / self._y ** 2) - \\\n (numpy.mean(self._x) ** 2) * (numpy.mean(1.0 / self._y) ** 2)\n return numpy.sqrt(var)", "def GetStandardDeviation(vals_l, mean):\n\n\n sum_deviations_squared = 0\n\n for x in vals_l:\n sum_deviations_squared += (x - mean)**2\n\n return math.sqrt(float(sum_deviations_squared)/float(len(vals_l)))", "def zstddev(list) -> float:\n\n var = zvariance.zvariance(list)\n std_dev = math.sqrt(var)\n return std_dev", "def sd(x):\n x_mean = mean(x)\n return (\n sum((x_i - x_mean) ** 2 for x_i in x) / (len(x) - 1)\n ) ** 0.5", "def datasd(var):\n sd = 0\n getdata = \"sd = (data[i].\" + var + \" - dataavg(var)) ** 2\"\n for i in data:\n exec getdata\n return sqrt(sd)", "def Std(data):\n return data.std()", "def calculate_std_dev(temps, temp_average):\n\n variance_sum = 0\n for temp in temps:\n variance = (temp - temp_average) ** 2\n variance_sum += variance\n\n variance = variance_sum / len(temps)\n standard_deviation = variance ** 0.5\n\n return standard_deviation", "def standard_deviation(list):\n num_items = len(list)\n mean = sum(list) / num_items\n differences = [x - mean for x in list]\n sq_differences = [d ** 2 for d in differences]\n ssd = sum(sq_differences)\n\n\n variance = ssd / num_items\n\n sd = sqrt(variance)\n\n return sd", "def standard_deviation(scores):\n num_scores = len(scores)\n if num_scores == 0: return 0\n\n mean_score = mean(scores, False)\n sum_x2 = sum(score**2 for score in scores)\n std_dev_score = (sum_x2/num_scores - mean_score ** 2) ** 0.5\n return round(std_dev_score, 2)", "def std(self, data):\n ts_ = self.ts(data)\n if 'year' not in ts_.coords:\n return ts_\n return ts_.std('year')", "def deviation(xs):\n\ta = avg(xs)\n\treturn sqrt(sum([(x - a) ** 2 for x in xs]) / (len(xs) - 1))", "def std(self) -> \"Stream[float]\":\n return self.agg(lambda x: np.std(x, ddof=1)).astype(\"float\")", "def std(dfs):\n df_mean = mean(dfs)\n df_sq = sum([(df - df_mean)*(df - df_mean) for df in dfs])\n return df_sq / len(dfs)", "def pooled_standard_deviation(input_variances):\r\n # compute and return pooled standard deviation\r\n return sqrt(mean(square([float(i) for i in input_variances])))", "def std(self, axis=None, dtype=None, out=None, ddof=0, keepdims=False):\n ret = self.var(axis=axis, dtype=dtype, out=out, ddof=ddof, keepdims=keepdims)\n\n ret = np.sqrt(ret)\n return ret", "def pstdev(data):\n n = len(data)\n if n < 2:\n raise ValueError('variance requires at least two data points')\n ss = _ss(data)\n pvar = ss/n # the population variance\n return pvar**0.5", "def pstdev(data):\n n = len(data)\n if n < 2:\n raise ValueError('variance requires at least two data points')\n ss = 
_ss(data)\n pvar = ss/n # the population variance\n return pvar**0.5", "def pstdev(data):\n n = len(data)\n if n < 2:\n raise ValueError('variance requires at least two data points')\n ss = _ss(data)\n pvar = ss/n # the population variance\n return pvar**0.5", "def pstdev(data):\n n = len(data)\n if n < 2:\n raise ValueError('variance requires at least two data points')\n ss = _ss(data)\n pvar = ss/n # the population variance\n return pvar**0.5", "def get_sterr(cls, data: tuple or list, is_population=False) -> float:\n cls._data_validation(data)\n from math import sqrt\n return cls.get_stdev(data, is_population) / sqrt(cls.get_n(data))", "def sd(vals):", "def sd(self, dist=None):\n return np.sqrt(self.var(dist))", "def standard_deviation(lst):\n\tnum_items = len(lst)\n\tif num_items == 0:\n\t\treturn -1\n\tmean = sum(lst) / num_items\n\tdifferences = [x - mean for x in lst]\n\tsq_differences = [d ** 2 for d in differences]\n\tssd = sum(sq_differences)\n\treturn ssd", "def std(self, axis=0, **kwargs) -> \"Dataset\":\n return self.aggregate(axis=axis, func=np.std, **kwargs)", "def std(values, ave):\n return math.sqrt(float(sum((value-ave)**2 for value in values))/len(values))", "def getSTD(self, field):\n return np.std([self.fitnesses[i][field] for i in range(len(self.fitnesses))])", "def std(self):\n\n\t\tif not self._masked:\n\n\t\t\treturn self.data.std()\n\n\t\telse:\n\n\t\t\tif not hasattr(self,\"_full_mask\"):\n\t\t\t\tself.maskBoundaries()\n\n\t\t\treturn self.data[self._full_mask].std()", "def untruncatedStdDev(self):\n return self._distribution.untrStdDev()", "def deviation(values, val):\n\tm = mean(values)\n\tdev = abs(val-m)\n\tsd = standard_deviation(values)\n\treturn float(dev)/sd if sd!=0 else 0.0", "def daily_std(self, periods=252):\n return self.pct_change[min(periods, self._max_periods) * -1:].std()", "def std(mean, vals):\n return sqrt(sum([(i-mean)**2 for i in vals])/len(vals))", "def calc_sq_std(df):\n\n sq_std = df.dropna()\n\n sq_std = (df['std'].divide(df['mean']))**2\n\n sq_std.name = 'sq_std'\n\n sq_std = pd.DataFrame(sq_std)\n\n sq_std = sq_std.dropna()\n\n return sq_std", "def std_mean(self):\n std = self.std\n if self.ddof != 0:\n # ddof correction, (need copy of std)\n std = std * np.sqrt(\n (self.sum_weights - self.ddof) / self.sum_weights\n )\n\n return std / np.sqrt(self.sum_weights - 1)", "def calc_stdev(a, b, c, d, e):\n mean_of_num = (a + b + c + d + e) / 5\n return (((a - mean_of_num)**2 + (b - mean_of_num)**2 + (c - mean_of_num)**2\n + (d - mean_of_num)**2 + (e - mean_of_num)**2) / 5) ** 0.5", "def std(x, axis=None):\r\n try:\r\n sample_variance = var(x, axis=axis)\r\n except IndexError as e: # just to avoid breaking the old test code\r\n raise IndexOrValueError(e)\r\n return sqrt(sample_variance)", "def std(x):\n return sqrt(TinyStatistician.var(x))", "def std_ddof(self, ddof=0):\n return np.sqrt(self.var_ddof(ddof=ddof))", "def std_dev(list_num):\n\n # Calculate the mean of the numbers\n mean = sum(list_num)/len(list_num)\n\n # Initialise a variable to hold the sum of the squared distance to the mean\n sum_sqrd_dist = 0\n \n # Iterate over the numbers\n for num in list_num:\n # Subtract the mean from the number and square the result\n sqrd_dist = (num - mean)**2\n # Add the number to the sum of the squared distances \n sum_sqrd_dist = sum_sqrd_dist + sqrd_dist\n\n # return the square root of the sum of squared distances divided by the length of the list\n return (sum_sqrd_dist/len(list_num))**(1/2)", "def deviation_from_actual_value(array):\n if 
array.ndim == 3:\n deviations = np.zeros((array.shape[1],array.shape[2]))\n for pt in range(array.shape[1]):\n for param in range(array.shape[2]):\n dev = np.std(array[:,pt,param])\n deviations[pt,param] = dev\n return deviations\n\n elif array.ndim == 4:\n deviations = np.zeros((array.shape[1], array.shape[2], array.shape[3]))\n for pt in range(array.shape[1]):\n for param_ind1 in range(array.shape[2]):\n for param_ind2 in range(array.shape[3]):\n dev = np.std(array[:, pt, param_ind1, param_ind2])\n deviations[pt, param_ind1, param_ind2] = dev\n return deviations\n else:\n raise ValueError(\"Wrong num of dimensions\")", "def std(\n self,\n axis: Optional[Axis] = None,\n skipna: bool = True,\n ddof: int = 1,\n numeric_only: bool = None,\n ) -> Union[Scalar, \"Series\"]:\n if not isinstance(ddof, int):\n raise TypeError(\"ddof must be integer\")\n\n axis = validate_axis(axis)\n\n if numeric_only is None and axis == 0:\n numeric_only = True\n\n def std(psser: \"Series\") -> Column:\n spark_type = psser.spark.data_type\n spark_column = psser.spark.column\n if isinstance(spark_type, BooleanType):\n spark_column = spark_column.cast(LongType())\n elif not isinstance(spark_type, NumericType):\n raise TypeError(\n \"Could not convert {} ({}) to numeric\".format(\n spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()\n )\n )\n return SF.stddev(spark_column, ddof)\n\n return self._reduce_for_stat_function(\n std, name=\"std\", axis=axis, numeric_only=numeric_only, ddof=ddof, skipna=skipna\n )", "def _std(listvalue,ddof=1):\n\tmean=_mean(listvalue)\n\ttemp=[math.pow(i-mean,2) for i in listvalue]\n\tres=math.sqrt(sum(temp)/(len(listvalue)-ddof))\n\treturn res", "def column_stdev(column_values, mean):\n\n try:\n stdev = math.sqrt(\n sum([(mean-x)**2 for x in column_values]) / len(column_values))\n except ZeroDivisionError:\n print(\"Column is empty, cannot perform calculation\",\n file=sys.stderr)\n sys.exit(1)\n\n return stdev", "def _std_err(self):\n return np.sqrt(np.sum(np.square(self._resids), axis=-2) / self._df_err)" ]
[ "0.84144866", "0.82050383", "0.8168596", "0.8070408", "0.7985524", "0.796877", "0.79068154", "0.78719604", "0.78662646", "0.7861871", "0.7861871", "0.7852831", "0.78476846", "0.7799383", "0.77913153", "0.7748432", "0.7714268", "0.7689626", "0.76288074", "0.76285523", "0.7615825", "0.7582248", "0.7578129", "0.7557169", "0.7543551", "0.7539858", "0.7517216", "0.750992", "0.74703586", "0.7422362", "0.74214125", "0.74156505", "0.7403858", "0.73959035", "0.73822427", "0.7372341", "0.7365596", "0.73138416", "0.7288106", "0.72864205", "0.726908", "0.72524065", "0.72524065", "0.7234071", "0.71926874", "0.71522623", "0.7141249", "0.7118303", "0.71072793", "0.70911837", "0.70797306", "0.7076035", "0.7053571", "0.70513916", "0.70476043", "0.70354897", "0.70343655", "0.7014594", "0.7008674", "0.69862235", "0.69780123", "0.6976451", "0.69705886", "0.69634616", "0.69564587", "0.69537336", "0.6948953", "0.6948248", "0.6943478", "0.6902328", "0.688336", "0.6876687", "0.6871561", "0.68711114", "0.68711114", "0.68711114", "0.68711114", "0.68586516", "0.6851526", "0.6840823", "0.683806", "0.6836443", "0.6835002", "0.6830379", "0.68273044", "0.6820433", "0.68122464", "0.67868775", "0.6785795", "0.67804366", "0.6758705", "0.67532164", "0.6738598", "0.67257565", "0.66689765", "0.666439", "0.66457784", "0.6642525", "0.6640356", "0.661708", "0.66109234" ]
0.0
-1
Return data based on train mode.
def data(self, train=True): data = self.train_data if train else self.val_data return data.data, data.targets
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_data_train(self):\n return self.get_data(self.file_train, self.batch)", "def get_train_data():\n # train set\n train = pd.read_csv(\"train.csv\")\n\n return train", "def getTrainingData(self):\n raise NotImplementedError", "def train_data(self):\n return self._train_data", "def get_train(self, data_file):\r\n return self.read_data(data_file)", "def getData(trainSize):\r\n return splitData([getReal(), getFake()], trainSize=trainSize)", "def get_train(self, preprocess=False):\n return self._dataset('train', self._directory, 'sharded_train_0shifted_affnist.tfrecords')", "def is_training(self):\n return self.mode == \"train\"", "def is_training(self):\n return self.mode == \"train\"", "def get_data():\n transform = Compose([paddle.vision.Resize(32),\n Normalize(mean=[127.5], std=[127.5], data_format='CHW'),\n paddle.vision.transforms.Transpose()])\n train_data = paddle.vision.datasets.Cifar10(mode='train', transform=transform)\n l = len(train_data)\n return paddle.io.random_split(train_data, [l // 2, l - l // 2])", "def get_training_data() -> GraphDataset:\n _load_data_if_needed()\n return training_data", "def get_data(self):\n if self.config['model'] == 'vggnet':\n if self.is_training:\n return self.data.shuffle(self.shuffle).batch(self.batch_size)\n elif self.is_testing:\n return self.data.batch(self.batch_size)\n elif not self.is_testing and not self.is_training:\n return self.data.batch(self.batch_size)\n else:\n raise NotImplementedError('In dataset.py: default input not specified for this model!')", "def get_train(self, data_file):\n return self.read_data(data_file)", "def train(self, data):\n pass", "def input_fn(self,features, labels, batch_size, shuffle_num, mode):\r\n dataset = tf.data.Dataset.from_tensor_slices((features, labels))\r\n if mode == tf.estimator.ModeKeys.TRAIN:\r\n dataset = dataset.shuffle(shuffle_num).batch(batch_size).repeat(self.epochs)\r\n else:\r\n dataset = dataset.batch(batch_size)\r\n iterator = dataset.make_one_shot_iterator()\r\n data, labels = iterator.get_next()\r\n return data, labels", "def train(self) -> tf.contrib.data.Dataset:\n return self.__train_dataset", "def train(self, force=False):\n return self._fetch_base_data(force)", "def train_data(self):\n\n return self.__train_data, self.__train_labels", "def set_mode_train(self):\n self._set_mode('train')\n return self", "def get_data(\n data_type,\n train_fraction=1,\n added_edge_fraction=0,\n feature_noise_ratio=0,\n **kwargs):\n def to_mask(idx, size):\n mask = torch.zeros(size).bool()\n mask[idx] = True\n return mask\n path = osp.join(osp.dirname(osp.realpath(\"__file__\")), '..', 'data', data_type)\n # Obtain the mode if given:\n data_type_split = data_type.split(\"-\")\n \n data_type_full = data_type\n data_type = data_type_split[0]\n mode = \"lcc\" if \"lcc\" in data_type_split else None\n boolean = True if \"bool\" in data_type_split else False\n split = \"rand\" if \"rand\" in data_type_split else None\n \n # Load data:\n info = {}\n if data_type in [\"Cora\", \"Pubmed\", \"citeseer\"]:\n dataset = Planetoid(path, data_type, transform=T.NormalizeFeatures())\n data = dataset[0]\n info[\"num_features\"] = dataset.num_features\n info[\"num_classes\"] = dataset.num_classes\n info['loss'] = 'softmax'\n else:\n raise Exception(\"data_type {} is not valid!\".format(data_type))\n\n # Process the dataset according to the mode given:\n if mode is not None:\n if mode == \"lcc\":\n data = get_data_lcc(dataset.data)\n else:\n raise\n\n if boolean:\n data.x = data.x.bool().float()\n \n if split == 
\"rand\":\n unlabeled_share = 0.8\n val_share = 0.1\n train_share = 1 - unlabeled_share - val_share\n\n split_train, split_val, split_unlabeled = train_val_test_split_tabular(np.arange(data.x.shape[0]),\n train_size=train_share,\n val_size=val_share,\n test_size=unlabeled_share,\n stratify=to_np_array(data.y),\n random_state=kwargs[\"seed\"] if \"seed\" in kwargs else None,\n )\n data.train_mask = to_mask(split_train, data.x.shape[0])\n data.val_mask = to_mask(split_val, data.x.shape[0])\n data.test_mask = to_mask(split_unlabeled, data.x.shape[0])\n\n # Reduce the number of training examples by randomly choosing some of the original training examples:\n if train_fraction != 1:\n try:\n train_mask_file = \"../attack_data/{}/train_mask_tr_{}_seed_{}.p\".format(data_type_full, train_fraction, kwargs[\"seed\"] % 10)\n new_train_mask = pickle.load(open(train_mask_file, \"rb\"))\n data.train_mask = torch.BoolTensor(new_train_mask).to(data.y.device)\n print(\"Load train_mask at {}\".format(train_mask_file))\n except:\n raise\n ids_chosen = []\n n_per_class = int(to_np_array(data.train_mask.sum()) * train_fraction / info[\"num_classes\"])\n train_ids = torch.where(data.train_mask)[0]\n for i in range(info[\"num_classes\"]):\n class_id_train = to_np_array(torch.where(((data.y == i) & data.train_mask))[0])\n ids_chosen = ids_chosen + np.random.choice(class_id_train, size=n_per_class, replace=False).tolist()\n new_train_mask = torch.zeros(data.train_mask.shape[0]).bool().to(data.y.device)\n new_train_mask[ids_chosen] = True\n data.train_mask = new_train_mask\n make_dir(\"../attack_data/{}/\".format(data_type_full))\n pickle.dump(to_np_array(new_train_mask), open(\"../attack_data/{}/train_mask_tr_{}_seed_{}.p\".format(data_type_full, train_fraction, kwargs[\"seed\"] % 10), \"wb\"))\n\n # Add random edges for untargeted attacks:\n if added_edge_fraction > 0:\n data = add_random_edge(data, added_edge_fraction=added_edge_fraction)\n elif added_edge_fraction < 0:\n data = remove_edge_random(data, remove_edge_fraction=-added_edge_fraction)\n\n # Perturb features for untargeted attacks:\n if feature_noise_ratio > 0:\n x_max_mean = data.x.max(1)[0].mean()\n data.x = data.x + torch.randn(data.x.shape) * x_max_mean * feature_noise_ratio\n\n # For adversarial attacks:\n data.data_type = data_type\n if \"attacked_nodes\" in kwargs:\n attack_path = osp.join(osp.dirname(osp.realpath(\"__file__\")), '..', 'attack_data', data_type_full) \n if not os.path.exists(attack_path):\n os.makedirs(attack_path)\n try:\n with open(os.path.join(attack_path, \"test-node.pkl\"), 'rb') as f:\n node_ids = pickle.load(f)\n info['node_ids'] = node_ids\n print(\"Load previous attacked node_ids saved in {}.\".format(attack_path))\n except:\n test_ids = to_np_array(torch.where(data.test_mask)[0])\n node_ids = get_list_elements(test_ids, kwargs['attacked_nodes'])\n with open(os.path.join(attack_path, \"test-node.pkl\"), 'wb') as f:\n pickle.dump(node_ids, f)\n info['node_ids'] = node_ids\n print(\"Save attacked node_ids into {}.\".format(attack_path))\n return data, info", "def training_data(loan_data):\n return loan_data[loan_data.status != \"LIVE\"]", "def train(self, training_data):\n pass", "def get_data(self):\n return self.X_train, self.X_test, self.y_train, self.y_test", "def get_training_data(self):\n train_data = None\n \n if self.left_data is not None:\n train_data = self.left_data\n \n if self.right_data is not None:\n if train_data is not None:\n train_data = train_data.join(self.right_data)\n else:\n train_data = 
self.right_data\n \n return train_data", "def get_dataset(self):\n if self.mode == \"test\":\n return OnlineQueryDataset(self.mode, self.df, self.tokenizer)\n else:\n return OnlineQueryDataset(self.mode, self.df_reindex, self.tokenizer)", "def getTrainSet(self):\r\n return self.fTrainData", "def data():\n\n run_type = 'standardised'\n sr = 48000\n train_perc = 0.9\n\n if sr == 48000:\n time_dimension = 282\n if sr == 44100:\n time_dimension = 259\n\n x_train, y_train, x_test, y_test = essential.compile_dataset(run_type, sr)\n\n # reshape for CNN input\n x_train = np.array([x.reshape((128, time_dimension, 1)) for x in x_train])\n x_test = np.array([x.reshape((128, time_dimension, 1)) for x in x_test])\n\n # encoded \n encoder = LabelEncoder()\n encoder.fit(y_train)\n encoder.fit(y_test)\n y_train = encoder.transform(y_train)\n y_test = encoder.transform(y_test)\n\n return x_train, y_train, x_test, y_test", "def get_data(train_path,\n test_path,\n tokenize='spacy',\n max_vocab_size=25000,\n train_valid_split=0.8,\n toy=False):\n train_data = pd.read_csv(train_path)\n test_data = pd.read_csv(test_path)\n\n if toy:\n train_data = train_data.head(100)\n test_data = test_data.head(100)\n\n train_data, test_data = pre_process_df(train_data, test_data)\n\n train_data_path = \"train_processed.csv\"\n test_data_path = \"test_processed.csv\"\n\n train_data.to_csv(train_data_path, header=False, index=False)\n test_data.to_csv(test_data_path, header=False, index=False)\n\n if tokenize == 'spacy':\n TEXT = data.Field(tokenize=tokenize)\n else:\n TEXT = data.Field()\n\n LABEL = data.LabelField(dtype=torch.float)\n train = data.TabularDataset(path=train_data_path,\n format=\"csv\",\n fields=[('text', TEXT),\n ('label', LABEL)])\n test = data.TabularDataset(path=test_data_path,\n format=\"csv\",\n fields=[('text', TEXT),\n ('label', LABEL)])\n\n os.remove(train_data_path)\n os.remove(test_data_path)\n\n train, valid = train.split(train_valid_split)\n\n TEXT.build_vocab(train, max_size=max_vocab_size)\n LABEL.build_vocab(train)\n\n return TEXT, LABEL, train, valid, test", "def get_data(csv_filename,train_min_class_count,test_split=0.2,val_split=0.2):\n args = get_arguments(csv_filename)\n args_train, args_test, args_val = args_train_test_val_split(args,train_min_class_count=train_min_class_count)\n\n X_train = get_train_data(args_train)\n X_val = get_train_data(args_val)\n\n return X_train, X_val, args_test", "def get_train_full_examples(self, data_dir):\n raise NotImplementedError()", "def train(self, trainData):\n pass", "def get_training_data():\n \n X = pd.read_csv('../data/train_values.csv').set_index('sequence_id')\n y = pd.read_csv('../data/train_labels.csv').set_index('sequence_id')\n return X, y", "def load_data(self,split='train'):\n raise NotImplementedError", "def load_data(train_data,limit=0, split=0.8):\n # Partition off part of the train data for evaluation\n #random.shuffle(train_data)\n train_data = train_data[-limit:]\n Y,X = train_data[\"type\"], train_data[\"posts\"]\n y = []\n for y_ in Y:\n if y_[0] == 'I' : INTROVERTED = True\n else: INTROVERTED = False\n if y_[1] == 'N' : INTUTIVE= True\n else: INTUTIVE= False\n if y_[2] == 'T' : THINKING= True\n else: THINKING= False\n if y_[3] == 'J' : JUDGEMENTAL= True\n else: JUDGEMENTAL= False\n y.append({'INTROVERTED':INTROVERTED,\"INTUTIVE\":INTUTIVE,\"THINKING\":THINKING,\"JUDGEMENTAL\":JUDGEMENTAL})\n\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=split, random_state=42)\n return (X_train, y_train), (X_test, y_test)", 
"def get_train(self, preprocess=False):\n return self._dataset(self._directory, 'images_background_small1', preprocess)", "def train(self,path,mode):\n if mode == \"porto\":\n self.prepare_data(path)\n else:\n self.prepare_sumo_data(path)\n self.poly_regression()", "def load_data(train=True):\n#test set\n start = time.clock()\n\n if train==False:\n dp = '/home/thomas/Desktop/OttoChallenge/test.csv'\n df = pd.read_csv(dp)\n X = df.values.astype(np.float32)[:,1:]\n return X\n#train set\n else:\n dp = '/home/thomas/Desktop/OttoChallenge/train.csv'\n df = pd.read_csv(dp)\n X = df[df.columns[:-1]].values.astype(np.float32)[:,1:]\n y = df.target\n y =y.apply(lambda X: int(X[-1])).values\n y = y.astype(np.int32)\n X, y = shuffle(X, y)\n #print(X.shape,y.shape)\n end = time.clock()\n print(end-start)\n\n return X,y", "def train_test_dataset(self, train_rate=0.8):\n point_date = int(len(self.y) * train_rate)\n y_to_train = self.y[:point_date]\n y_to_val = self.y[point_date:]\n predict_date = len(self.y) - len(y_to_train) # the number of data points for the test set\n date_val = self.y.index[point_date]\n return y_to_train, y_to_val, predict_date, date_val", "def train(self):\n return", "def getTrainInstance(self): #NOTE: Probably faster way of doing this than additional 'if' statement every learning iteration\r\n return [self.currentTrainState, self.currentTrainPhenotype] #Return unadulterated training data\r", "def train(self, mode: bool = True):\n T = super().train(mode=mode)\n if mode:\n self.graph_construction()\n return T", "def get_data(file_size):\n data_funcs_by_size = {'small': data.get_small, 'medium': data.get_medium, 'large': data.get_large}\n all_data = data_funcs_by_size[file_size]()\n train_data, test_data = data.split(all_data, 0.8)\n return train_data, test_data", "def data(self):\n (x_train, y_train), (_, _) = datasets.fashion_mnist.load_data()\n x_train = x_train.reshape((-1, 28, 28, 1))\n x_train, y_train = x_train.astype('float16') / 255.0, \\\n tf.keras.utils.to_categorical(y_train.astype('float16'), 10)\n (x_train, x_eval) = x_train[5000:], x_train[:5000]\n (y_train, y_eval) = y_train[5000:], y_train[:5000]\n train_data, eval_data = (x_train, y_train), (x_eval, y_eval)\n return train_data, eval_data", "def eval(self):\n self.train(mode=False)", "def get_train_data(self) -> Tuple[np.array, np.array, np.array]:\n train_data = []\n for season in self.__train_seasons:\n train_data.extend(self.__get_season_data(season, sys.maxsize, True))\n train_input = np.array([ExamDropEncoder.extract_features(sample, sys.maxsize) for sample in train_data])\n train_output = np.array([1.0 if get_is_mol(sample.selected_player) else 0.0 for sample in train_data])\n\n num_bins = self.get_num_bins(train_input, self.__max_splits)\n self.__discretizer = KBinsDiscretizer(n_bins = num_bins, encode = \"onehot-dense\",\n strategy = ExamDropExtractor.BIN_STRATEGY)\n train_input = self.__discretizer.fit_transform(train_input)\n train_input = self.__add_answered_on_feature(train_data, train_input)\n self.__anova_f_filter = SelectFpr(f_classif, alpha = self.__anova_f_significance)\n train_input = self.__anova_f_filter.fit_transform(train_input, train_output)\n self.__pca = PCA(n_components = self.__pca_explain)\n train_input = self.__pca.fit_transform(train_input)\n return train_input, train_output, self.__get_train_weights(train_data)", "def load_data(self,split='train'):\n return load_arrow_data(self.config,split)", "def load_data():\n # Load data\n X_train = pd.read_csv(\n 
\"data/trimmed_merged_no_track_id_or_session_id.csv\"\n ) # load data from csv\n\n drop_columns = [\"mode\"] # drop unimportant columns\n X_train.drop(labels=drop_columns, axis=1, inplace=True)\n\n y_train = X_train[\"skip\"]\n X_train.drop(labels=\"skip\", axis=1, inplace=True) # ground truth from train_data\n\n # split data 60% train, 20% validation, 20% test\n X_train, X_val, y_train, y_val = train_test_split(\n X_train, y_train, test_size=0.2, random_state=1\n )\n X_train, X_test, y_train, y_test = train_test_split(\n X_train, y_train, test_size=0.25, random_state=1\n )\n\n print(\"X_train:\", len(X_train))\n print(\"y_train:\", len(y_train))\n print(\"X_val:\", len(X_val))\n print(\"y_val:\", len(y_val))\n print(\"X_test:\", len(X_test))\n print(\"y_test:\", len(y_test))\n\n return X_train, y_train, X_val, y_val, X_test, y_test", "def set_trainable(self, train_mode):\r\n if train_mode == 'graph':\r\n tf.add_to_collection('train_var_list', tf.get_collection(self.graph_scope))\r\n tf.add_to_collection('train_var_list', tf.get_collection(self.feature_scope))\r\n return tf.get_collection('train_var_list')\r\n elif train_mode == 'downstream':\r\n return []", "def get_train_data(path='.', is_train = True):\n\n try:\n with open('data/linky.pickle','rb') as file:\n Z = pickle.load(file)\n except FileNotFoundError:\n print('reading raw data')\n Z = pd.read_excel(\"data/linky.xlsx\")\n try:\n with open('data/linky.pickle','wb') as file:\n pickle.dump(Z, file)\n print('raw data pickled')\n except:\n print(\"Couldn't pickle raw file\")\n \n \n if is_train:\n # we are in train data\n start_row = 0\n end_row = int(Z.shape[0] * RATIO_TRAIN_TEST)\n else:\n start_row = int(Z.shape[0] * RATIO_TRAIN_TEST) \n end_row = Z.shape[0] \n \n Z = Z.iloc[start_row:end_row]\n if FAST_MODE:\n Z = Z.iloc[:3000]\n \n # print('get raw data, from row {} to row {}'.format(start_row, end_row))\n Y = Z.drop(columns = 'Id_client')\n Y = Y.fillna(0).values # hack to consider all columns and all rows (we can also drops rows or columns with NaN, or use an Imputer)\n return Z, Y", "def _prepare(self, data, train=True):\n if data is None:\n return None\n\n if hasattr(data, \"to_tfdataset\"):\n return data.to_tfdataset(train=train)\n else:\n return data", "def get_data(self, img_size=None, split=False, model_class=None):\n X, y = self.import_data(img_size)\n X = self.preprocess_data(X, model_class)\n\n if split:\n X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=7)\n return X_train, X_test, y_train, y_test\n\n else:\n return X, y", "def _get_training_dataset(self):\n\n return tf.data.Dataset.zip((self.conditioned_dataset, self.dataset))", "def getDataset(self, train=True):\n \n device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\") \n \n if self.dataset == \"ELLIPSE\":\n a = np.array([[0,1.0],[1.0,2.0]]) \n b = a*0.5 \n myE = el.ellipse(device, 500, 100, a, b) \n if train == True:\n return myE.create_dataset(myE.examples)\n return myE.create_dataset(myE.valid) \n \n if self.dataset == \"SWISS\": \n myS = sw.SwissRoll(device, 500, 0.2) \n if train == True:\n return myS.create_dataset(myS.examples)\n return myS.create_dataset(myS.valid)\n \n \n #open file\n myFile = h5py.File(self.dataString, 'r', self.driver)\n \n if train == True: \n inputString = \"train_inputs\"\n labelsString = \"train_labels\"\n \n else:\n inputString = \"test_inputs\"\n labelsString = \"test_labels\"\n \n #get hdf5 datsets\n features = myFile.get(inputString)\n labels = myFile.get(labelsString)\n \n #convert to 
tensors\n features = torch.from_numpy(np.array(features))\n labels = torch.from_numpy(np.array(labels))\n \n #close file to ensure dataset is in memory\n myFile.close()\n \n #conver to correct datatypes\n features = features.float()\n \n if self.conv_sg == False:\n labels = labels.long() \n \n dataset = torch.utils.data.TensorDataset(features, labels)\n \n return dataset", "def get_training_data(self):\n labels = self.get_labels()\n\n print 'Loading training data from ', self.train_folder , '...'\n train_index = []\n #train_ans = []\n train_text = []\n cnt = 0\n\n for f in listdir(self.train_folder):\n file_path = join(self.train_folder, f)\n if isfile(file_path):\n cnt += 1\n if cnt % 10000 == 0:\n print 'finished:', cnt # line counter\n #train_index.append(f[:-4])\n self.train_ans.append(labels[f[:-4]])\n with open(file_path, 'rb') as f:\n train_text.append( f.read() )\n\n return train_text", "def train(self, mode: bool = True):\n if self.nn_module.training != mode:\n self.nn_module.train(mode)", "def train(self):\n pass", "def train(self):\n pass", "def train(self):\n pass", "def train(self):\n pass", "def train(self):\n pass", "def get_model_data(filepath):\n \n data = load_data(filepath)\n data = clean_data(data)\n return train_test_split_data(data)", "def get_input_fn_training(Xtrain_ul, Xtrain_l, Xtest, ytrain_ul, ytrain_l, ytest, batch_size, num_labeled):\n dataset = input_data.Data(Xtrain_ul,\n Xtrain_l,\n Xtest,\n ytrain_ul,\n ytrain_l,\n ytest,\n num_labeled, \n batch_size, \n shuffle=True)\n return dataset.next_batch()", "def train():\n pass", "def load_data(self,split='train'):\n raise ValueError('Please implement me!')", "def get_data():\n iris = datasets.load_iris()\n xall = np.asarray(iris[\"data\"], dtype=np.float64)\n yall = np.asarray(iris[\"target\"], dtype=np.float64)\n xall = np.vstack([xall, (7, 2.0, 4.5, 1)])\n yall = np.append(yall, n_classes)\n X, Xval, y, yval = train_test_split(\n xall, yall, test_size=0.2, shuffle=True, random_state=12345\n )\n y = tf.one_hot(y, n_classes)\n yval = tf.one_hot(yval, n_classes)\n return X, y, Xval, yval", "def test_intent_classifier_get_training_samples(self):\n pass", "def _read_examples(self, split: base.Split) -> tf.data.Dataset:\n if split == base.Split.TEST:\n return tf.data.Dataset.range(self._num_test_examples)\n if split == base.Split.TRAIN:\n return tf.data.Dataset.range(self._num_train_examples)\n if split == base.Split.VAL:\n return tf.data.Dataset.range(self._num_validation_examples)", "def get_dataset(params, run_mode=\"train\"):\n tokenizer = get_tokenizer(params)\n # Use run_mode to decide input_folder, MR cols, MR max lens.\n msg_col, rsp_col = params.msg_col, params.rsp_col\n max_msg_len, max_rsp_len = params.max_msg_len, params.max_rsp_len\n if run_mode == \"train\":\n input_folder = params.train_input_dir\n elif run_mode == \"valid\":\n input_folder = params.valid_input_dir\n elif run_mode == \"gmr\":\n input_folder = params.gmr_input_dir\n if params.truncate is False:\n max_msg_len, max_rsp_len = np.inf, np.inf\n elif run_mode == \"rsp_set\":\n # TODO: What's the purpose of this mode?\n input_folder = params.rsp_input_dir\n msg_col, rsp_col = 0, params.rsp_text_col\n # TODO: These values should be global parameters instead of being hard coded like this\n # TODO: Why not just set these values to np.inf like above?\n if params.truncate is False:\n max_msg_len, max_rsp_len = 1000, 1000\n elif run_mode == \"eval\":\n input_folder = params.eval_input_dir\n elif run_mode == \"export\":\n # TODO: We should remove 
this mode from this function since it does nothing anyways\n return None, tokenizer\n else:\n raise ValueError(\"SystemLog: Invalid run mode %s.\" % run_mode)\n\n # We consider each file to be in a separate pytorch dataset. We then use ConcatDataset to combine individual datasets\n datasets = []\n total_file_processed = 0\n # This sorting of file is done to make sure that we get the same file order each time\n for file_idx, filename in enumerate(sorted(os.listdir(input_folder))):\n filepath = os.path.join(input_folder, filename)\n datasets.append(MRDataset(filepath, tokenizer, msg_col=msg_col,\n rsp_col=rsp_col, max_msg_len=max_msg_len,\n max_rsp_len=max_rsp_len, run_mode=run_mode, architecture=params.architecture, truncate=params.truncate))\n total_file_processed += 1\n if file_idx % 1000 == 0:\n print(\"SystemLog: %d files processed \" % file_idx)\n print(\"SystemLog: %d files processed in total.\" % total_file_processed)\n mr_dataset = ConcatDataset(datasets)\n\n return mr_dataset, tokenizer", "def get_train_examples(self):\n raise NotImplementedError()", "def get_train_examples(self, data_dir):\r\n raise NotImplementedError()", "def load_training_data(self) -> Tuple[List[np.ndarray], np.ndarray]:\n return self._load_set(config.TRAIN_DIR, True)", "def load_features_mode(feature_path, mode='test',\n num_workers=10, batch_size=128):\n feature_dataset = load_features(os.path.join(feature_path, f'features_{mode}'))\n feature_loader = ch.utils.data.DataLoader(feature_dataset, \n num_workers=num_workers,\n batch_size=batch_size, \n shuffle=False)\n\n feature_metadata = ch.load(os.path.join(feature_path, f'metadata_train.pth'))\n feature_mean, feature_std = feature_metadata['X']['mean'], feature_metadata['X']['std']\n \n\n features = []\n\n for _, (feature, _) in tqdm(enumerate(feature_loader), total=len(feature_loader)):\n features.append(feature)\n \n features = ch.cat(features).numpy()\n return features, feature_mean, feature_std", "def get_data_set(train=True):\n\n # 1\n train_or_test = \"train\" if train == True else \"test\"\n data_path = os.path.join(data_dir, \"aclImdb\",train_or_test)\n\n # 2\n pos_glob_pattern = os.path.join(data_path, \"pos\", \"*.txt\")\n neg_glob_pattern = os.path.join(data_path, \"neg\", \"*.txt\")\n pos_file_path_seq = glob.glob(pos_glob_pattern)\n neg_file_path_seq = glob.glob(neg_glob_pattern)\n\n # 3\n pos_dataset = [text_to_one_line(path) for path in pos_file_path_seq]\n neg_dataset = [text_to_one_line(path) for path in neg_file_path_seq]\n x = pos_dataset + neg_dataset\n y = [1.0] * len(pos_dataset) + [0.0] * len(neg_dataset)\n\n return x, y", "def read_ct_data(train_start, train_count, eval_start, eval_count):\n data = pd.read_csv('/opt/train.csv')\n\n # Dropping the id column\n data.drop(['ID_code'], axis=1, inplace=True)\n\n data = data.values\n return (data[train_start:train_start + train_count],\n data[eval_start:eval_start + eval_count])", "def __test_and_train(self):\n f = open(\"all_data_info.csv\")\n reader = csv.DictReader(f, delimiter=\",\")\n data = []\n for line in reader:\n if line['artist_group'] == \"train_and_test\" and line[\"in_train\"] == \"False\":\n # the img's artist is in training set\n # but the img is in test set only\n data.append((line['artist'], line['new_filename']))\n\n return data", "def model_data():\n x_train, y_train, x_val, y_val, x_test, y_test = read_data(\"src/tests/dataclassificationmodel/ferPlus_processed.pbz2\", False)\n return x_train, y_train, x_val, y_val, x_test, y_test", "def load_dataset_train():\n df_train = 
load_csv_file(\"31_train.csv\")\n df_train_target = load_csv_file(\"31_target_train.csv\")\n\n return df_train.values, df_train_target.values", "def test(self) -> tf.contrib.data.Dataset:\n return self.__test_dataset", "def train(self):\n raise NotImplementedError", "def load(cfg, train_mode, split, shot, query,\n bs, test_bs, num_workers, pin_memory,\n ret_name=False):\n if train_mode == \"train\":\n dataset = COCOTrain(cfg, split, shot, query, ret_name=ret_name)\n data_loader = DataLoader(dataset,\n batch_size=bs,\n shuffle=True,\n num_workers=num_workers,\n pin_memory=pin_memory,\n drop_last=False)\n else:\n dataset = COCOTest(cfg, split, shot, query, ret_name=ret_name)\n data_loader = DataLoader(dataset,\n batch_size=test_bs, # Large batch for evaluation\n shuffle=False,\n num_workers=num_workers,\n pin_memory=pin_memory,\n drop_last=False)\n num_classes = 80\n return dataset, data_loader, num_classes", "def dataloader(self):\n\n # load / split data\n train_data = self.data.get_train_data()\n if self.args.use_dev:\n train_data, dev_data = self.data.split_data(train_data)\n test_data = self.data.get_test_data()\n\n #print(train_data[0])\n #print(dev_data[0])\n #print(test_data[0])\n\n # build dataset\n train_dataset = self.loader.build_dataset(\n train_data, \n self.args.train_max_seq_len)\n train_loader = self.loader.build_dataloader(\n train_dataset, 'train')\n\n test_dataset = self.loader.build_dataset(\n test_data,\n self.args.eval_max_seq_len)\n test_loader = self.loader.build_dataloader(\n test_dataset, 'test')\n\n if self.args.use_dev:\n dev_dataset = self.loader.build_dataset(\n dev_data,\n self.args.eval_max_seq_len)\n dev_loader = self.loader.build_dataloader(\n dev_dataset, 'dev')\n return train_loader, dev_loader, test_loader\n else:\n return train_loader, test_loader", "def get_train_and_test_data(data_table: pd.DataFrame):\n\n y_label = get_label_data(data_table)\n x_features = get_features_data(data_table)\n\n # Convert categorical data to one-hot\n x_dummy_features = pd.get_dummies(x_features)\n\n return train_test_split(x_dummy_features, y_label, random_state=0)", "def get_test(self, preprocess=False):\n return self._dataset('test', self._directory, 'sharded_test_0shifted_affnist.tfrecords')", "def get_data(result_id=-1, train=True, root_path='Data/'):\n if train:\n x = get_dataset(get_file_path('x_train_gr_smpl.csv', path=root_path))\n else:\n x = get_dataset(get_file_path('x_test_gr_smpl.csv', path=root_path))\n filePicker = result_file_selector(result_id, train)\n y = get_dataset(get_file_path(filePicker, path=root_path))\n y.columns = ['y']\n return x, y", "def train(self, batch_training=False):\n raise NotImplementedError", "def training_data():\n global _MEAN # pylint: disable=global-statement\n _np.random.seed(1)\n view = _skdc10.view.OfficialImageClassificationTask()\n permutation = _np.random.permutation(range(50000))\n if _MEAN is None:\n _MEAN = view.train.x.reshape((50000 * 32 * 32, 3)).mean(axis=0)\n return ((view.train.x[:50000, :][permutation, :] - _MEAN).\n transpose((0, 3, 1, 2)).astype('float32'),\n view.train.y[:50000][permutation].reshape((50000, 1)).astype('float32'))", "def _load_training_data(base_dir):\n train_data = pd.read_csv(os.path.join(base_dir, \"train_vale.csv\")).adjclose.values\n return _data_transformation(train_data)", "def train(self, training_data, cfg, **kwargs):\n pass", "def get_dataloader(self, mode, label_mode, batch_size):\n cached_features_file = self._feature_file(mode, label_mode)\n logger.info('Loading features from cached 
file %s', cached_features_file)\n features = torch.load(cached_features_file)\n\n all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)\n all_input_mask = torch.tensor([f.input_mask for f in features], dtype=torch.long)\n all_segment_ids = torch.tensor([f.segment_ids for f in features], dtype=torch.long)\n all_label_ids = torch.tensor([f.label_ids for f in features], dtype=torch.long)\n all_emph_probs = torch.tensor([f.emph_probs for f in features], dtype=torch.float)\n\n dataset = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label_ids, all_emph_probs)\n\n if mode == 'train':\n sampler = RandomSampler(dataset)\n else:\n sampler = SequentialSampler(dataset)\n\n dataloader = DataLoader(dataset=dataset, batch_size=batch_size, sampler=sampler)\n return dataloader", "def get_train_examples(self, data_dir):\n raise NotImplementedError()", "def get_train_examples(self, data_dir):\n raise NotImplementedError()", "def _load_training_data(self):\n self._save_training_data()", "def training_data(kind, depth = 5):\n\n if kind == 'unigram':\n return UnigramTrainingData.load(UNIGRAM_DIR + str(depth))\n\n if kind == 'rnn':\n return RNNTrainingData.load(RNN_DIR + str(depth))", "def get_test(self, even=None):\n\n self.get_train(even)", "def load_data(self, dataset='cifar10', label_mode='fine'):\n if dataset == 'cifar10':\n if self.root:\n x_train, y_train = self.load_from_path(\n [os.path.join(self.root, f'data_batch_{i}') for i in range(1, 6)])\n x_test, y_test = self.load_from_path(\n [os.path.join(self.root, 'test_batch')])\n x_test = x_test.astype(x_train.dtype)\n y_test = y_test.astype(y_train.dtype)\n return (x_train, y_train), (x_test, y_test)\n else:\n return tf.keras.datasets.cifar10.load_data()\n elif dataset in ['cifar20', 'cifar100']:\n if self.root:\n x_train, y_train = self.load_from_path(\n [os.path.join(self.root, 'train')], label_key=label_mode)\n x_test, y_test = self.load_from_path([os.path.join(self.root, 'test')])\n x_test = x_test.astype(x_train.dtype)\n y_test = y_test.astype(y_train.dtype)\n return (x_train, y_train), (x_test, y_test)\n else:\n return tf.keras.datasets.cifar100.load_data(label_mode=label_mode)", "def train(self):\n return self.with_transforms(\"train\")", "def test_text_classifier_get_training_samples(self):\n pass", "def load_data():\n print(\"PARSING TRAIN\")\n ys_train, x_train, ids_train = load_pickle_data(\"ys_train\"), load_pickle_data(\"x_train\"), load_pickle_data(\n \"ids_train\")\n if ys_train is None or x_train is None or ids_train is None:\n ys_train, x_train, ids_train = load_csv_data(\"{}/train.csv\".format(DATA_DIR))\n dump_pickle_data(ys_train, \"ys_train\")\n dump_pickle_data(x_train, \"x_train\")\n dump_pickle_data(ids_train, \"ids_train\")\n\n print(\"PARSING TEST\")\n x_test, ids_test = load_pickle_data(\"x_test\"), load_pickle_data(\"ids_test\")\n if x_test is None or ids_test is None:\n _, x_test, ids_test = load_csv_data(\"{}/test.csv\".format(DATA_DIR))\n dump_pickle_data(x_test, \"x_test\")\n dump_pickle_data(ids_test, \"ids_test\")\n\n return ys_train, x_train, ids_train, x_test, ids_test", "def train(self):\n self.mode = \"train\"\n self.online_net.train()", "def train(self):\n self.mode = \"train\"\n self.online_net.train()" ]
[ "0.71724296", "0.70787793", "0.7011355", "0.6915143", "0.67346495", "0.6732012", "0.66541547", "0.66510093", "0.66510093", "0.66229326", "0.6598379", "0.658925", "0.6583259", "0.65264785", "0.6473328", "0.645411", "0.6418035", "0.6416727", "0.6339897", "0.6322232", "0.6320401", "0.63181967", "0.62793654", "0.6268495", "0.6261093", "0.62438995", "0.6232759", "0.6232532", "0.6220166", "0.6191163", "0.61842275", "0.618006", "0.61747277", "0.61619323", "0.61570835", "0.6150088", "0.61465216", "0.6139409", "0.6126033", "0.611728", "0.6109946", "0.6086746", "0.60845244", "0.6083093", "0.60595924", "0.60406125", "0.60329515", "0.6030822", "0.6026278", "0.6026171", "0.6015131", "0.6010001", "0.60031396", "0.59891623", "0.5986167", "0.5972855", "0.5972855", "0.5972855", "0.5972855", "0.5972855", "0.5972835", "0.596707", "0.59652793", "0.59576976", "0.59483236", "0.59475315", "0.5935595", "0.5933211", "0.59310234", "0.59280133", "0.5925724", "0.5921319", "0.59197694", "0.591676", "0.59060615", "0.5902132", "0.5899176", "0.5895174", "0.5890626", "0.5888368", "0.5888153", "0.5888054", "0.588449", "0.588172", "0.58725446", "0.58690304", "0.58656824", "0.58632344", "0.5838398", "0.5837458", "0.5837458", "0.5835732", "0.5829535", "0.5826957", "0.5826603", "0.5823503", "0.58210945", "0.5818225", "0.58040863", "0.58040863" ]
0.6745248
4
Unnormalize a given image.
def unnormalize(self, image, transpose=False): return unnormalize(image, self.mean, self.std, transpose)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def normalise(image):", "def normalize_image(img):\n arr = np.array(img)\n new_img = Image.fromarray(normalize(arr).astype('uint8'),'L')\n return new_img", "def reverse_normalize(image):\n\n reverse = transforms.Normalize(mean=[-0.485 / 0.229, -0.456 / 0.224, -0.406 / 0.255],\n std=[1 / 0.229, 1 / 0.224, 1 / 0.255])\n return reverse(image)", "def normalization(image):\n return (image - np.min(image)) / (np.max(image) - np.min(image))", "def normalize(image):\r\n return image / 127.5 - 1.", "def normalize(image):\n return image / 127.5 - 1.", "def deprocess_image(img):\n # normalize tensor: center on 0., ensure std is 0.1\n img -= img.mean()\n img /= (img.std() + 1e-5)\n img *= 0.1\n\n # clip to [0, 1]\n img += 0.5\n img = np.clip(img, 0, 1)\n\n # convert to RGB array\n img *= 255\n\n # TF image format if channels = (1 or 3) towards the last rank.\n if img.shape[-1] != 3 and img.shape[-1] != 1:\n img = img.transpose((1, 2, 0))\n\n img = np.clip(img, 0, 255).astype('uint8')\n return img", "def normalize_image(image):\n return image / 255.", "def _normalize(image):\n return tf.multiply(tf.subtract(image, 0.5), 2.0)", "def normalize_image(image):\n image = image.astype(np.float32) / 255.0\n\n return image", "def undo_normalise(img):\n\treturn img + CONFIG.MEAN_PIXEL", "def normalize(img):\n # TODO: implement this function.\n min_img = min([min(i) for i in img])\n max_img = max([max(i) for i in img])\n\n for i in range(len(img)):\n \tfor j in range(len(img[0])):\n \t\timg[i][j] = ((img[i][j] - min_img) / (max_img - min_img))\n #raise NotImplementedError\n return img", "def _normalize_image(self, img: np.ndarray) -> np.ndarray:\n i2 = img.astype(float) - self.bg\n i2 /= i2.max()\n return i2", "def normalize(img):\r\n return ((img / 255.0) - 0.5) / 0.5", "def normalize(img):\n img = np.clip(img, 0, 255).astype(np.uint8)\n return img / 255", "def normalize(image):\n min = np.min(image)\n max = np.max(image)\n normalImg = 255*(image - min) / (max - min)\n return normalImg", "def normalize_data(img):\n nor = np.linalg.norm(img, axis = 1)\n nor = np.reshape(nor, (len(img), 1))\n img = np.divide(img, nor)\n return img", "def normalize_img(img: np.ndarray, bit_depth: int) -> np.ndarray:\n return img / ((1 << bit_depth) - 1)", "def reshape_normalise(img):\n\t# The image shape is expected to match the input of VGG19\n\timg = np.resize(img, (1, CONFIG.IMAGE_HEIGHT, CONFIG.IMAGE_WIDTH, CONFIG.COLOR_CHANNELS)).astype('float32')\n\timg -= CONFIG.MEAN_PIXEL\n\treturn img", "def normalize(self):\n self.image = rescale_intensity(self.image, out_range=(0, 255))", "def normalization_func(img):\n vmin, vmax = img.min(), img.max()\n if vmin != vmax:\n im = (img - vmin) / (vmax - vmin)\n else:\n im = np.ones(img.shape)\n return im", "def normalize(img):\n norm = cvCreateImage(cvSize(img.width, img.height), IPL_DEPTH_32F, 1)\n cvCopy(img, norm)\n cvNormalize(norm, norm, 1, 0, CV_MINMAX)\n norm_u = cvCreateImage(cvSize(img.width, img.height), IPL_DEPTH_8U, 1)\n cvConvertScale(norm, norm_u, 255)\n return norm_u", "def normalize_image(img):\n min_, max_ = float(np.min(img)), float(np.max(img))\n return (img - min_) / (max_ - min_)", "def unnormalize(images, mean, std):\n \n unnorm_images = images * std + mean\n \n \n return unnorm_images", "def turn_intensity_normalization_off(self):\n self.intensity_normalize_image = False", "def normalize_image(im):\n pixels = im.flatten()\n\n # scale pixels to range 0 to 1\n normalized_im = (pixels - np.min(pixels)) / (np.max(pixels) - np.min(pixels))\n\n # scale the pixels by 
255\n normalized_im = (normalized_im.reshape(im.shape) * 255).astype(np.uint8)\n\n return normalized_im", "def normalize_image(img):\n\n # Load image and convert to grayscale\n img = rgb2gray(img)\n\n # Normalize values, range 0 to 255\n img = (img - img.min()) / (img.max() - img.min())\n img *= 255\n\n # Make int values\n img = img.astype(int)\n\n # Return new image\n return img", "def reshape_and_normalize_image(image):\n # Reshape image to mach expected input of VGG16\n image = np.reshape(image, ((1,) + image.shape))\n # Substract the mean to match the expected input of VGG16\n image = image - CONFIG.MEANS\n \n return image", "def normalize(img):\n img = img.astype(np.float32)\n img -= img.min()\n img /= img.max()\n img *= 255\n img = img.astype(np.uint8)\n\n return img", "def normalize_images(image_sitk):\n\n max = 400\n min = -1000\n\n image_np = sitk.GetArrayFromImage(image_sitk)\n\n # Normalization\n image_np = (image_np - min)/(max - min)\n image_np[image_np > 1] = 1\n image_np[image_np < 0] = 0\n\n # Convert back to SITK\n out_image_sitk = sitk.GetImageFromArray(image_np)\n out_image_sitk.CopyInformation(image_sitk)\n\n return out_image_sitk", "def applyMorphologicalCleaning(self, image):", "def img_normalize(image, label):\n mean, std = ds_stats\n image -= tf.constant(mean, shape=[1, 1, num_channels], dtype=image.dtype)\n image /= tf.constant(std, shape=[1, 1, num_channels], dtype=image.dtype)\n return image, label", "def normalize(image, label):\n image -= settings.DATASET_MEAN\n image /= settings.DATASET_STD\n\n return image, label", "def normalise_image(image, use_torch=True):\n if use_torch:\n image = torch.abs(image)\n else:\n image = np.abs(image)\n if (image.max() - image.min()) < 1e-5:\n return image - image.min() + 1e-5\n else:\n return (image - image.min()) / (image.max() - image.min())", "def imgNormalize(img): \n constant = np.sum(sitk.GetArrayFromImage(img))*np.prod(img.GetSpacing())\n return img/constant", "def normalize(self, image, transpose=False, data_type=None):\n return normalize(image, self.mean, self.std, transpose)", "def normalize(img):\n\n def normalize_pixel(x):\n return (x - 128) / 128\n\n normalize_vector = np.vectorize(normalize_pixel)\n return normalize_vector(img)", "def normalize(im: np.ndarray) -> np.ndarray:\n im = im.astype(np.float32)\n return (im - im.min()) / (im.max() - im.min())", "def normlized_image(image):\n if image.dtype != np.float32:\n image = image.astype(np.float32)\n if image.max() > 1:\n image /= 255\n\n return image", "def unmold_image(normalized_images, config=None):\n img = normalized_images[:, :, :3]\n mask = normalized_images[:, :, 3] > 0.5\n mask = np.expand_dims(mask, -1).astype(np.bool)\n if np.min(img) < 0:\n img = img*127.5 +127.5\n else:\n img = img * 255\n return img.astype(np.uint8), mask", "def normalize_image(self, image, colorspace=None):\n float_image = image.astype(numpy.float32)\n if float_image.__array_interface__[\n \"data\"][0] == image.__array_interface__[\"data\"][0]:\n float_image = float_image.copy()\n float_image -= float_image.min()\n max_value = float_image.max()\n if max_value:\n max_value /= 255.0\n float_image /= max_value\n else:\n float_image[:] = 127.5\n normalized_image = float_image.astype(numpy.uint8)\n if (colorspace != \"RGB\" and len(normalized_image.shape) == 3\n and normalized_image.shape[2] == 3):\n import cv2\n normalized_image = cv2.cvtColor(\n normalized_image, getattr(cv2, \"COLOR_\" + colorspace + \"2RGB\"))\n return normalized_image", "def _postprocess(img):\n img = 
_scale_to_zero_one(img)\n img = img.reshape(1, -1) # to avoid a scikit-learn deprecation warning later\n return img", "def unmold_image(normalized_images, config):\n return (normalized_images + config.MEAN_PIXEL).astype(np.uint8)", "def normalize(image, xbar, sigma):\n image = image.transpose(2, 0, 1) # Switch to channel-first\n mean, std = np.array(xbar), np.array(sigma)\n image = (image - mean[:, None, None]) / std[:, None, None]\n return image.transpose(1, 2, 0)", "def preprocess_img(img):\n # Make a copy of img as array\n img = np.array(img)\n\n # Convert into tensor\n img = torch.Tensor(img).permute(2, 0, 1) / 255.0\n\n # Normalize\n for t, m, s in zip(img, TORCH_IMG_MEAN, TORCH_IMG_STD):\n t.sub_(m).div_(s)\n\n return img", "def normalize(image):\n mean = image.mean()\n stddev = image.std()\n adjusted_stddev = max(stddev, 1.0/math.sqrt(image.size))\n standardized_image = (image - mean) / adjusted_stddev\n \n return standardized_image", "def normalize(image):\n mean = image.mean()\n stddev = image.std()\n adjusted_stddev = max(stddev, 1.0/math.sqrt(image.size))\n standardized_image = (image - mean) / adjusted_stddev\n \n return standardized_image", "def normalize(image):\n image = image.astype(np.float32)\n mean = np.mean(image)\n std = np.std(image)\n if std > 0:\n ret = (image - mean) / std\n else:\n ret = image * 0.\n return ret", "def normalize_image(image, img_size):\n res = tf.image.resize_image_with_crop_or_pad(image, target_height=img_size, target_width=img_size)\n res = (tf.to_float(res) - 127.5) / 127.5\n res = tf.reshape(res, (img_size, img_size, 1))\n res.shape.assert_is_compatible_with([img_size, img_size, 1])\n return res", "def deprocess_image(self, processed_img):\n img_to_unprocess = processed_img.copy()\n if len(img_to_unprocess.shape) == 4:\n img_to_unprocess = np.squeeze(img_to_unprocess, 0)\n img_to_unprocess[:, :, 0] += 103.939\n img_to_unprocess[:, :, 1] += 116.779\n img_to_unprocess[:, :, 2] += 123.68\n img_to_unprocess = img_to_unprocess[:, :, ::-1]\n\n img_to_unprocess = np.clip(img_to_unprocess, 0, 255).astype('uint8')\n return img_to_unprocess", "def unscale(image):\n return tf.cast(tf.math.multiply(image, 255), tf.uint8)", "def normalize_img(img):\n channel_mean = img.mean(axis=0, keepdims=True).mean(axis=1, keepdims=True)\n channel_std = img.std(axis=0, keepdims=True).mean(axis=1, keepdims=True)\n return (img - channel_mean) / channel_std", "def normalize_image(self):\n # The image normalization is identical to Cloud TPU ResNet.\n self._image = tf.image.convert_image_dtype(self._image, dtype=tf.float32)\n offset = tf.constant(DATASET_MEAN)\n offset = tf.expand_dims(offset, axis=0)\n offset = tf.expand_dims(offset, axis=0)\n self._image -= offset\n\n scale = tf.constant(DATASET_VAR)\n scale = tf.expand_dims(scale, axis=0)\n scale = tf.expand_dims(scale, axis=0)\n self._image /= scale", "def normalize(img):\r\n min = img.min()\r\n max = img.max()\r\n x = 2.0 * (img - min) / (max - min) - 1.0\r\n return x", "def deprocess_im(im, dtype=None):\n im = np.array(im)\n return ((255.0 * (im + 1.0))/2.0).astype(dtype)", "def image_normalize(image, image_dims):\r\n if image.dtype != 'float32':\r\n image = image.astype(dtype=np.float32)\r\n if np.size(image_dims) == 2:\r\n maxpx = np.max(image[:, :])\r\n if maxpx == float(0):\r\n maxpx = 1e-12 # fuzz factor\r\n minpx = np.min(image[:, :])\r\n image[:, :] = (image[:, :] - minpx) / (maxpx - minpx)\r\n else:\r\n for i in range(image_dims[2]): # find max/min for each channel\r\n maxpx = np.max(image[:, :, i])\r\n if maxpx 
== float(0):\r\n maxpx = 1e-12 # fuzz factor\r\n minpx = np.min(image[:, :, i])\r\n image[:, :, i] = (image[:, :, i] - minpx) / (maxpx - minpx)\r\n return image", "def normalize_array(image_array):\n\n array = image_array.astype(np.float)\n array /= 255.0\n return array", "def normalization(img):\n max_val = img.max()\n min_val = img.min()\n\n return ((img-min_val)*255)/(max_val-min_val)", "def preprocess(image):\n return image - MEAN_PIXEL", "def __normalize(input, type, a, b):\n return cv2.normalize(input, None, a, b, type)", "def denormalize(img, means, stds, resize_to_original=False):\n\n img = np.moveaxis(img, 0, 2)\n img = img*stds + means\n img = np.clip(img, 0, 255).astype('uint8')\n\n if resize_to_original:\n # revert def preprocess_image()\n img = img[:,(img_w//4): (img_w - img_w//4),:]\n img = cv2.copyMakeBorder( img, img.shape[0], 0,0,0, cv2.BORDER_CONSTANT) #, borderType)\n img = cv2.resize(img, (img_orig_w, img_orig_h))\n \n return img", "def preprocess(img):\n \n scaler=StandardScaler() ## scaler object to perform preprocessing\n img=scaler.fit_transform(img) ## zero-center and normalize\n \n return img", "def add_image_normalization(self):\n self.methods.append(self._normalize_image)\n self.args.append(None)", "def _normalize(images):\n images -= images.mean(axis=0, keepdims=True)\n images /= np.maximum(images.std(axis=0, keepdims=True), 3e-1)", "def _normalize(images):\n images -= images.mean(axis=0, keepdims=True)\n images /= np.maximum(images.std(axis=0, keepdims=True), 3e-1)", "def normalize(\n self,\n image: np.ndarray,\n data_format: Optional[Union[str, ChannelDimension]] = None,\n input_data_format: Optional[Union[str, ChannelDimension]] = None,\n ) -> np.ndarray:\n image = rescale(image=image, scale=1 / 127.5, data_format=data_format, input_data_format=input_data_format)\n image = image - 1\n return image", "def normalize((image)):\n new = np.zeros((7,7))\n index = 0\n for i in [0, 4, 7, 10, 14, 17, 21]:\n for j in [0, 4, 7, 10, 14, 17, 21]:\n new[index / 7][index % 7] += float(np.sum(image[i:(i+6),j:(j+6)]))/49\n index += 1\n return new", "def _normalize(self, x):\n # TODO: imagenet normalization\n\n return x", "def turn_intensity_normalization_on(self):\n self.intensity_normalize_image = True", "def unpropagateImage(self, dryrun):\n pass", "def imnormalize_tensor(self, img, mean, std, to_rgb=True):\n mean = np.float32(mean.reshape(1, -1))\n stdinv = 1 / np.float32(std.reshape(1, -1))\n if to_rgb:\n img = img[:, :, [2, 1, 0]]\n img = torch.sub(img, torch.tensor(mean).cuda())\n img = torch.mul(img, torch.tensor(stdinv).cuda())\n return img", "def unprocess(image):\n return image + MEAN_PIXEL", "def _preprocessing(self, input_image):\n if self.resize:\n input_image = self._np_resize_image(input_image,\n self.input_size,\n dtype='int')\n image = self._np_transpose(input_image)\n image = self._np_normalize(image)\n image = self._np_flip_n_cat(image)\n return image", "def normalize(batch_img: np.ndarray) -> np.ndarray:\n batch_img = batch_img.astype('float32')\n return batch_img / 127.5 - 1", "def variance_normalize(self):\n self.img = self.img / np.sqrt(np.sum(self.img ** 2))", "def undistort(self, image):\n dst = cv2.undistort(image, self.mtx, self.dist_coeff, None)\n\n if self.args.is_test:\n self.image_logger.save_image(dst, 'undistorted')\n images = [[{'title': 'Original', 'data': image},\n {'title': 'Undistorted', 'data': dst}]]\n self.image_logger.plot_results(images)\n return dst", "def preprocess(self, img):\n return img - np.mean(img)", "def 
__preprocesses_image(self, image, size):\n\n # Resize image\n image = cv2.resize(image, (size, size))\n\n # Normalize image (min-max)\n image = np.float32((image - np.min(image)) / (np.max(image) - np.min(image) + 0.00001))\n\n return image", "def normalise(dataset):\n # Scale images to the [0, 1] range\n dataset = dataset.astype(\"float32\") / 255\n # Make sure images have shape (28, 28, 1)\n return np.expand_dims(dataset, -1)", "def normalization_brain(img, mask):\n zone1 = img[mask != 0]\n imge = img.copy()\n imge[mask != 0] = (zone1 - zone1.min()) / (zone1.max() - zone1.min())\n imge[mask == 0] = 0\n return imge", "def un_normalize(tensor, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]):\n mean = torch.FloatTensor(mean).view(1,3,1,1)\n std = torch.FloatTensor(std).view(1,3,1,1)\n \n image = tensor.cpu().detach()\n image = image*std+mean\n image = image.numpy()\n \n image = np.transpose(image, (0,2,3,1))\n \n #print(np.max(image))\n #print(np.min(image))\n return image", "def undistort(self, image):\n return cv2.undistort(image, self.camera_matrix, self.distortion_coeffs, None, self.camera_matrix)", "def undistort(self, image):\n return cv2.undistort(image, self.mtx, self.dst, None, self.mtx)", "def _normalize_images(self, images: th.Tensor) -> th.Tensor:\n output = ((images+2)/4 - self._norm_mean)/self._norm_std\n return output", "def normalize(image, ww=1500, wl=-400):\n\n low, high = wl - ww / 2, wl + ww / 2\n image = np.clip(image, low, high)\n image = (image - low) / ww\n return image", "def normalization(imgs):\n\n imgs = np.asarray(imgs).astype(np.float32)\n imgs = np.expand_dims(imgs / 255, axis=-1)\n return imgs", "def normalize_image(img_arr_uint):\n return img_arr_uint.astype(np.float64) * ONE_BYTE_SCALE", "def un_distort_image(image):\n global _remap_x, _remap_y\n image = cv2.UMat(image)\n res = cv2.remap(image, _remap_x, _remap_y, cv2.INTER_LINEAR) # 进行remap\n res = res.get()\n return res", "def image_normalize(im, axis=(0, 1), c=1e-8):\n return (im - im.mean(axis)) / (im.std(axis) + c)", "def min_max_normalize_one_image(image):\n\n image = image.astype(np.float32)\n for i in range(len(image)):\n max_int = image[i].max()\n min_int = image[i].min()\n image[i] = (image[i] - min_int) / (max_int - min_int)\n\n return image", "def unnormalize(tensor, mean, std, inplace: bool = False) :\n if not isinstance(tensor, torch.Tensor):\n raise TypeError('Input tensor should be a torch tensor. Got {}.'.format(type(tensor)))\n\n if tensor.ndim < 3:\n raise ValueError('Expected tensor to be a tensor image of size (..., C, H, W). 
Got tensor.size() = '\n '{}.'.format(tensor.size()))\n\n if not inplace:\n tensor = tensor.clone()\n\n dtype = tensor.dtype\n mean = torch.as_tensor(mean, dtype=dtype, device=tensor.device)\n std = torch.as_tensor(std, dtype=dtype, device=tensor.device)\n if (std == 0).any():\n raise ValueError('std evaluated to zero after conversion to {}, leading to division by zero.'.format(dtype))\n if mean.ndim == 1:\n mean = mean.view(-1, 1, 1)\n if std.ndim == 1:\n std = std.view(-1, 1, 1)\n tensor.mul_(std).add_(mean)\n return tensor", "def normalization_mask(img, mask):\n zone1 = img[mask != 0]\n zone2 = img[mask == 0]\n zone1 = (zone1 - zone1.min()) / (zone1.max() - zone1.min())\n zone2 = (zone2 - zone2.min()) / (zone2.max() - zone2.min())\n imge = img.copy()\n imge[mask != 0] = zone1\n imge[mask == 0] = zone2\n return imge", "def normalize_gray_image(img):\n dtype = img.dtype\n _max = img.max()\n _min = img.min()\n ret = (img - _min).astype(np.float64) / (_max - _min) * 255.0\n return ret.astype(dtype)", "def _normalize(self):\r\n self.dataframe['norm_intensity'] = self.dataframe['intensity']\r\n self.dataframe['norm_intensity'] -= self.dataframe['norm_intensity'].min()\r\n self.dataframe['norm_intensity'] /= self.dataframe['norm_intensity'].max() * 0.01", "def normalize(image, target=None):\n if target is None:\n target = np.array([[148.60, 41.56], [169.30, 9.01], [105.97, 6.67]])\n\n whitemask = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)\n whitemask = whitemask > 215\n\n imagelab = cv2.cvtColor(image, cv2.COLOR_RGB2LAB)\n\n imageL, imageA, imageB = cv2.split(imagelab)\n\n # mask is valid when true\n imageLM = np.ma.MaskedArray(imageL, whitemask)\n imageAM = np.ma.MaskedArray(imageA, whitemask)\n imageBM = np.ma.MaskedArray(imageB, whitemask)\n\n # Sometimes STD is near 0, or 0; add epsilon to avoid div by 0 -NI\n epsilon = 1e-11\n\n imageLMean = imageLM.mean()\n imageLSTD = imageLM.std() + epsilon\n\n imageAMean = imageAM.mean()\n imageASTD = imageAM.std() + epsilon\n\n imageBMean = imageBM.mean()\n imageBSTD = imageBM.std() + epsilon\n\n # normalization in lab\n imageL = (imageL - imageLMean) / imageLSTD * target[0][1] + target[0][0]\n imageA = (imageA - imageAMean) / imageASTD * target[1][1] + target[1][0]\n imageB = (imageB - imageBMean) / imageBSTD * target[2][1] + target[2][0]\n\n imagelab = cv2.merge((imageL, imageA, imageB))\n imagelab = np.clip(imagelab, 0, 255)\n imagelab = imagelab.astype(np.uint8)\n\n # Back to RGB space\n returnimage = cv2.cvtColor(imagelab, cv2.COLOR_LAB2RGB)\n # Replace white pixels\n returnimage[whitemask] = image[whitemask]\n\n return returnimage", "def norm_img(img):\n img_arr = np.array(img).astype(float)\n max_val = np.amax(img_arr)\n if max_val > 0:\n img_arr /= max_val\n return img_arr", "def clean(img):\n\n label_img = label(img, connectivity=2)\n props = sorted(regionprops(label_img), key=lambda x: x.area)\n clean = morphology.binary_closing(img)\n\n clean = morphology.remove_small_holes(clean)\n return morphology.remove_small_objects(clean,\n int(np.floor(props[-1].area) / 10), connectivity=2)", "def preprocess_img_inv(img):\n img = img.data.numpy().copy()\n\n img[0] = img[0] * TORCH_IMG_STD[0] + TORCH_IMG_MEAN[0]\n img[1] = img[1] * TORCH_IMG_STD[1] + TORCH_IMG_MEAN[1]\n img[2] = img[2] * TORCH_IMG_STD[2] + TORCH_IMG_MEAN[2]\n img = img.transpose(1, 2, 0) * 255.0\n\n return img.round().astype('uint8')", "def invert(self, img):\n return self.inverse()(img)", "def post_process_image(image):\n\n image = (image - np.min(image)) * (255 / (np.max(image) - 
np.min(image)))\n\n return image" ]
[ "0.78914344", "0.76616216", "0.7482308", "0.7246689", "0.7187682", "0.71326315", "0.71250784", "0.69919527", "0.6929422", "0.6927052", "0.6911609", "0.6908462", "0.6908327", "0.69034034", "0.6890873", "0.6881274", "0.6876974", "0.68578804", "0.6824972", "0.68119633", "0.67838544", "0.6769825", "0.6764126", "0.6737152", "0.6724794", "0.67195857", "0.66720265", "0.6648073", "0.6644086", "0.6641788", "0.6553747", "0.65370464", "0.6535497", "0.65252304", "0.652006", "0.6500258", "0.64625746", "0.6443028", "0.6441653", "0.64295125", "0.64196724", "0.6415836", "0.6403193", "0.63896465", "0.6377875", "0.63537854", "0.63537854", "0.6349227", "0.6337637", "0.6325106", "0.6324523", "0.6305438", "0.6301769", "0.6292054", "0.62781656", "0.62738365", "0.62670094", "0.6206356", "0.61923206", "0.61871016", "0.6166331", "0.61659336", "0.61646634", "0.614502", "0.614502", "0.61411506", "0.6129469", "0.6122059", "0.610477", "0.609783", "0.60932505", "0.6090339", "0.60839754", "0.60781324", "0.60572255", "0.6055053", "0.6053355", "0.60487527", "0.60421425", "0.6037916", "0.60234135", "0.6021111", "0.5999909", "0.59893924", "0.5977537", "0.59541714", "0.5950665", "0.5950108", "0.5947392", "0.5933104", "0.5927248", "0.5921733", "0.59119266", "0.58945674", "0.5886526", "0.5865397", "0.5859617", "0.5847507", "0.5830277", "0.5829971" ]
0.7967136
0
Normalize a given image.
def normalize(self, image, transpose=False, data_type=None): return normalize(image, self.mean, self.std, transpose)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def normalise(image):", "def normalize(image):\n min = np.min(image)\n max = np.max(image)\n normalImg = 255*(image - min) / (max - min)\n return normalImg", "def normalize_image(img):\n min_, max_ = float(np.min(img)), float(np.max(img))\n return (img - min_) / (max_ - min_)", "def normalize_image(image):\n return image / 255.", "def imgNormalize(img): \n constant = np.sum(sitk.GetArrayFromImage(img))*np.prod(img.GetSpacing())\n return img/constant", "def normalization(image):\n return (image - np.min(image)) / (np.max(image) - np.min(image))", "def normalize(img):\n # TODO: implement this function.\n min_img = min([min(i) for i in img])\n max_img = max([max(i) for i in img])\n\n for i in range(len(img)):\n \tfor j in range(len(img[0])):\n \t\timg[i][j] = ((img[i][j] - min_img) / (max_img - min_img))\n #raise NotImplementedError\n return img", "def normalize_image(img):\n arr = np.array(img)\n new_img = Image.fromarray(normalize(arr).astype('uint8'),'L')\n return new_img", "def normalize(img):\r\n return ((img / 255.0) - 0.5) / 0.5", "def normalize_image(image):\n image = image.astype(np.float32) / 255.0\n\n return image", "def normalization_func(img):\n vmin, vmax = img.min(), img.max()\n if vmin != vmax:\n im = (img - vmin) / (vmax - vmin)\n else:\n im = np.ones(img.shape)\n return im", "def normalize(self):\n self.image = rescale_intensity(self.image, out_range=(0, 255))", "def normalize_image(img):\n\n # Load image and convert to grayscale\n img = rgb2gray(img)\n\n # Normalize values, range 0 to 255\n img = (img - img.min()) / (img.max() - img.min())\n img *= 255\n\n # Make int values\n img = img.astype(int)\n\n # Return new image\n return img", "def normalize(image):\r\n return image / 127.5 - 1.", "def normalize(img):\n img = np.clip(img, 0, 255).astype(np.uint8)\n return img / 255", "def normalize(image):\n return image / 127.5 - 1.", "def normalize_img(img):\n channel_mean = img.mean(axis=0, keepdims=True).mean(axis=1, keepdims=True)\n channel_std = img.std(axis=0, keepdims=True).mean(axis=1, keepdims=True)\n return (img - channel_mean) / channel_std", "def normalize(image):\n mean = image.mean()\n stddev = image.std()\n adjusted_stddev = max(stddev, 1.0/math.sqrt(image.size))\n standardized_image = (image - mean) / adjusted_stddev\n \n return standardized_image", "def normalize(image):\n mean = image.mean()\n stddev = image.std()\n adjusted_stddev = max(stddev, 1.0/math.sqrt(image.size))\n standardized_image = (image - mean) / adjusted_stddev\n \n return standardized_image", "def normalize_data(img):\n nor = np.linalg.norm(img, axis = 1)\n nor = np.reshape(nor, (len(img), 1))\n img = np.divide(img, nor)\n return img", "def normalize(img):\n norm = cvCreateImage(cvSize(img.width, img.height), IPL_DEPTH_32F, 1)\n cvCopy(img, norm)\n cvNormalize(norm, norm, 1, 0, CV_MINMAX)\n norm_u = cvCreateImage(cvSize(img.width, img.height), IPL_DEPTH_8U, 1)\n cvConvertScale(norm, norm_u, 255)\n return norm_u", "def normalize_image(im):\n pixels = im.flatten()\n\n # scale pixels to range 0 to 1\n normalized_im = (pixels - np.min(pixels)) / (np.max(pixels) - np.min(pixels))\n\n # scale the pixels by 255\n normalized_im = (normalized_im.reshape(im.shape) * 255).astype(np.uint8)\n\n return normalized_im", "def normalize(image):\n image = image.astype(np.float32)\n mean = np.mean(image)\n std = np.std(image)\n if std > 0:\n ret = (image - mean) / std\n else:\n ret = image * 0.\n return ret", "def normalise_image(image, use_torch=True):\n if use_torch:\n image = torch.abs(image)\n else:\n image = 
np.abs(image)\n if (image.max() - image.min()) < 1e-5:\n return image - image.min() + 1e-5\n else:\n return (image - image.min()) / (image.max() - image.min())", "def normalize(img):\n\n def normalize_pixel(x):\n return (x - 128) / 128\n\n normalize_vector = np.vectorize(normalize_pixel)\n return normalize_vector(img)", "def normalize_image(self):\n # The image normalization is identical to Cloud TPU ResNet.\n self._image = tf.image.convert_image_dtype(self._image, dtype=tf.float32)\n offset = tf.constant(DATASET_MEAN)\n offset = tf.expand_dims(offset, axis=0)\n offset = tf.expand_dims(offset, axis=0)\n self._image -= offset\n\n scale = tf.constant(DATASET_VAR)\n scale = tf.expand_dims(scale, axis=0)\n scale = tf.expand_dims(scale, axis=0)\n self._image /= scale", "def normalize_image(self, image, colorspace=None):\n float_image = image.astype(numpy.float32)\n if float_image.__array_interface__[\n \"data\"][0] == image.__array_interface__[\"data\"][0]:\n float_image = float_image.copy()\n float_image -= float_image.min()\n max_value = float_image.max()\n if max_value:\n max_value /= 255.0\n float_image /= max_value\n else:\n float_image[:] = 127.5\n normalized_image = float_image.astype(numpy.uint8)\n if (colorspace != \"RGB\" and len(normalized_image.shape) == 3\n and normalized_image.shape[2] == 3):\n import cv2\n normalized_image = cv2.cvtColor(\n normalized_image, getattr(cv2, \"COLOR_\" + colorspace + \"2RGB\"))\n return normalized_image", "def normalize(img):\n img = img.astype(np.float32)\n img -= img.min()\n img /= img.max()\n img *= 255\n img = img.astype(np.uint8)\n\n return img", "def image_normalize(image, image_dims):\r\n if image.dtype != 'float32':\r\n image = image.astype(dtype=np.float32)\r\n if np.size(image_dims) == 2:\r\n maxpx = np.max(image[:, :])\r\n if maxpx == float(0):\r\n maxpx = 1e-12 # fuzz factor\r\n minpx = np.min(image[:, :])\r\n image[:, :] = (image[:, :] - minpx) / (maxpx - minpx)\r\n else:\r\n for i in range(image_dims[2]): # find max/min for each channel\r\n maxpx = np.max(image[:, :, i])\r\n if maxpx == float(0):\r\n maxpx = 1e-12 # fuzz factor\r\n minpx = np.min(image[:, :, i])\r\n image[:, :, i] = (image[:, :, i] - minpx) / (maxpx - minpx)\r\n return image", "def normalization(img):\n max_val = img.max()\n min_val = img.min()\n\n return ((img-min_val)*255)/(max_val-min_val)", "def normalize_images(image_sitk):\n\n max = 400\n min = -1000\n\n image_np = sitk.GetArrayFromImage(image_sitk)\n\n # Normalization\n image_np = (image_np - min)/(max - min)\n image_np[image_np > 1] = 1\n image_np[image_np < 0] = 0\n\n # Convert back to SITK\n out_image_sitk = sitk.GetImageFromArray(image_np)\n out_image_sitk.CopyInformation(image_sitk)\n\n return out_image_sitk", "def normalize(img):\r\n min = img.min()\r\n max = img.max()\r\n x = 2.0 * (img - min) / (max - min) - 1.0\r\n return x", "def _normalize(image):\n return tf.multiply(tf.subtract(image, 0.5), 2.0)", "def normalize(im: np.ndarray) -> np.ndarray:\n im = im.astype(np.float32)\n return (im - im.min()) / (im.max() - im.min())", "def _normalize(images):\n images -= images.mean(axis=0, keepdims=True)\n images /= np.maximum(images.std(axis=0, keepdims=True), 3e-1)", "def _normalize(images):\n images -= images.mean(axis=0, keepdims=True)\n images /= np.maximum(images.std(axis=0, keepdims=True), 3e-1)", "def normalize(image, xbar, sigma):\n image = image.transpose(2, 0, 1) # Switch to channel-first\n mean, std = np.array(xbar), np.array(sigma)\n image = (image - mean[:, None, None]) / std[:, None, None]\n return 
image.transpose(1, 2, 0)", "def preprocess_img(img):\n # Make a copy of img as array\n img = np.array(img)\n\n # Convert into tensor\n img = torch.Tensor(img).permute(2, 0, 1) / 255.0\n\n # Normalize\n for t, m, s in zip(img, TORCH_IMG_MEAN, TORCH_IMG_STD):\n t.sub_(m).div_(s)\n\n return img", "def img_normalize(image, label):\n mean, std = ds_stats\n image -= tf.constant(mean, shape=[1, 1, num_channels], dtype=image.dtype)\n image /= tf.constant(std, shape=[1, 1, num_channels], dtype=image.dtype)\n return image, label", "def normlized_image(image):\n if image.dtype != np.float32:\n image = image.astype(np.float32)\n if image.max() > 1:\n image /= 255\n\n return image", "def normalize_img(img: np.ndarray, bit_depth: int) -> np.ndarray:\n return img / ((1 << bit_depth) - 1)", "def normalize(image, label):\n image -= settings.DATASET_MEAN\n image /= settings.DATASET_STD\n\n return image, label", "def __preprocesses_image(self, image, size):\n\n # Resize image\n image = cv2.resize(image, (size, size))\n\n # Normalize image (min-max)\n image = np.float32((image - np.min(image)) / (np.max(image) - np.min(image) + 0.00001))\n\n return image", "def _normalize_image(self, img: np.ndarray) -> np.ndarray:\n i2 = img.astype(float) - self.bg\n i2 /= i2.max()\n return i2", "def reverse_normalize(image):\n\n reverse = transforms.Normalize(mean=[-0.485 / 0.229, -0.456 / 0.224, -0.406 / 0.255],\n std=[1 / 0.229, 1 / 0.224, 1 / 0.255])\n return reverse(image)", "def _compute_normalization(self, normalize=True):\n if normalize:\n if self._img_norm is None:\n if np.sum(self._data) == 0:\n self._img_norm = 1\n else:\n self._img_norm = self._compute_raw_image_norm()\n\n if self._img_norm != 0.0 and np.isfinite(self._img_norm):\n self._data /= (self._img_norm * self._normalization_correction)\n self._normalization_status = 0\n else:\n self._normalization_status = 1\n self._img_norm = 1\n warnings.warn('Overflow encountered while computing '\n 'normalization constant. 
Normalization '\n 'constant will be set to 1.', NonNormalizable)\n else:\n self._normalization_status = 2", "def normalize(\n self,\n image: np.ndarray,\n data_format: Optional[Union[str, ChannelDimension]] = None,\n input_data_format: Optional[Union[str, ChannelDimension]] = None,\n ) -> np.ndarray:\n image = rescale(image=image, scale=1 / 127.5, data_format=data_format, input_data_format=input_data_format)\n image = image - 1\n return image", "def image_normalize(im, axis=(0, 1), c=1e-8):\n return (im - im.mean(axis)) / (im.std(axis) + c)", "def normalize_image(image, img_size):\n res = tf.image.resize_image_with_crop_or_pad(image, target_height=img_size, target_width=img_size)\n res = (tf.to_float(res) - 127.5) / 127.5\n res = tf.reshape(res, (img_size, img_size, 1))\n res.shape.assert_is_compatible_with([img_size, img_size, 1])\n return res", "def __normalize(input, type, a, b):\n return cv2.normalize(input, None, a, b, type)", "def normalize(batch_img: np.ndarray) -> np.ndarray:\n batch_img = batch_img.astype('float32')\n return batch_img / 127.5 - 1", "def normalize_gray_image(img):\n dtype = img.dtype\n _max = img.max()\n _min = img.min()\n ret = (img - _min).astype(np.float64) / (_max - _min) * 255.0\n return ret.astype(dtype)", "def reshape_normalise(img):\n\t# The image shape is expected to match the input of VGG19\n\timg = np.resize(img, (1, CONFIG.IMAGE_HEIGHT, CONFIG.IMAGE_WIDTH, CONFIG.COLOR_CHANNELS)).astype('float32')\n\timg -= CONFIG.MEAN_PIXEL\n\treturn img", "def normalize_image(image, mean=(0.485, 0.456, 0.406), var=(0.229, 0.224, 0.225)):\n with tf.name_scope('NormalizeImage', values=[image]):\n image = tf.to_float(image)\n image /= 255.0\n\n image -= mean\n image /= var\n\n return image", "def normalize(img):\n tol = 355\n maxi = np.max(img)\n if maxi > tol:\n img = 255 * (img - (tol - 255)) / maxi\n # end if\n\n norm = np.round(img)\n norm[norm < 0] = 0\n norm[norm > 255] = 255\n\n return norm", "def normalize_image(self, factor, luminosity=None):\n if not luminosity:\n luminosity = self.average_luminosity()\n\n for i in range(len(self.pixels)):\n self.pixels[i] = self.pixels[i] * (factor / luminosity)", "def normalise(dataset):\n # Scale images to the [0, 1] range\n dataset = dataset.astype(\"float32\") / 255\n # Make sure images have shape (28, 28, 1)\n return np.expand_dims(dataset, -1)", "def reshape_and_normalize_image(image):\n # Reshape image to mach expected input of VGG16\n image = np.reshape(image, ((1,) + image.shape))\n # Substract the mean to match the expected input of VGG16\n image = image - CONFIG.MEANS\n \n return image", "def _normalize_images(self, images: th.Tensor) -> th.Tensor:\n output = ((images+2)/4 - self._norm_mean)/self._norm_std\n return output", "def variance_normalize(self):\n self.img = self.img / np.sqrt(np.sum(self.img ** 2))", "def preprocess(img):\n \n scaler=StandardScaler() ## scaler object to perform preprocessing\n img=scaler.fit_transform(img) ## zero-center and normalize\n \n return img", "def unnormalize(self, image, transpose=False):\n return unnormalize(image, self.mean, self.std, transpose)", "def normalize(self, m1=0., m2=1.):\n self.img = self.img - self.img.min()\n self.img = self.img / self.img.max()\n\n self.img = self.img * (m2 - m1) + m1", "def normalize_array(image_array):\n\n array = image_array.astype(np.float)\n array /= 255.0\n return array", "def normalize(image, target=None):\n if target is None:\n target = np.array([[148.60, 41.56], [169.30, 9.01], [105.97, 6.67]])\n\n whitemask = cv2.cvtColor(image, 
cv2.COLOR_RGB2GRAY)\n whitemask = whitemask > 215\n\n imagelab = cv2.cvtColor(image, cv2.COLOR_RGB2LAB)\n\n imageL, imageA, imageB = cv2.split(imagelab)\n\n # mask is valid when true\n imageLM = np.ma.MaskedArray(imageL, whitemask)\n imageAM = np.ma.MaskedArray(imageA, whitemask)\n imageBM = np.ma.MaskedArray(imageB, whitemask)\n\n # Sometimes STD is near 0, or 0; add epsilon to avoid div by 0 -NI\n epsilon = 1e-11\n\n imageLMean = imageLM.mean()\n imageLSTD = imageLM.std() + epsilon\n\n imageAMean = imageAM.mean()\n imageASTD = imageAM.std() + epsilon\n\n imageBMean = imageBM.mean()\n imageBSTD = imageBM.std() + epsilon\n\n # normalization in lab\n imageL = (imageL - imageLMean) / imageLSTD * target[0][1] + target[0][0]\n imageA = (imageA - imageAMean) / imageASTD * target[1][1] + target[1][0]\n imageB = (imageB - imageBMean) / imageBSTD * target[2][1] + target[2][0]\n\n imagelab = cv2.merge((imageL, imageA, imageB))\n imagelab = np.clip(imagelab, 0, 255)\n imagelab = imagelab.astype(np.uint8)\n\n # Back to RGB space\n returnimage = cv2.cvtColor(imagelab, cv2.COLOR_LAB2RGB)\n # Replace white pixels\n returnimage[whitemask] = image[whitemask]\n\n return returnimage", "def preprocess(self, img):\n return img - np.mean(img)", "def normalize(img, mean, std, data_format='CHW'):\n _assert_image_tensor(img, data_format)\n\n mean = paddle.to_tensor(mean, place=img.place)\n std = paddle.to_tensor(std, place=img.place)\n\n if _is_channel_first(data_format):\n mean = mean.reshape([-1, 1, 1])\n std = std.reshape([-1, 1, 1])\n\n return (img - mean) / std", "def min_max_normalize_one_image(image):\n\n image = image.astype(np.float32)\n for i in range(len(image)):\n max_int = image[i].max()\n min_int = image[i].min()\n image[i] = (image[i] - min_int) / (max_int - min_int)\n\n return image", "def norm_img(img):\n img_arr = np.array(img).astype(float)\n max_val = np.amax(img_arr)\n if max_val > 0:\n img_arr /= max_val\n return img_arr", "def _compute_normalization(self, normalize=True):\n self._normalization_constant = 1.0 / self._normalization_correction\n\n if normalize:\n # compute normalization constant so that\n # N*C*sum(data) = 1:\n if self._img_norm is None:\n self._img_norm = self._compute_raw_image_norm()\n\n if self._img_norm != 0.0 and np.isfinite(self._img_norm):\n self._normalization_constant /= self._img_norm\n self._normalization_status = 0\n\n else:\n self._normalization_constant = 1.0\n self._normalization_status = 1\n warnings.warn(\"Overflow encountered while computing \"\n \"normalization constant. 
Normalization \"\n \"constant will be set to 1.\", NonNormalizable)\n\n else:\n self._normalization_status = 2", "def normalization(imgs):\n\n imgs = np.asarray(imgs).astype(np.float32)\n imgs = np.expand_dims(imgs / 255, axis=-1)\n return imgs", "def normalization_brain(img, mask):\n zone1 = img[mask != 0]\n imge = img.copy()\n imge[mask != 0] = (zone1 - zone1.min()) / (zone1.max() - zone1.min())\n imge[mask == 0] = 0\n return imge", "def normalize(image_data, a=0.1, b=0.9):\n # Implement Min-Max scaling for image data\n return a + (((image_data-np.min(image_data)) * (b - a)) / (np.max(image_data) - np.min(image_data)))", "def add_image_normalization(self):\n self.methods.append(self._normalize_image)\n self.args.append(None)", "def _normalize(self, x):\n # TODO: imagenet normalization\n\n return x", "def normalize((image)):\n new = np.zeros((7,7))\n index = 0\n for i in [0, 4, 7, 10, 14, 17, 21]:\n for j in [0, 4, 7, 10, 14, 17, 21]:\n new[index / 7][index % 7] += float(np.sum(image[i:(i+6),j:(j+6)]))/49\n index += 1\n return new", "def imnormalize_tensor(self, img, mean, std, to_rgb=True):\n mean = np.float32(mean.reshape(1, -1))\n stdinv = 1 / np.float32(std.reshape(1, -1))\n if to_rgb:\n img = img[:, :, [2, 1, 0]]\n img = torch.sub(img, torch.tensor(mean).cuda())\n img = torch.mul(img, torch.tensor(stdinv).cuda())\n return img", "def turn_intensity_normalization_on(self):\n self.intensity_normalize_image = True", "def unnormalize(images, mean, std):\n \n unnorm_images = images * std + mean\n \n \n return unnorm_images", "def test_normalize(dummy_input):\n # Test the 2D image: H, W, C\n image, label = dummy_input(image_size=(512, 512, 3),\n label_size=(512, 512, 1))\n transform = Normalize(means=None, stds=None)\n _image, _label = transform(image, label, normalize_tags=[True, False])\n assert not (image == _image).all()\n assert (label == _label).all()\n\n # Test the 3D image: H, W, D, C\n image, label = dummy_input(image_size=(512, 512, 20, 3),\n label_size=(512, 512, 20, 1))\n transform = Normalize(means=None, stds=None)\n _image, _label = transform(image, label, normalize_tags=[True, False])\n assert not (image == _image).all()\n assert (label == _label).all()\n assert np.abs(np.mean(_image)-0) < 1e-8\n assert np.abs(np.std(_image)-1) < 1e-8", "def torchxrayvision_normalize(img, maxval=255, reshape=False):\n \n if img.max() > maxval:\n raise Exception(\"max image value ({}) higher than expected bound ({}).\".format(img.max(), maxval))\n \n img = (2 * (img.astype(np.float32) / maxval) - 1.) 
* 1024\n\n if reshape:\n # Check that images are 2D arrays\n if len(img.shape) > 2:\n img = img[:, :, 0]\n if len(img.shape) < 2:\n print(\"error, dimension lower than 2 for image\")\n\n # add color channel\n img = img[None, :, :] \n \n return img", "def undo_normalise(img):\n\treturn img + CONFIG.MEAN_PIXEL", "def _normalize(self):\r\n self.dataframe['norm_intensity'] = self.dataframe['intensity']\r\n self.dataframe['norm_intensity'] -= self.dataframe['norm_intensity'].min()\r\n self.dataframe['norm_intensity'] /= self.dataframe['norm_intensity'].max() * 0.01", "def norm_input(image, label):\n cropped_image = tf.image.resize_image_with_crop_or_pad(image, FLAGS.image_size, FLAGS.image_size)\n\n norm_image = tf.image.per_image_standardization(cropped_image)\n\n return norm_image, label", "def standardize(image, mean=[0.48462227599918, 0.45624044862054, 0.40588363755159], std=[0.22889466674951, 0.22446679341259, 0.22495548344775]):\n image = image.astype(np.float32) / 255.0\n image = np.divide(np.subtract(image, mean), std)\n return image", "def preProcessImage(im, norm=True, blur=None, equalize=False, quantize=None):\n\n #Convert to float to avoid any overflow or rounding issues\n im = np.array(im, dtype='float64')\n if blur and blur > 0:\n im = filters.gaussian_filter(im, blur)\n\n if norm:\n im = filters.normalize(im, 0.0, None)\n else:\n im = im/255. #convert to floats between 0 and 1 without normalizing\n\n if equalize: \n im = filters.image_histogram_equalization(im)\n\n if quantize:\n im = np.rint(im * (quantize-1))/(quantize-1)\n \n return im", "def normalize_images(data, blend_cat, Args):\n im = data['X_train']['blend_image']\n std = np.std(im)\n mean = np.mean(im)\n data['X_train']['blend_image'] = (im - mean) / std\n data['X_val']['blend_image'] = (data['X_val']['blend_image'] - mean) / std\n data['X_train'] = normalize_other_inputs(data['X_train'], Args)\n data['X_val'] = normalize_other_inputs(data['X_val'], Args)\n for key in data['Y_train'].keys():\n data['Y_train'][key] = (data['Y_train'][key] - mean) / std\n data['Y_val'][key] = (data['Y_val'][key] - mean) / std\n blend_cat['std'] = std\n blend_cat['mean'] = mean\n return data", "def normalize(image, ww=1500, wl=-400):\n\n low, high = wl - ww / 2, wl + ww / 2\n image = np.clip(image, low, high)\n image = (image - low) / ww\n return image", "def image_preprocess(image, image_size, mean_rgb, stddev_rgb):\n input_processor = dataloader.DetectionInputProcessor(image, image_size)\n input_processor.normalize_image(mean_rgb, stddev_rgb)\n input_processor.set_scale_factors_to_output_size()\n image = input_processor.resize_and_crop_image()\n image_scale = input_processor.image_scale_to_original\n return image, image_scale", "def preprocess(img, out_shape=None):\n if out_shape is not None:\n img = resize(img, out_shape, mode='constant')\n\n # Normalize the image\n mean = img.mean()\n std = img.std()\n return (img - mean) / std", "def normalize(input_tensor, output_tensor):\n image_dims = utils.get_img_shape(input_tensor)[1:]\n return output_tensor / np.prod(image_dims)", "def preprocess_image(image, model_image_size):\n #resized_image = cv2.resize(image, tuple(reversed(model_image_size)), cv2.INTER_AREA)\n resized_image = letterbox_resize(image, tuple(reversed(model_image_size)))\n image_data = np.asarray(resized_image).astype('float32')\n image_data = normalize_image(image_data)\n image_data = np.expand_dims(image_data, 0) # Add batch dimension.\n return image_data", "def preprocess_image(normalize_lighting=False, min_value=0., 
max_value=1.):\n if min_value > max_value:\n min_value, max_value = max_value, min_value\n rescaling_range = max_value - min_value\n\n def rescale(image):\n if image.max() > 1:\n # Project values interval on [0.0; 1.0]\n image = image / 255.\n # Project values on [min_value; max_value]\n image = image * rescaling_range + min_value\n return image\n\n def normalize_lighting_and_rescale(image):\n # Normalize image lighting (this results in an image with values from [0.0; 1.0])\n image = image / np.mean(image, axis=(0, 1))\n image = image / image.max()\n # Project values on [min_value; max_value]\n image = image * rescaling_range + min_value\n return image\n\n if normalize_lighting is True:\n return normalize_lighting_and_rescale\n else:\n return rescale", "def normalization_mask(img, mask):\n zone1 = img[mask != 0]\n zone2 = img[mask == 0]\n zone1 = (zone1 - zone1.min()) / (zone1.max() - zone1.min())\n zone2 = (zone2 - zone2.min()) / (zone2.max() - zone2.min())\n imge = img.copy()\n imge[mask != 0] = zone1\n imge[mask == 0] = zone2\n return imge", "def _normalize_(x: np.array) -> np.array:\n if x.max() != 0:\n x = x / x.max()\n return np.clip(x, 0, 1)# ensure that no values are >1\n else:\n raise ZeroDivisionError('Image Normalization')", "def normalise_intensity(image, thres_roi=10.0):\n val_l = np.percentile(image, thres_roi)\n roi = (image >= val_l)\n mu, sigma = np.mean(image[roi]), np.std(image[roi])\n eps = 1e-6\n image2 = (image - mu) / (sigma + eps)\n return image2", "def preprocess(img):\n if img.ndim != 3:\n raise TypeError('bad ndim of img')\n if img.dtype != np.uint8:\n raise TypeError('bad dtype of img')\n img = cv2.resize(img, (224, 224))\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n img = img.astype(np.float32)\n img *= (2.0/255) # normalize to: 0.0~2.0\n img -= 1.0 # subtract mean to make it: -1.0~1.0\n img = np.expand_dims(img, axis=0)\n return img", "def turn_intensity_normalization_off(self):\n self.intensity_normalize_image = False", "def _preprocessing(self, input_image):\n if self.resize:\n input_image = self._np_resize_image(input_image,\n self.input_size,\n dtype='int')\n image = self._np_transpose(input_image)\n image = self._np_normalize(image)\n image = self._np_flip_n_cat(image)\n return image", "def get_intensity_normalization(self):\n return self.intensity_normalize_image" ]
[ "0.82951486", "0.8158422", "0.81137586", "0.8056085", "0.8043467", "0.80207026", "0.79855645", "0.79698676", "0.79609406", "0.79318285", "0.79120314", "0.78832763", "0.78212625", "0.7805976", "0.78015596", "0.77725065", "0.77711695", "0.77530694", "0.77530694", "0.774932", "0.7730462", "0.77047706", "0.7703867", "0.7618317", "0.75890267", "0.75855726", "0.7557615", "0.7503972", "0.7501273", "0.74643624", "0.7458383", "0.7451265", "0.74202466", "0.740825", "0.7400206", "0.7400206", "0.7369546", "0.7356525", "0.73534274", "0.72939044", "0.7253777", "0.72514004", "0.72508603", "0.72425264", "0.7233559", "0.72317827", "0.7231426", "0.7202362", "0.7199026", "0.7195005", "0.7185088", "0.7141229", "0.71363693", "0.7099243", "0.70895636", "0.70895267", "0.70629734", "0.705547", "0.70546633", "0.70407957", "0.7037101", "0.7034394", "0.70198816", "0.6983604", "0.697452", "0.6973716", "0.6973384", "0.69689757", "0.69605345", "0.6948277", "0.6946298", "0.69166774", "0.6896359", "0.68843216", "0.6870841", "0.68268144", "0.6826548", "0.6798942", "0.6798708", "0.6764816", "0.6756739", "0.6747275", "0.67415893", "0.6694083", "0.66909593", "0.6641104", "0.66379493", "0.66368234", "0.66228557", "0.6620197", "0.6601335", "0.6592929", "0.6584989", "0.6580066", "0.65769106", "0.65606517", "0.65450513", "0.65388626", "0.6520296", "0.65039444" ]
0.73125774
39
Handle imbalanced dataset through sampler.
def create_class_imbalance_sampler(self): count = [0] * len(self.classes) for item in self.train_data.imgs: count[item[1]] += 1 weight_per_class = [0.] * len(self.classes) for i in range(len(self.classes)): weight_per_class[i] = float(sum(count)) / float(count[i]) weights = [0] * len(self.train_data.imgs) for idx, val in enumerate(self.train_data.imgs): weights[idx] = weight_per_class[val[1]] weights = torch.DoubleTensor(weights) self.sampler = torch.utils.data.sampler.WeightedRandomSampler( weights, len(weights) )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def balanced_sampling(dat: pd.DataFrame, logger=None):\n if logger == None:\n logging.basicConfig(\n level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n logger = logging.getLogger(__name__)\n \n \n # upsampling\n logger.info('Start balanced sampling')\n subsample = []\n num_of_each_class = dat.iloc[:, -1].value_counts().to_numpy()\n if num_of_each_class.std()*1.0 / num_of_each_class.mean() < 0.1:\n logger.info('The given data is balance.')\n # the dataset is balanced\n return dat\n logger.info('Given dataset is unbalance')\n logger.info('Sampling data from each class to generate a new dataset')\n n_smp = num_of_each_class.max()\n for label in dat.iloc[:, -1].value_counts().index:\n samples = dat[dat.iloc[:, -1] == label]\n num_samples = len(samples)\n index_range = range(num_samples)\n # take all from the set\n indexes = list(np.random.choice(index_range, size=num_samples, replace=False))\n indexes2 = list(np.random.choice(\n index_range, size=n_smp-num_samples, replace=True)) # add random items\n indexes.extend(indexes2)\n subsample.append(samples.iloc[indexes, :])\n logger.info('End with sampling')\n out = pd.concat(subsample)\n out = out.sample(frac=1).reset_index(drop=True) # shuffle and re index\n return out", "def train_dataloader(self) -> data.DataLoader:\n # Random weighted sampler to approach the imbalanced dataset\n self.weights = [1.0 / i for i in self.weights]\n\n _sample_weights = [0] * len(self.datasets['train'])\n\n for idx, (_, label) in enumerate(self.datasets['train']):\n _weight = self.weights[label]\n _sample_weights[idx] = _weight\n\n random_sampler = data.WeightedRandomSampler(_sample_weights,\n len(self.datasets['train']), replacement=False)\n\n return data.DataLoader(dataset=self.datasets['train'], batch_size=self.batch_size,\n num_workers=self.num_workers, pin_memory=False,\n sampler=random_sampler)", "def sampler_weights(dataset):\n class_counts = [0, 0]\n for index in range(len(dataset)):\n _, label = dataset[index]\n class_counts[label] += 1\n\n divisor = 2 * class_counts[0] * class_counts[1]\n sample_weights = (class_counts[1] / divisor, class_counts[0] / divisor)\n weights = []\n for index in range(len(dataset)):\n _, label = dataset[index]\n weights.append(sample_weights[label])\n\n num_samples = 2 * min(class_counts[0], class_counts[1])\n return weights, num_samples", "def handle_imbalance(dataset, minority_class):\n for i, l in enumerate(dataset):\n if l == minority_class:\n dataset[i] = 2\n return dataset", "def balance_data(df, y, do_undersample):\n if do_undersample:\n print('Under sampling the \\'0\\' class of our outcome data...')\n # Under sample -50K so we can better learn.\n ones = df[df['binary_income']==1]\n zeros = df[df['binary_income']==0]\n \n subsampled_df = pd.concat([ones, zeros.sample(ones.shape[0])])\n subsampled_y = subsampled_df['binary_income']\n subsampled_df = subsampled_df.drop('binary_income',axis=1)\n \n return subsampled_df, subsampled_y\n \n else:\n return df, y", "def overSampling( self, feature, Class, random_state = 0 ):\n oversampler = SMOTE(random_state=0)\n feature_resample, Class_resample = oversampler.fit_sample(feature, \n Class)\n print(\"Warning: You are increasing the dataset to balance the data\\n\")\n return feature_resample, Class_resample", "def process_sample_train(self):\n raise NotImplementedError", "def learn(self):\n metrics_hist = dict()\n max_runs = 3\n for run in range(max_runs):\n all_indices, initial_indices = self._init_al_dataset()\n\n metrics_hist[str(run)] = 
dict()\n\n current_indices = list(initial_indices)\n \n for split in self.data_splits_frac:\n print(f'\\nRUN {run} - SPLIT - {split*100:0.0f}%')\n\n # Initialise models\n self._init_models(mode='svaal')\n\n # Do some label stuff\n unlabelled_indices = np.setdiff1d(list(all_indices), current_indices)\n unlabelled_sampler = data.sampler.SubsetRandomSampler(unlabelled_indices)\n unlabelled_dataloader = data.DataLoader(self.datasets['train'],\n sampler=unlabelled_sampler,\n batch_size=64,\n drop_last=False)\n\n print(f'Labelled: {len(current_indices)} Unlabelled: {len(unlabelled_indices)} Total: {len(all_indices)}')\n\n # TODO: Make the SVAAL allow 100% labelled and 0% unlabelled to pass through it. Breaking out of loop for now when data hits 100% labelled.\n if len(unlabelled_indices) == 0:\n break\n\n metrics, svae, discriminator = self.train(dataloader_l=self.labelled_dataloader,\n dataloader_u=unlabelled_dataloader,\n dataloader_v=self.val_dataloader,\n dataloader_t=self.test_dataloader,\n mode='svaal') \n print(f'Test Eval.: F1 Scores - Macro {metrics[0]*100:0.2f}% Micro {metrics[1]*100:0.2f}%') \n \n # Record performance at each split\n metrics_hist[str(run)][str(split)] = metrics\n\n \n sampled_indices = self.sample_adversarial(svae, discriminator, unlabelled_dataloader, indices=unlabelled_indices, cuda=True) # TODO: review usage of indices arg\n current_indices = list(current_indices) + list(sampled_indices)\n sampler = data.sampler.SubsetRandomSampler(current_indices)\n self.labelled_dataloader = data.DataLoader(self.datasets['train'], sampler=sampler, batch_size=self.batch_size, drop_last=True)\n \n # write results to disk\n with open('results.json', 'w') as fj:\n json.dump(metrics_hist, fj, indent=4)", "def get_dataset_sampler(self):\n return None", "def should_sample(self, span_context):\n raise NotImplementedError", "def prepare_dataset():\n with open('gold-posts.txt', encoding='utf-8') as f:\n posts = f.readlines()\n with open('gold-labels.txt', encoding='utf-8') as f:\n labels = f.readlines()\n\n def to_cat(x: str) -> int:\n if x == 'p':\n return 1\n elif x == 'n':\n return 2\n else:\n return 0\n X = np.array([x.strip() for x in posts])\n y = np.array([to_cat(x.strip()) for x in labels])\n\n # DOES NOT WORK - too imbalanced\n #skf = StratifiedKFold(n_splits=5, random_state=None, shuffle=False)\n #for train_index, test_index in skf.split(X, y):\n # X_train, X_test = X[train_index], X[test_index]\n # y_train, y_test = y[train_index], y[test_index]\n # break\n\n # WORKS better\n trI, teI = balanced_split(y)\n\n train_texts = X[trI].tolist()\n train_labels = y[trI].tolist()\n valid_texts = X[teI].tolist()\n valid_labels = y[teI].tolist()\n return train_texts, train_labels, valid_texts, valid_labels", "def separate(self):\n print(\"start dataset separating\")\n sum = 0\n for i in tqdm(range(len(self.itemlen))):\n il = self.itemlen[i]\n if il < 3:\n sum += il\n continue\n rarr = list(range(sum, sum+il))\n random.shuffle(rarr)\n self.train.append({\n 'input': self.input[rarr[0]],\n 'label': self.label[i]\n })\n self.val.append({\n 'input': self.input[rarr[1]],\n 'label': self.label[i]\n })\n for j in range(2, len(rarr)):\n self.test.append({\n 'input': self.input[rarr[j]],\n 'label': self.label[i]\n })\n sum += il", "def importance_sampler(raw_data, analysis_settings):\n pass", "def __call__(self, y, pred, sample_weight=None):", "def downsample_data(dataset):\n loss = dataset.loc[dataset[TARGET] == 'loss']\n good_gain = dataset.loc[dataset[TARGET] == 'good_gain']\n \n sample_size = 
min([loss.shape[0], good_gain.shape[0]])\n loss = loss.sample(n=sample_size, random_state=42)\n good_gain = good_gain.sample(n=sample_size, random_state=42)\n \n frames = [loss, good_gain]\n return shuffle(pd.concat(frames), random_state=0)", "def load_binary_imbalanced(classes=(1,7), ratio=0.1):\r\n train_set, train_set_target = load_data()\r\n \r\n # binarize\r\n mask_train_set_imb = np.logical_or(train_set_target == classes[0],train_set_target == classes[1])\r\n (data_set_imb,data_set_imb_target)= (train_set[mask_train_set_imb], train_set_target[mask_train_set_imb])\r\n\r\n # imbalance\r\n data_minority = data_set_imb[data_set_imb_target == classes[1]]\r\n data_minority_target = data_set_imb_target[data_set_imb_target == classes[1]]\r\n data_majority = data_set_imb[data_set_imb_target == classes[0]]\r\n data_majority_target = data_set_imb_target[data_set_imb_target == classes[0]]\r\n original_size = data_minority_target.shape[0]\r\n majority_size = data_majority_target.shape[0]\r\n target_size = int(np.floor(majority_size * ratio))\r\n indices = np.random.choice(original_size, size=target_size)\r\n data_minority = data_minority[indices]\r\n data_minority_target = data_minority_target[indices]\r\n\r\n # merge\r\n train_set = np.concatenate([data_minority, data_majority])\r\n train_set_target = np.concatenate([data_minority_target, data_majority_target])\r\n\r\n #shuffle\r\n train_set, train_set_target = np.hsplit(\r\n np.random.permutation(\r\n np.hstack((train_set, train_set_target.reshape((train_set_target.shape[0], 1))))\r\n ), [-1]\r\n )\r\n train_set_target = np.asarray(train_set_target, dtype='int').reshape((train_set_target.shape[0],))\r\n return (train_set[:],train_set_target[:])", "def initialise_sampler(self):\n raise NotImplementedError", "def _init_al_dataset(self):\n\n self._init_dataset()\n\n train_dataset = self.datasets['train']\n\n dataset_size = len(train_dataset)\n self.budget = math.ceil(self.budget_frac*dataset_size)\n Sampler.__init__(self, config, self.budget) # TODO: Weird place to initialise this\n\n all_indices = set(np.arange(dataset_size))\n k_initial = math.ceil(len(all_indices)*self.initial_budget_frac)\n initial_indices = random.sample(list(all_indices), k=k_initial)\n\n sampler_init = data.sampler.SubsetRandomSampler(initial_indices) # need to sample from training dataset\n\n self.labelled_dataloader = data.DataLoader(train_dataset, sampler=sampler_init, batch_size=self.batch_size, drop_last=True)\n self.val_dataloader = data.DataLoader(self.datasets['valid'], batch_size=self.batch_size, drop_last=False)\n self.test_dataloader = data.DataLoader(self.datasets['test'], batch_size=self.batch_size, drop_last=False)\n\n return all_indices, initial_indices", "def sample(self):\r\n raise NotImplementedError", "def __call__(self, samples_number):\n self.sampler.sample(samples_number)", "def sample_count(self):", "def _run(self, **params):\n# if softEvidence is None:\n# self.softEvidence = self.mln.softEvidence\n# else:\n# self.softEvidence = softEvidence\n # initialize chains\n chains = MCMCInference.ChainGroup(self)\n for i in range(self.chains):\n chain = GibbsSampler.Chain(self, self.queries)\n chains.chain(chain)\n# if self.softEvidence is not None:\n# chain.setSoftEvidence(self.softEvidence)\n # do Gibbs sampling\n# if verbose and details: print \"sampling...\"\n converged = 0\n steps = 0\n if self.verbose:\n bar = ProgressBar(color='green', steps=self.maxsteps)\n while converged != self.chains and steps < self.maxsteps:\n converged = 0\n steps += 1\n 
print('STEP {} / {}'.format(steps, self.maxsteps))\n for chain in chains.chains:\n chain.step()\n if self.verbose:\n bar.inc()\n bar.label('%d / %d' % (steps, self.maxsteps))\n# if self.useConvergenceTest:\n# if chain.converged and numSteps >= minSteps:\n# converged += 1\n# if verbose and details:\n# if numSteps % infoInterval == 0:\n# print \"step %d (fraction converged: %.2f)\" % (numSteps, float(converged) / numChains)\n# if numSteps % resultsInterval == 0:\n# chainGroup.getResults()\n# chainGroup.printResults(shortOutput=True)\n # get the results\n return chains.results()[0]", "def generate_sampler(dataset, sampler_option='random', step=1):\n\n df = dataset.df\n min_age = np.min(df.age)\n max_age = np.max(df.age)\n\n if (max_age - min_age) % step == 0:\n max_age += step\n\n bins = np.arange(min_age, max_age, step)\n count = np.zeros(len(bins))\n for idx in df.index:\n age = df.loc[idx, \"age\"]\n key = np.argmax(np.logical_and(age - step < bins, age >= bins)).astype(int)\n count[key] += 1\n\n # weight_per_class = (1 / np.array(count)) if count.any() != 0 else 0.\n weight_per_class = np.zeros_like(count).astype(float)\n np.divide(1., count, out=weight_per_class, where=count != 0)\n weights = [0] * len(df)\n\n for idx, age in enumerate(df.age.values):\n key = np.argmax(np.logical_and(age - 5 <= bins, age > bins)).astype(int)\n weights[idx] = weight_per_class[key]\n\n weights = torch.FloatTensor(weights)\n\n if sampler_option == 'random':\n s = sampler.RandomSampler(dataset, replacement=False)\n elif sampler_option == 'weighted':\n s = sampler.WeightedRandomSampler(weights, len(weights))\n else:\n raise NotImplementedError(\"The option %s for sampler is not implemented\" % sampler_option)\n\n return s", "def CrossCheck(dataloader):", "def IBP_sampler(mat):\n mat.val, mat.siblings[0].val = numba_mu.sample_2d_IBP(\n mat(),\n mat.siblings[0](),\n mat.layer.child().transpose(transpose_order),\n mat.layer.lbda(),\n mat.siblings[0].bernoulli_prior,\n mat.layer.alpha)", "def sample(self, seg_logit, seg_label):", "def sample(self):\n raise NotImplementedError", "def sample(self):\n raise NotImplementedError", "def create_weighted_sampler(local_df, test_label = 'protease_stability'):\n all_label_ids = torch.tensor([x for x in local_df[test_label]], dtype=torch.long)\n labels_unique, counts = np.unique(local_df[test_label], return_counts=True)\n print(labels_unique)\n\n class_weights = [sum(counts)/c for c in counts]\n print(class_weights)\n\n weights = [class_weights[e] for e in local_df[test_label]]\n\n print(len(local_df[test_label]))\n sampler = data_utils.WeightedRandomSampler(weights, len(local_df[test_label]))\n return sampler", "def __call__(self):\n if self.numbatches is None:\n pool = self.pooler()\n if self.batchsize is None:\n self.batchsize = self.pooler.nInPool()\n self.numbatches = self.pooler.nInPool()//self.batchsize\n for i in xrange(self.numbatches):\n pool = self.pooler()\n self._reset_batch()\n if self.samplemethod == 'balance' and len(self.keysamplers)>0:\n batchinds,keyids = self._samplebalanced(pool)\n elif self.samplemethod == 'uniform':\n batchinds,keyids = self._sampleuniform(pool)\n else:\n batchinds,keyids = self._samplesequential(i)\n batch = self._extractInds(pool,batchinds,keyids)\n for k in batch:\n batch[k][np.isnan(batch[k])] = self.nanreplacement\n yield batch", "def classify():\n yes_dataset = df[df[\"_class\"] == 1] # 470588\n no_dataset = df[df[\"_class\"] == 0] # 1971\n\n parameter_analysis = list()\n for criterion in np.arange(0.05, 0.91, 0.05):\n 
print(\"doing experiment at criterion = %s ...\" % criterion)\n rate_list = list()\n for i in range(10):\n # shuffle yes_dataset and no_dataset, so we can randomly choose 90% yes_dataset\n # + 90% no_dataset as train dataset, 10% yes_dataset + 10% no_dataset as test dataset\n yes_index = yes_dataset.index.tolist()\n random.shuffle(yes_index)\n no_index = no_dataset.index.tolist()\n random.shuffle(no_index)\n \n # concatenate 90%yes + 90%no, 10%yes + 10%no\n train = pd.concat([\n yes_dataset.loc[yes_index[:1774], :],\n no_dataset.loc[no_index[:423530], :]\n ])\n test = pd.concat([\n yes_dataset.loc[yes_index[1774:], :],\n no_dataset.loc[no_index[423530:], :]\n ]) \n \n # split data and label\n train_data, train_label = (train[[\"Revenue.Code\", \n \"Service.Code\", \n \"Procedure.Code\", \n \"Diagnosis.Code\", \n \"Subscriber.Index\"]], \n train[\"_class\"])\n test_data, test_label = (test[[\"Revenue.Code\", \n \"Service.Code\", \n \"Procedure.Code\", \n \"Diagnosis.Code\", \n \"Subscriber.Index\"]], \n test[\"_class\"])\n \n # apply classifier\n clf = GaussianNB()\n clf.fit(train_data, train_label)\n probability = clf.predict_proba(test_data).T[1]\n \n result = pd.DataFrame()\n result[\"_class\"] = test_label\n result[\"_predict\"] = probability >= criterion\n \n result_yes = result[result[\"_class\"] == 1]\n yes_yes_rate = sum(result_yes[\"_class\"] == result_yes[\"_predict\"])/len(result_yes[\"_predict\"])\n \n result_no = result[result[\"_class\"] == 0]\n no_no_rate = sum(result_no[\"_class\"] == result_no[\"_predict\"])/len(result_no[\"_predict\"])\n \n rate_list.append((yes_yes_rate, no_no_rate))\n\n rate_list = pd.DataFrame(rate_list)\n yes_yes_rate, no_no_rate = rate_list.mean()[0], rate_list.mean()[1]\n parameter_analysis.append((criterion, yes_yes_rate, no_no_rate))\n \n # save data to excel spreadsheet\n parameter_analysis = pd.DataFrame(parameter_analysis, columns=[\"criterion\", \"yes_yes_rate\", \"no_no_rate\"])\n writer = pd.ExcelWriter(\"parameter_analysis.xlsx\")\n parameter_analysis.to_excel(writer, \"parameter_analysis\", index=False)\n writer.save()", "def posterior_sample(self):\n pass", "def get_datasets(\n self, stage: str, num_samples_per_class: int = None\n ) -> \"OrderedDict[str, Dataset]\":\n num_samples_per_class = num_samples_per_class or 320\n\n datasets = super().get_datasets(stage=stage)\n datasets[\"train\"] = {\n \"dataset\": datasets[\"train\"],\n \"sampler\": BalanceClassSampler(\n labels=datasets[\"train\"].targets, mode=num_samples_per_class\n ),\n }\n return datasets", "def balance_dataset(dataset):\n \n print(\"Balancing dataset...\")\n n = len(dataset)\n labels = ch.Tensor([dataset[i][1] for i in range(n)]).int()\n n0 = sum(labels).item()\n I_pos = labels == 1\n\n idx = ch.arange(n)\n idx_pos = idx[I_pos]\n ch.manual_seed(0)\n I = ch.randperm(n - n0)[:n0]\n idx_neg = idx[~I_pos][I]\n idx_bal = ch.cat([idx_pos, idx_neg],dim=0)\n return Subset(dataset, idx_bal)", "def sampler_data(self) -> int:\n return self.__sampler_data", "def sample(self, observation):\n raise NotImplementedError", "def sample(self):", "def shuffle_dataset(self):\n # TODO explain approached used for selecting training and test data\n labels = self.dataset.label.unique()\n good_jobs = self.dataset[self.dataset.label == \"Good\"]\n bad_jobs = self.dataset[self.dataset.label == \"Bad\"]\n\n # TODO n>2 probablly won't work the way it's supposed to currently\n if len(labels) == 2:\n # oversample\n resize = max(len(good_jobs.label),len(bad_jobs.label))\n # undersample\n resize = 
min(len(good_jobs.label), len(bad_jobs.label))\n good_jobs_re = good_jobs.sample(resize)\n bad_jobs_re = bad_jobs.sample(resize)\n dataset = pd.concat([good_jobs_re, bad_jobs_re])\n elif len(labels) == 3:\n neutral_jobs = self.dataset[self.dataset.label == \"Neutral\"]\n # oversample\n resize = max(len(good_jobs.label), len(bad_jobs.label),len(neutral_jobs.label))\n # undersample\n resize = min(len(good_jobs.label), len(bad_jobs.label),len(neutral_jobs.label))\n\n good_jobs_re = good_jobs.sample(resize, replace=True)\n bad_jobs_re = bad_jobs.sample(resize, replace=True)\n neutral_jobs_re = bad_jobs.sample(resize, replace=True)\n dataset = pd.concat([good_jobs_re, bad_jobs_re,neutral_jobs_re])\n elif len(labels) == 4:\n neutral_jobs = self.dataset[self.dataset.label == \"Neutral\"]\n ideal_jobs = self.dataset[self.dataset.label == \"Ideal\"]\n\n # middle of the road approach\n resize = int(mean([len(good_jobs.label), len(bad_jobs.label),len(neutral_jobs.label),len(ideal_jobs.label)]))\n good_jobs_re = good_jobs.sample(resize, replace=True)\n bad_jobs_re = bad_jobs.sample(resize, replace=True)\n neutral_jobs_re = bad_jobs.sample(resize, replace=True)\n ideal_jobs_re = ideal_jobs.sample(resize,replace=True)\n dataset = pd.concat([good_jobs_re, bad_jobs_re,neutral_jobs_re,ideal_jobs_re])\n\n train,test = train_test_split(dataset,test_size=0.25,stratify = dataset.label,shuffle=True)\n #test = self.dataset[~self.dataset.isin(train)].dropna()\n #test = self.dataset[(~dataset.label.isin(self.dataset.label))&(~dataset.description.isin(self.dataset.description))]\n #0tr_hashes = [hash(tuple(d)) for d in train.description]\n #ytest = [val for iter,val in self.dataset.iterrows() if hash(tuple(val.description)) not in tr_hashes]\n\n self.y_train,self.y_test = train.label.values,test.label.values\n self.X_train,self.X_test = train.description.values,test.description.values", "def balance_set(X, Y, adr_labels_size, nonadr_labels_size):\n\n print(\"Performing Class Balancing...\")\n adr_samples_needed = nonadr_labels_size - adr_labels_size\n new_X = []\n new_Y = []\n adr_labels_size = 0\n nonadr_labels_size = 0\n\n for index, example in enumerate(X):\n if adr_samples_needed > 0:\n if Y[index] == ADR_MENTION_CLASS_LABEL:\n new_X.append(example) # add original 'ADR' sample\n new_Y.append(ADR_MENTION_CLASS_LABEL)\n new_X.append(example) # add duplicate 'ADR' sample to perform Over-Sampling\n new_Y.append(ADR_MENTION_CLASS_LABEL)\n\n adr_labels_size += 2\n adr_samples_needed -= 1\n else:\n # we don't add original 'No ADR Mention' sample to perform Under-Sampling\n adr_samples_needed -= 1\n\n else:\n if Y[index] == ADR_MENTION_CLASS_LABEL:\n adr_labels_size += 1\n else:\n nonadr_labels_size += 1\n\n new_X.append(example) # add original sample\n new_Y.append(Y[index]) # add original label\n\n print(\" Updated dataset size: {}\".format(len(new_X)))\n print(\" {} class size: {}\".format(ADR_MENTION_CLASS_NAME, adr_labels_size))\n print(\" {} class size: {}\".format(NON_ADR_MENTION_CLASS_NAME, nonadr_labels_size))\n\n return new_X, new_Y", "def sample(self):\n raise NotImplementedError(\"Override me!\")", "def test_non_blocked(self):\n _, model = simple_2model()\n with model:\n for sampler in self.samplers:\n assert isinstance(sampler(blocked=False), CompoundStep)", "def learnDataset(self, data_loader):\n\n print(\"learning dataset\")\n # we have 127940 sentences in total\n count = 0\n for sample in data_loader:\n input_sentence = sample[\"input\"][0]\n target_sentence = sample[\"target\"][0]\n\n # NOTE: 
target_word & input_word are actually indecies of words, instead of word strings\n # NOTE: the first word has index 1\n first_target = int(target_sentence[1])\n first_input = int(input_sentence[1])\n\n self.emiss_factors[0][(first_input, first_target)] += 1\n\n prev_target = first_target\n for word_idx in range(2, 16):\n # note that word_idx is 0 is always <BOS>\n target_word = int(target_sentence[word_idx])\n input_word = int(input_sentence[word_idx])\n\n self.emiss_factors[word_idx - 1][(input_word, target_word)] += 1\n self.trans_factors[word_idx - 2][(prev_target, target_word)] += 1\n prev_target = target_word\n\n print(\"{}/127940\".format(count), end = \"\\r\")\n count += 1\n print(\"127940/127940\")\n\n # all data updated, no need to do any insertion\n for i in range(15):\n self.emiss_factors[i].fixed()\n for i in range(14):\n self.trans_factors[i].fixed()", "def resample(X_train=None, y_train=None, df=None, balance=None, nclass=None):\n\n if balance == 'SMOTE':\n X_train, y_train = SMOTE().fit_sample(X_train, y_train)\n logger.info(f'Using {balance} oversampling')\n elif balance == 'RandomUnderSampler':\n rus = RandomUnderSampler(random_state=0)\n X_train, y_train = rus.fit_sample(X_train, y_train)\n logger.info(f'Using {balance} oversampling')\n elif balance == 'Bootstrap':\n logger.info(f'Using {balance} oversampling')\n df = bootstrap(df, nclass)\n return df\n elif balance == 'Handsample':\n logger.info(f'Using {balance} oversampling')\n df = bootstrap(df, nclass, if_new=True)\n return df\n\n return X_train, y_train", "def balancer():\n pass", "def score_samples(self, X):\n ...", "def _fetch_data(self, samples):\n pass", "def _request_and_measure(self, count):\n for i in range(count):\n self.rate_measurer.update_rate()\n\n def handle_request_error(result):\n self.rate_measurer.request_failed(result)\n write_failure(result)\n\n for i in range(self.request_rate):\n d = self.control_service.move_dataset(self.dataset_node.uuid,\n self.dataset_id)\n self.rate_measurer.request_sent()\n d.addCallbacks(self.rate_measurer.response_received,\n errback=handle_request_error)", "def samples(self, gp):\r\n raise NotImplementedError", "def balance(labels):\n # subsample positive labels if we have too many\n labels = subsample_positive_labels(labels)\n\n # subsample negative labels if we have too many\n labels = subsample_negative_labels(labels)\n\n return labels", "def bid_dataset_0():\n\n bm = BidManager()\n bm.add_bid(1, 3, 0, True, 0)\n bm.add_bid(2, 4, 1, True, 0)\n bm.add_bid(5, 1, 2, True, 0)\n\n bm.add_bid(4, 2, 3, False, 0)\n bm.add_bid(1, 1, 4, False, 0)\n bm.add_bid(5, 6, 5, False, 0)\n \n return bm.get_df()", "def bootstrap_data(self):\n for i in range(self.bootstraps):\n df_i = self.training_df.groupby(\n self.random_effect, group_keys=False\n ).apply(\n lambda x: x.sample(len(x), replace=True)\n )\n self.models.append(self.convert(df=df_i))", "def samples(self):\n pass", "def negative_sampling(self):\n \n self.train_arr = []\n sample_list = np.random.choice(list(range(self.item_count)), size = 10 * len(self.interactions) * self.num_ns)\n \n sample_idx = 0\n for user, pos_item, _ in self.interactions:\n ns_count = 0\n \n while True:\n neg_item = sample_list[sample_idx]\n if not is_visited(self.rating_mat, user, neg_item):\n self.train_arr.append((user, pos_item, neg_item))\n sample_idx += 1\n ns_count += 1\n if ns_count == self.num_ns:\n break\n \n sample_idx += 1", "def sample_negative(ratings):\r\n ## user_pool = set(ratings['userId'].unique())\r\n item_pool = 
set(ratings['itemId'].unique())\r\n\r\n interact_status = ratings.groupby('userId')['itemId'].apply(set).reset_index().rename(\r\n columns={'itemId': 'interacted_items'})\r\n interact_status['negative_items'] = interact_status['interacted_items'].apply(lambda x: item_pool - x)\r\n interact_status['negative_samples'] = interact_status['negative_items'].apply(lambda x: random.sample(x, 99))\r\n return interact_status[['userId', 'negative_samples']]", "def test_Bernoulli_NB_estimators():", "def bid_dataset_1():\n\n bm = BidManager()\n\n bm.add_bid(1, 6.7, 0, True, 0)\n bm.add_bid(1, 6.6, 1, True, 0)\n bm.add_bid(1, 6.5, 2, True, 0)\n bm.add_bid(1, 6.4, 3, True, 0)\n bm.add_bid(1, 6.3, 4, True, 0)\n bm.add_bid(1, 6, 5, True, 0)\n\n bm.add_bid(1, 1, 6, False, 0)\n bm.add_bid(1, 2, 7, False, 0)\n bm.add_bid(2, 3, 8, False, 0)\n bm.add_bid(2, 4, 9, False, 0)\n bm.add_bid(1, 6.1, 10, False, 0)\n \n return bm.get_df()", "def __balance_data(self):\n # Shuffle each class independently (This is useful in case of multiple root directories because it does not\n # discard only elements of the last listed root directory, but random elements of all root directories)\n start_index = 0\n for class_id, num_samples_in_this_class in enumerate(self.__samples_per_class):\n permutation = np.random.permutation(num_samples_in_this_class)\n self.__image_file_names[start_index:start_index + num_samples_in_this_class] = \\\n self.__image_file_names[start_index:start_index + num_samples_in_this_class][permutation]\n start_index += num_samples_in_this_class\n\n class_with_min_samples = np.argmin(self.__samples_per_class)\n num_min_samples = self.__samples_per_class[class_with_min_samples]\n\n # Remove all elements in the majority classes in order to balance their sample numbers to the minority class.\n start_index = 0\n elements_to_delete = []\n for num_samples_in_this_class in self.__samples_per_class:\n new_indices_to_delete = [i for i in\n range(start_index + num_min_samples, start_index + num_samples_in_this_class)]\n elements_to_delete.extend(new_indices_to_delete)\n start_index += num_samples_in_this_class\n\n self.__labels = np.delete(self.__labels, elements_to_delete)\n self.__image_file_names = np.delete(self.__image_file_names, elements_to_delete)\n\n # Check for class balance.\n cumulator = np.zeros(shape=3)\n for label in self.__labels:\n cumulator[label] += 1\n for i in range(2):\n if cumulator[i] != cumulator[i + 1]:\n raise RuntimeError(\"Error in data balancing: resulting label distribution: {}\".format(cumulator))\n\n self.__samples_per_class = [num_min_samples for _ in range(self.num_classes)]", "def process_sample_val(self):\n raise NotImplementedError", "def test_sampling1 ():\n cpus = list(range(C.N_PARALLEL))\n affinity = dict(cuda_idx=C.CUDA_IDX, workers_cpus=cpus)\n agent_ = findOptimalAgent(reward=None)\n agent = CategoricalPgAgent(AcrobotNet, \n initial_model_state_dict=agent_.state_dict())\n s0 = np.array([1, 0, 1/np.sqrt(2), 1/np.sqrt(2), 4, 2], dtype=np.float)\n sampler = SerialSampler(\n EnvCls=rlpyt_make,\n env_kwargs=dict(id=C.ENV, reward=None, internalStateFn=C.INTERNAL_STATE_FN, s0=s0),\n batch_T=500,\n batch_B=16,\n max_decorrelation_steps=0,\n )\n sampler.initialize(\n agent=agent,\n affinity=affinity,\n seed=0\n )\n _, traj_info = sampler.obtain_samples(0)\n print(np.mean([t['DiscountedReturn'] for t in traj_info]))", "def _call(self, dataset):\n # first cast to floating point dtype, because noise is most likely\n # floating point as well and '+=' on int would not do the right thing\n 
# XXX should we already deepcopy here to keep orig dtype?\n if not N.issubdtype(dataset.samples.dtype, N.float):\n dataset.setSamplesDType('float32')\n\n if __debug__:\n nfeatures = dataset.nfeatures\n\n sens_map = []\n\n # compute the datameasure on the original dataset\n # this is used as a baseline\n orig_measure = self.__datameasure(dataset)\n\n # do for every _single_ feature in the dataset\n for feature in xrange(dataset.nfeatures):\n if __debug__:\n debug('PSA', \"Analyzing %i features: %i [%i%%]\" \\\n % (nfeatures,\n feature+1,\n float(feature+1)/nfeatures*100,), cr=True)\n\n # make a copy of the dataset to preserve data integrity\n wdata = deepcopy(dataset)\n\n # add noise to current feature\n wdata.samples[:, feature] += self.__noise(size=wdata.nsamples)\n\n # compute the datameasure on the perturbed dataset\n perturbed_measure = self.__datameasure(wdata)\n\n # difference from original datameasure is sensitivity\n sens_map.append(perturbed_measure - orig_measure)\n\n if __debug__:\n debug('PSA', '')\n\n return N.array(sens_map)", "def init_data(dataset_config: dict):\n # train and dev will be in random order, test may be ordered according to labels\n if dataset_config[\"name\"] == \"CoLA\":\n train, dev, test, num_classes = load_cola(dataset_config)\n elif dataset_config[\"name\"] == \"AGNews\":\n train, dev, test, num_classes = load_ag_news(dataset_config)\n elif dataset_config[\"name\"] == \"DBPedia\":\n train, dev, test, num_classes = load_dbpedia(dataset_config)\n elif dataset_config[\"name\"] == \"YRF\":\n train, dev, test, num_classes = load_yrf(dataset_config)\n else:\n raise NameError(f\"Dataset {dataset_config['name']} not implemented.\")\n # etc.\n\n # shrink size if debugging\n if dataset_config[\"debug\"]:\n # choose a random subset using huggingface select function\n train = train.select(random.sample(range(len(train)), k=200))\n dev = dev.select(random.sample(range(len(dev)), k=40))\n test = test.select(random.sample(range(len(test)), k=200))\n\n # create class imbalance\n random.seed(dataset_config[\"seed\"])\n if dataset_config[\"pool_balance\"] == \"balanced\":\n pass\n elif dataset_config[\"pool_balance\"] == \"imbalanced\":\n train = train.filter(lambda example: create_imbalanced_dataset(example, dataset_config[\"imbalance_prop\"], dataset_config['imbalance_cls']))\n else:\n NameError(f\"pool_balance = {dataset_config['pool_balance']} not allowed\")\n\n if dataset_config[\"dev_balance\"] == \"balanced\":\n pass\n elif dataset_config[\"dev_balance\"] == \"imbalanced\":\n dev = dev.filter(lambda example: create_imbalanced_dataset(example, dataset_config[\"imbalance_prop\"], dataset_config['imbalance_cls']))\n else:\n NameError(f\"dev_balance = {dataset_config['dev_balance']} not allowed\")\n\n # get seed labelled pool indices (using the same seed data every time)\n random.seed(dataset_config[\"seed\"])\n if dataset_config[\"seed_balance\"] == \"balanced\":\n # this is random (will have some variance vs pool)\n indices = list(range(len(train)))\n unlabelled_pool_idx, labelled_pool_idx = split(\n indices,\n random_state=dataset_config[\"seed\"],\n test_size=dataset_config[\"seed_size\"]\n )\n elif dataset_config[\"seed_balance\"] == \"stratified\":\n # this is the same as the underlying train set (which may be unbalanced)\n indices = list(range(len(train)))\n unlabelled_pool_idx, labelled_pool_idx = split(\n indices,\n random_state=dataset_config[\"seed\"],\n test_size=dataset_config[\"seed_size\"],\n stratify=train['label']\n )\n elif 
dataset_config[\"seed_balance\"] == \"imbalanced\":\n # artificially sample an imbalanced seed set from the pool\n unlabelled_pool_idx, labelled_pool_idx = create_imbalanced_seed(\n train,\n num_classes,\n dataset_config[\"seed_size\"],\n dataset_config['imbalance_prop'],\n dataset_config['imbalance_cls']\n )\n else:\n raise NameError(f\"seed_balance = {dataset_config['seed_balance']} not allowed\")\n\n return train, dev, test, num_classes, labelled_pool_idx, unlabelled_pool_idx", "def preprocess(self):\n \n file_name_list = os.listdir(self.image_dir)\n random.seed(1234)\n random.shuffle(file_name_list)\n \n for i,d in enumerate(self.domains):\n self.attr2idx[d]=i \n\n for i, file_name in enumerate(file_name_list):\n if (file_name.startswith('X_')):\n continue\n \n parts = file_name.split(\"-\")\n label = int(parts[0])\n if label not in self.domains:\n continue\n img_name = file_name\n\n count=self.get_sample_count(label)\n if count<self.valid_set_size:\n # create holdout set on the fly\n utils.copy_file(self.image_dir,self.valid_set_dir,img_name)\n else:\n self.dataset.append([img_name, self.attr2idx[label]])\n \n self.increment_sample_count(label)\n\n print(\"Sample count per domain: \"+str(self.sample_count)+\" (including holdout set, holdout size per domain is: \"+str(self.valid_set_size)+\")\")\n print('Finished preprocessing the dataset...')", "def balance_frame(frame):\n tumor_sample_df = frame.loc[frame[\"Label\"] != \"0\"]\n no_nodule_df = frame.loc[frame[\"Label\"] == \"0\"]\n\n no_nodule_sample = no_nodule_df.sample(n=len(tumor_sample_df) // 2)\n\n return pd.concat([tumor_sample_df, no_nodule_sample])", "def test_DALearner(self):\n\n # Instantiate DomainAdaptationLearner\n\n est = AutomatedDomainAdaptationLearner(models=automl_model_reg(),\n final_models=automl_model_reg())\n\n est.fit(Y, T, X=X)\n _ = est.effect(X)", "def sample(self, like_params):\n\t\traise NotImplementedError", "def collect_data(self,sensation,action,reward,next_sensation):\n pass", "def sample(self, processor, batch_size=None):\n pass", "def Balance_Data(data, dependent):\n\t#print \"Balancing data...\"\n\tpositives = data[(data[dependent]==1)]\n\tnegatives = data.drop(positives.index).sample(n=len(positives))\n\tbalanced_data = pd.concat([positives,negatives])\n\treturn balanced_data", "def learn(self):\n pass", "def learn(self):\n pass", "def predict_collect(self, src, collector): # real signature unknown; restored from __doc__\n pass", "def test_metric_learning(smote_class):\n nn_params = {'metric': 'precomputed',\n 'metric_learning_method': 'ITML'}\n X, y = smote_class(nn_params=nn_params).sample(dataset['data'],\n dataset['target'])\n\n assert np.unique(y).shape[0] == 2\n assert X.shape[0] > 0", "def __init__(self):\n\n # sample must be between 0 and 1\n if self.sample <= 0 or self.sample > 1:\n raise Exception('sample {} should be > 0 and <= 1'.format(self.sample))\n\n # sample RDD if sample is specified AND rdd has not been pre-sampled\n if self.sample < 1 and not self.pre_sampled:\n self.rdd = self.rdd.sample(False, self.sample, self.seed)\n\n # Assign each RDD with counter. 
Reduce and collect.\n collectedCounts = self.rdd.reduceByKey(lambda x,y: x+y) \\\n .collect() # (id, count), number of times that count appears)\n\n # function that re-calculates coverage based on sampling\n approximateCounts = lambda counts, sample: int(counts * 1.0/sample)\n\n # restructure each record so record structure is (key: sampleId, value: (coverage, count))\n x = list(map(lambda x: (x[0][0], (x[0][1], approximateCounts(x[1], self.sample))), collectedCounts))\n\n # create dictionary where keys are the sampleId\n self.collectedCounts = collections.defaultdict(set)\n for k, v in x:\n self.collectedCounts[k].add(v)", "def split_dataset(dataset, test_size):\r\n random.shuffle(dataset)\r\n \r\n rating_negativ = []\r\n rating_positiv = []\r\n \r\n for row in dataset:\r\n if int(row[1]) == 0:\r\n rating_negativ.append(row)\r\n elif int(row[1]) == 1:\r\n rating_positiv.append(row)\r\n\r\n random.shuffle(rating_positiv)\r\n random.shuffle(rating_negativ) \r\n \r\n neg_train_data, neg_val_data = train_test_split(rating_negativ, test_size=test_size)\r\n pos_train_data, pos_val_data = train_test_split(rating_positiv, test_size=test_size)\r\n \r\n train_data = neg_train_data + pos_train_data\r\n val_data = neg_val_data + pos_val_data\r\n \r\n random.shuffle(train_data)\r\n random.shuffle(val_data)\r\n \r\n return train_data, val_data", "def __init__(self,\n base_dataset='cifar10',\n take_amount=None,\n take_amount_seed=13,\n add_svhn_extra=False,\n aux_data_filename=None,\n add_aux_labels=True,\n aux_take_amount=None,\n train=False,\n **kwargs):\n\n if base_dataset == 'cifar10':\n self.dataset = datasets.CIFAR10(train=train, **kwargs)\n else:\n raise ValueError('Dataset %s not supported' % base_dataset)\n self.base_dataset = base_dataset\n self.train = train\n\n if self.train:\n if take_amount is not None:\n rng_state = np.random.get_state()\n np.random.seed(take_amount_seed)\n take_inds = np.random.choice(len(self.sup_indices),\n take_amount, replace=False)\n np.random.set_state(rng_state)\n\n logger = logging.getLogger()\n logger.info('Randomly taking only %d/%d examples from training'\n ' set, seed=%d, indices=%s',\n take_amount, len(self.sup_indices),\n take_amount_seed, take_inds)\n self.targets = self.targets[take_inds]\n self.data = self.data[take_inds]\n\n self.sup_indices = list(range(len(self.targets)))\n self.unsup_indices = []\n\n if aux_data_filename is not None:\n aux_path = aux_data_filename\n print(\"Loading data from %s\" % aux_path)\n with open(aux_path, 'rb') as f:\n aux = pickle.load(f)\n aux_data = aux['data']\n aux_targets = aux['extrapolated_targets']\n orig_len = len(self.data)\n\n if aux_take_amount is not None:\n rng_state = np.random.get_state()\n np.random.seed(take_amount_seed)\n take_inds = np.random.choice(len(aux_data),\n aux_take_amount, replace=False)\n np.random.set_state(rng_state)\n\n logger = logging.getLogger()\n logger.info(\n 'Randomly taking only %d/%d examples from aux data'\n ' set, seed=%d, indices=%s',\n aux_take_amount, len(aux_data),\n take_amount_seed, take_inds)\n aux_data = aux_data[take_inds]\n aux_targets = aux_targets[take_inds]\n\n self.data = np.concatenate((self.data, aux_data), axis=0)\n\n if not add_aux_labels:\n self.targets.extend([-1] * len(aux_data))\n else:\n self.targets.extend(aux_targets)\n # note that we use unsup indices to track the labeled datapoints\n # whose labels are \"fake\"\n self.unsup_indices.extend(\n range(orig_len, orig_len+len(aux_data)))\n\n logger = logging.getLogger()\n logger.info(\"Training set\")\n 
logger.info(\"Number of training samples: %d\", len(self.targets))\n logger.info(\"Number of supervised samples: %d\",\n len(self.sup_indices))\n logger.info(\"Number of unsup samples: %d\", len(self.unsup_indices))\n logger.info(\"Label (and pseudo-label) histogram: %s\",\n tuple(\n zip(*np.unique(self.targets, return_counts=True))))\n logger.info(\"Shape of training data: %s\", np.shape(self.data))\n\n # Test set\n else:\n self.sup_indices = list(range(len(self.targets)))\n self.unsup_indices = []\n\n logger = logging.getLogger()\n logger.info(\"Test set\")\n logger.info(\"Number of samples: %d\", len(self.targets))\n logger.info(\"Label histogram: %s\",\n tuple(\n zip(*np.unique(self.targets, return_counts=True))))\n logger.info(\"Shape of data: %s\", np.shape(self.data))", "def sample_action(self, obs, explore_prob):\n raise NotImplementedError", "def main(model, balancing, in_file, balanced_directory, result_directory, unbalanced_filepath):\n if 'all' in balancing:\n balancing = ALL_BALANCE\n\n if 'all' in model:\n model = ALL_MODELS\n\n print(model, balancing, in_file, balanced_directory, result_directory, unbalanced_filepath)\n\n resultDict = {}\n\n for resampler_name in balancing:\n\n if resampler_name == 'none':\n balanced_filepath = in_file\n else:\n resampler = Resampler(resampler_name, in_file)\n balanced_filepath = resampler.resample_and_write_to_csv(balanced_directory)\n\n balancing_method_dict = {}\n\n for model_name in model:\n builder = ModelBuilder(model_name=model_name,\n filename=balanced_filepath,\n unbalanced_filename=unbalanced_filepath,\n separator=DEFAULT_SEPARATOR,\n labels_header=DEFAULT_YLABEL\n )\n results, pred_balanced, real_balanced, pred_unbalanced, real_unbalanced = builder.get_result()\n print(results)\n balancing_method_dict[model_name] = results\n\n resultDict[resampler_name] = balancing_method_dict\n\n scoring_starter(result_dict=resultDict, base_output_directory=\"results/\")\n\n return 0", "def __init__(self, data_source: any, batch_size: int=1):\n super(BucketingSampler, self).__init__(data_source)\n self.data_source = data_source\n ids = list(range(0, len(data_source)))\n # TODO: Optimise\n self.bins = [ids[i:i + batch_size] for i in range(0, len(ids), batch_size)]", "def skip_sample_for_balanced_class(self, img_data):\n class_in_img = False\n for bbox in img_data['bboxes']:\n cls_name = bbox['class']\n if cls_name == self.curr_class:\n class_in_img = True\n ## 更新一次,获取下一次的值\n self.curr_class = next(self.class_cycle)\n break \n if class_in_img:\n return False\n else:\n return True", "def gw_heritability(\n input_snp_filename: \"Data Input, use the SNPs file from dataParse\",\n output_summary_filename: 'output file for the genomewide results summary, use .csv',\n logger_filename: 'file for the logger, use a txt',\n sweeps: \"number of samples for each chain\" = 1000,\n burnin: \"number of burnin samples\" = 1000,\n n_chains: \"number of chains of the sampler\" = 4,\n n_cores: \"number of parallel cores to use\" = 4,\n N_1kG: \"number of SNPs onwhich the LD-score is calculates\" = 1290028,\n chromosome: \"chromosome on which the analysis is run\" = \"all\",\n sep: \"separator for the input files, use t for tab separated (not \\t)\" = \",\",\n model: 'regression model'='normal',\n fix_intercept = False,\n ):\n\n # Initialisation of the logger\n output_logger = log.setup_logger(\"output_logger\", logger_filename)\n log.initialise_log(output_logger,\n 'genome-wide regression, model: %s' %model,\n [input_snp_filename],\n [output_summary_filename],\n 
sweeps,\n burnin,\n chromosome = str(chromosome),\n other_params_diz = {'chains': n_chains, 'cores': n_cores})\n\n # Initialisation function, it reads the summary stats file, filters the SNPs,\n # creates the output files\n\n logging.info(\"Start Analysis\")\n\n snps = s.Snps()\n # read table\n snps.read_table(input_snp_filename, separator=sep)\n # generate chi squared stats\n snps.generate_stats()\n # update the summary stats\n snps.update_summary()\n output_logger.info(\" Sample size \" + str(snps.n_patients) + \"\\n\")\n\n\n snps.apply_filter_table(s.baghera_filter)\n snps.update_summary()\n output_logger.info(\"After baghera init filter.\\nNumber of SNPs: %s\\nNumber of genes: %s\\n\" \\\n %(str(snps.n_snps), str(snps.n_genes)) )\n\n # Non coding SNPs are assigned to a dummy gene, such that the regression is done on the entire SNPs' set\n snps.rename_non_annotated(name='NonCoding')\n\n if chromosome != \"all\":\n snps.apply_filter_table(snps.cut_single_chrom, **{'chromosome': chromosome})\n output_logger.info(\n \"Analysis restricted to chr %s\" %str(chromosome) )\n\n snps.update_summary()\n output_logger.info(\"Analysis. Number of SNPs: %s\\n, Number of genes: %s\\n\" \\\n %(str(snps.n_snps), str(snps.n_genes)) )\n\n\n if model =='normal':\n [intercept, slope] = heritability.gw_normal(snps, output_summary_filename, output_logger,\n sweeps, burnin, n_chains, n_cores, N_1kG, fix_intercept)\n elif model=='gamma':\n [intercept, slope] = heritability.gw_normal(snps, output_summary_filename, output_logger,\n sweeps, burnin, n_chains, n_cores, N_1kG, fix_intercept)\n else:\n logging.info('Normal model by default')\n [intercept, slope] = heritability.gw_normal(snps, output_summary_filename, output_logger,\n sweeps, burnin, n_chains, n_cores, N_1kG, fix_intercept)\n logging.info(\"Analysis complete\")", "def run_model_sampler(Y, latent_dim, n_iter):\n\tF_sample = []\n\tloading_sample = []\n\tvariance_sample = []\n\ttrace_sample = []\n\tmse_history = []\n\tF = initiate_factors(Y, latent_dim)\n\tfor i in tqdm(range(n_iter)):\n\t\tF, loading_matrix, Y_variance, gp_traces, mse = gibbs_sampling(F, Y)\n\t\tF_sample.append(F)\n\t\tloading_sample.append(loading_matrix)\n\t\tvariance_sample.append(Y_variance)\n\t\ttrace_sample.append(gp_traces)\n\t\tmse_history.append(mse)\n\treturn F_sample, loading_sample, variance_sample, trace_sample, mse_history", "def learn(self):\n event_batch = self.memory.sample(self.batch_size)\n \n if event_batch is None:\n return\n\n event_batch = self.memory.deserialize(event_batch)\n self.update_critic(event_batch)\n self.update_actor(event_batch)\n self.update_target(self.local_actor, self.target_actor)\n self.update_target(self.local_critic, self.target_critic)", "def _oversample_data(\n self,\n _labels: np.ndarray,\n valid_indices: np.ndarray,\n elm_start: np.ndarray,\n elm_stop: np.ndarray,\n index_buffer: int = 20,\n ) -> np.ndarray:\n # indices for sampling data\n sample_indices = valid_indices\n\n # oversample active ELM periods to reduce class imbalance\n fraction_elm = np.count_nonzero(_labels >= 0.5) / _labels.shape[0]\n LOGGER.info(f\"Active ELM fraction (raw data): {fraction_elm:.3f}\")\n oversample_count = int((1 - fraction_elm) / fraction_elm) - 1\n LOGGER.info(\n f\"Active ELM oversampling for balanced data: {oversample_count}\"\n )\n if self.balance_classes:\n for i_start, i_stop in zip(elm_start, elm_stop):\n assert np.all(_labels[i_start : i_stop + 1] >= 0.5)\n active_elm_window = np.arange(\n i_start - index_buffer, i_stop + index_buffer, 
dtype=\"int\"\n )\n active_elm_window = np.tile(\n active_elm_window, [oversample_count]\n )\n sample_indices = np.concatenate(\n [sample_indices, active_elm_window]\n )\n fraction_elm = (\n np.count_nonzero(_labels[sample_indices] >= 0.5)\n / sample_indices.size\n )\n LOGGER.info(\n f\"Active ELM fraction (balanced data): {fraction_elm:.3f}\"\n )\n return sample_indices", "def histogram_classification_test(self, dataset, labels, number_of_labels, \n method, noise_ratio, sparsity_coeff,\n sensitivity):\n net_activity = self.full_net_dataset_response(dataset, method, \n noise_ratio, \n sparsity_coeff,\n sensitivity)\n last_layer_activity = net_activity[-1]\n histograms = []\n normalized_histograms = []\n n_basis = self.features_number[-1]\n # Normalization factor for building normalized histograms\n input_spikes_per_recording = np.zeros(len(dataset))\n for recording in range(len(dataset)):\n histograms.append(np.zeros(n_basis*(2**(self.layers-1))))\n normalized_histograms.append(np.zeros(n_basis*(2**(self.layers-1))))\n for recording in range(len(dataset)):\n input_spikes_per_recording[recording] += len(dataset[recording][0])\n for sublayer in range(len(last_layer_activity)):\n recording_histogram = sum(last_layer_activity[sublayer][recording])\n histograms[recording][n_basis*sublayer:n_basis*(sublayer+1)] += recording_histogram \n for recording in range(len(dataset)):\n normalized_histograms[recording] = histograms[recording]/input_spikes_per_recording[recording]\n # compute the distances per each histogram from the models\n distances = []\n predicted_labels = []\n for recording in range(len(dataset)):\n single_recording_distances = []\n for label in range(number_of_labels):\n single_label_distances = [] \n single_label_distances.append(distance.euclidean(histograms[recording],self.histograms[label]))\n single_label_distances.append(distance.euclidean(normalized_histograms[recording],self.normalized_histograms[label]))\n Bhattacharyya_array = np.array([np.sqrt(a*b) for a,b in zip(normalized_histograms[recording], self.normalized_histograms[label])]) \n single_label_distances.append(-np.log(sum(Bhattacharyya_array)))\n single_recording_distances.append(single_label_distances)\n single_recording_distances = np.array(single_recording_distances)\n single_recording_predicted_labels = np.argmin(single_recording_distances, 0)\n distances.append(single_recording_distances)\n predicted_labels.append(single_recording_predicted_labels)\n self.test_histograms = histograms\n self.test_normalized_histograms = normalized_histograms \n # Computing the results\n eucl = 0\n norm_eucl = 0\n bhatta = 0\n for recording,true_label in enumerate(labels):\n eucl += (predicted_labels[recording][0] == true_label)/len(labels)\n norm_eucl += (predicted_labels[recording][1] == true_label)/len(labels)\n bhatta += (predicted_labels[recording][2] == true_label)/len(labels)\n prediction_rates = [eucl, norm_eucl, bhatta]\n if self.verbose is True:\n print(\"Testing ended, you can also look at the test histograms with in\"+\n \" the attribute .test_histograms and .test_normalized_histograms, \"+\n \"or using the .plot_histograms method\")\n return prediction_rates, distances, predicted_labels", "def __init__(self):\n\n #call super class's __init__ method\n super(TRiseSampler, self).__init__(name=\"trise\", observed=False)", "def mnist_custom_split(split_ratio=0.8, random_seed=0, shuffle_dataset=True, dataset='mnist'):\n if dataset[:5] == 'mnist':\n dataset = datasets.MNIST(definitions.DATA_PATH)\n elif dataset[:6] == 
'hmnist':\n dataset = datasets.DatasetFolder(definitions.HMNIST_DATA_FOLDER, data_loader, ALL_EXTS),\n elif dataset[:8] == 'diamonds':\n dataset = datasets.DatasetFolder(definitions.DIAMONDS_DATA_FOLDER, data_loader, ALL_EXTS),\n else:\n print('[ERROR] Unknown dataset for split_and_train! => %s' % dataset)\n exit(1)\n\n dataset_size = len(dataset)\n\n indices = list(range(dataset_size))\n split = int(np.floor(split_ratio * dataset_size))\n logger.debug('Split dataset {}'.format(split))\n if shuffle_dataset:\n np.random.seed(random_seed)\n np.random.shuffle(indices)\n # ==> Mistakes\n # train_indices, val_indices = indices[split:], indices[:split]\n train_indices, val_indices = indices[:split], indices[split:]\n\n # Creating PT data samplers and loaders:\n train_sampler = torch.utils.data.SubsetRandomSampler(train_indices)\n valid_sampler = torch.utils.data.SubsetRandomSampler(val_indices)\n\n return train_sampler, valid_sampler", "def my_assign_weights(context, data):\n pass", "def splitmetric(self, dataset, attr, target_attr):\n freq = {}\n splitinfo = 0.0\n \n #Call information gain\n gain = ID3.splitmetric(self, dataset, attr, target_attr);\n samplenumbers = len(dataset)\n # Calculate the frequency of each of the values in the split attribute\n for record in dataset:\n if (record[attr] in freq):\n freq[record[attr]] += 1.0\n else:\n freq[record[attr]] = 1.0\n \n #Calculate split info, entropy of splitter\n for val in list(freq.values()):\n splitinfo += (- val / samplenumbers) * math.log(val / samplenumbers, 2)\n \n #Split info equals 0 when there only one class in data set\n if splitinfo == 0:\n splitinfo = 0.00000001\n \n return gain / splitinfo", "def sampling(train_set, train_meta, klass, label, n_samples_pos, rate_neg, fold, path_idxs):\n\tprint('-- SAMPLING TRAINNING')\n\tdirectory_idxs = path_idxs+fold+'/'+str(int(klass))+'/'\n\tif(os.path.isdir(directory_idxs)):\n\t\tprint('loading indexes...')\n\t\tidxs_class_pos = np.loadtxt(directory_idxs+'idxs_pos_train.txt', dtype=int)\n\t\tidxs_class_neg = np.loadtxt(directory_idxs+'idxs_neg_train.txt', dtype=int)\n\telse:\n\t\tidxs_class_pos = (train_meta[ : , label] == klass).nonzero()[0]\n\t\tidxs_class_neg = (train_meta[ : , label] != klass).nonzero()[0]\n\t\tif(n_samples_pos < len(idxs_class_pos)):\n\t\t\tidxs_class_pos = np.random.choice(idxs_class_pos, n_samples_pos)\n\t\tidxs_class_neg = np.random.choice(idxs_class_neg, int(n_samples_pos*rate_neg))\n\t\tprint('saving indexes...')\n\t\tos.makedirs(directory_idxs)\n\t\tnp.savetxt(directory_idxs+'idxs_pos_train.txt', idxs_class_pos, fmt='%d')\n\t\tnp.savetxt(directory_idxs+'idxs_neg_train.txt', idxs_class_neg, fmt='%d')\n\n\ttrain_set = np.vstack((train_set[idxs_class_pos], train_set[idxs_class_neg]))\n\ttrain_meta = np.vstack((train_meta[idxs_class_pos], train_meta[idxs_class_neg]))\n\ttrain_meta[:, label] = 1\n\ttrain_meta[len(idxs_class_pos):, label] = -1\n\treturn [train_set, train_meta]", "def train(self, dataset):\n \"*** YOUR CODE HERE ***\"\n batch_size = 1\n while True:\n error = False\n for x, y in dataset.iterate_once(batch_size):\n y_pred = self.get_prediction(x)\n y = nn.as_scalar(y)\n if y != y_pred:\n error = True\n nn.Parameter.update(self.get_weights(),x,y)\n if error == False:\n break", "def sample_posterior(self):\n if(self.Bayesian):\n for i in range(self.num_layers):\n getattr(self, 'LSTMCell%i'%(i+1)).sample_posterior()", "def sample(self, state, model_args, model_kwargs):\n raise NotImplementedError", "def learnDataset(self, data_loader):\n print(\"learning 
dataset\")\n\n count = 0\n for sample in data_loader:\n input_sentence = sample[\"input\"][0]\n target_sentence = sample[\"target\"][0]\n\n prev_word = None\n for word_idx in range(1, 16):\n target_word = int(target_sentence[word_idx])\n self.model_parts[word_idx - 1].populateFactors(\n input_sentence, target_word, prev_word\n )\n prev_word = target_word\n\n print(\"{}/127940\".format(count), end = \"\\r\")\n count += 1\n print(\"127940/127940\")\n\n print(\"before fixed\", list(self.model_parts[0].factors[0].d.keys())[:10])\n for i in range(15):\n self.model_parts[i].fixed()\n print(\"after fixed\", self.model_parts[0].factors[0].keys[:10])", "def rate(self, dataset, targets):\n raise NotImplementedError", "def main(dataset_name, disease_label):\n # ----------------------------------------------------------------------------\n n_bootstrap = 1000\n\n model_name = 'supervised_aae'\n\n participants_path = PROJECT_ROOT / 'data' / dataset_name / 'participants.tsv'\n freesurfer_path = PROJECT_ROOT / 'data' / dataset_name / 'freesurferData.csv'\n\n hc_label = 1\n\n # ----------------------------------------------------------------------------\n bootstrap_dir = PROJECT_ROOT / 'outputs' / 'bootstrap_analysis'\n model_dir = bootstrap_dir / model_name\n ids_path = PROJECT_ROOT / 'outputs' / (dataset_name + '_homogeneous_ids.csv')\n\n # ----------------------------------------------------------------------------\n clinical_df = load_dataset(participants_path, ids_path, freesurfer_path)\n clinical_df = clinical_df.set_index('participant_id')\n\n tpr_list = []\n auc_roc_list = []\n effect_size_list = []\n\n for i_bootstrap in tqdm(range(n_bootstrap)):\n bootstrap_model_dir = model_dir / '{:03d}'.format(i_bootstrap)\n\n output_dataset_dir = bootstrap_model_dir / dataset_name\n output_dataset_dir.mkdir(exist_ok=True)\n\n analysis_dir = output_dataset_dir / '{:02d}_vs_{:02d}'.format(hc_label, disease_label)\n analysis_dir.mkdir(exist_ok=True)\n\n # ----------------------------------------------------------------------------\n normalized_df = pd.read_csv(output_dataset_dir / 'normalized.csv', index_col='participant_id')\n reconstruction_df = pd.read_csv(output_dataset_dir / 'reconstruction.csv', index_col='participant_id')\n reconstruction_error_df = pd.read_csv(output_dataset_dir / 'reconstruction_error.csv',\n index_col='participant_id')\n\n # ----------------------------------------------------------------------------\n # Compute effect size of the brain regions for the bootstrap iteration\n diff_df = np.abs(normalized_df - reconstruction_df)\n region_df = compute_brain_regions_deviations(diff_df, clinical_df, disease_label)\n effect_size_list.append(region_df['effect_size'].values)\n region_df.to_csv(analysis_dir / 'regions_analysis.csv', index=False)\n\n # ----------------------------------------------------------------------------\n # Compute AUC-ROC for the bootstrap iteration\n roc_auc, tpr = compute_classification_performance(reconstruction_error_df, clinical_df, disease_label)\n auc_roc_list.append(roc_auc)\n tpr_list.append(tpr)\n\n (bootstrap_dir / dataset_name).mkdir(exist_ok=True)\n comparison_dir = bootstrap_dir / dataset_name / ('{:02d}_vs_{:02d}'.format(hc_label, disease_label))\n comparison_dir.mkdir(exist_ok=True)\n\n # ----------------------------------------------------------------------------\n # Save regions effect sizes\n effect_size_df = pd.DataFrame(columns=COLUMNS_NAME, data=np.array(effect_size_list))\n effect_size_df.to_csv(comparison_dir / 'effect_size.csv')\n\n # Save AUC 
bootstrap values\n auc_roc_list = np.array(auc_roc_list)\n auc_roc_df = pd.DataFrame(columns=['AUC-ROC'], data=auc_roc_list)\n auc_roc_df.to_csv(comparison_dir / 'auc_rocs.csv', index=False)\n\n # ----------------------------------------------------------------------------\n # Create Figure 3 of the paper\n tpr_list = np.array(tpr_list)\n mean_tprs = tpr_list.mean(axis=0)\n tprs_upper = np.percentile(tpr_list, 97.5, axis=0)\n tprs_lower = np.percentile(tpr_list, 2.5, axis=0)\n\n plt.plot(np.linspace(0, 1, 101),\n mean_tprs,\n 'b', lw=2,\n label='ROC curve (AUC = {:0.3f} ; 95% CI [{:0.3f}, {:0.3f}])'.format(np.mean(auc_roc_list),\n np.percentile(auc_roc_list, 2.5),\n np.percentile(auc_roc_list, 97.5)))\n\n plt.fill_between(np.linspace(0, 1, 101),\n tprs_lower, tprs_upper,\n color='grey', alpha=0.2)\n\n plt.plot([0, 1], [0, 1], 'r--')\n plt.xlim([0, 1])\n plt.ylim([0, 1.05])\n plt.ylabel('True Positive Rate')\n plt.xlabel('False Positive Rate')\n plt.legend(loc='lower right')\n plt.savefig(comparison_dir / 'AUC-ROC.eps', format='eps')\n plt.close()\n plt.clf()\n\n # --------------------------------------------------------------------------------------------\n # Create figure for supplementary materials\n effect_size_df = effect_size_df.reindex(effect_size_df.mean().sort_values().index, axis=1)\n\n plt.figure(figsize=(16, 20))\n plt.hlines(range(101),\n np.percentile(effect_size_df, 2.5, axis=0),\n np.percentile(effect_size_df, 97.5, axis=0))\n\n plt.plot(effect_size_df.mean().values, range(101), 's', color='k')\n plt.axvline(0, ls='--')\n plt.yticks(np.arange(101), effect_size_df.columns)\n plt.xlabel('Effect size')\n plt.ylabel('Brain regions')\n plt.tight_layout()\n plt.savefig(comparison_dir / 'Regions.eps', format='eps')\n plt.close()\n plt.clf()\n\n # --------------------------------------------------------------------------------------------\n # Create Figure 4 of the paper\n effect_size_sig_df = effect_size_df.reindex(effect_size_df.mean().sort_values().index, axis=1)\n lower_bound = np.percentile(effect_size_sig_df, 2.5, axis=0)\n higher_bound = np.percentile(effect_size_sig_df, 97.5, axis=0)\n\n for i, column in enumerate(effect_size_sig_df.columns):\n if (lower_bound[i] < 0) & (higher_bound[i] > 0):\n effect_size_sig_df = effect_size_sig_df.drop(columns=column)\n\n n_regions = len(effect_size_sig_df.columns)\n\n plt.figure()\n plt.hlines(range(n_regions),\n np.percentile(effect_size_sig_df, 2.5, axis=0),\n np.percentile(effect_size_sig_df, 97.5, axis=0))\n\n plt.plot(effect_size_sig_df.mean().values, range(n_regions), 's', color='k')\n plt.axvline(0, ls='--')\n plt.yticks(np.arange(n_regions), effect_size_sig_df.columns)\n plt.xlabel('Effect size')\n plt.ylabel('Brain regions')\n plt.tight_layout()\n plt.savefig(comparison_dir / 'Significant_regions.eps', format='eps')\n plt.close()\n plt.clf()", "def split_data_metrics_learning(cfg):\n actual_pose = cfg['actual_pose']\n target = cfg['target']\n person_ids = cfg['person_ids']\n \n # Split train and val data based on the person ids.\n all_ids = np.arange(1, 21)\n val_ids = cfg['val_ids']\n train_ids = set(all_ids).symmetric_difference(val_ids)\n \n anchor_gallery_split_size = cfg['anchor_gallery_split_size']\n window_width = cfg['window_width']\n overlap = cfg['overlap']\n random_state = cfg['random_state']\n \n # Get only the training set data and the label.\n X_train, y_train = get_req_ids(actual_pose, target, train_ids, person_ids)\n \n # Select the evaluation data that measures the performance of the model on the training set.\n 
train_accuracy_ids = random.sample(train_ids, len(val_ids))\n X_train_acc, y_train_acc = get_req_ids(actual_pose, target, train_accuracy_ids, person_ids)\n \n # Anchor/Gallery set split for the training set.\n X_train_gal, X_train_anchor, y_train_gal, y_train_anchor = train_test(X_train = X_train_acc, y_train = y_train_acc, \n test_size=anchor_gallery_split_size, \n random_state=random_state, stratify=y_train_acc)\n \n # Subsample the gait sequences of the anchor/gallery set of the training set based on the window width and the overlap.\n X_train_gal, y_train_gal = subsample(cfg, X_train_gal, y_train_gal, window_width=window_width, overlap=overlap)\n X_train_anchor, y_train_anchor = subsample(cfg, X_train_anchor, y_train_anchor, window_width=window_width, overlap=overlap)\n \n # Get only the validation set data and the label.\n X_val, y_val = get_req_ids(actual_pose, target, val_ids, person_ids)\n \n # Anchor/Gallery set split for the validation set.\n X_val_gal, X_val_anchor, y_val_gal, y_val_anchor = train_test(X_train = X_val, \n y_train = y_val, \n test_size=anchor_gallery_split_size, \n random_state=random_state, \n stratify=y_val)\n \n \n # If data augmentation parameter is set to True in the configuration dictionary, data augmentation is done for the training set.\n if cfg['augment_data']:\n X_train, y_train = augment_data(X_train, y_train)\n \n # Subsample the gait sequences of the whole training set based on the window width and the overlap.\n X_train, y_train = subsample(cfg, X_train, y_train, window_width=window_width, overlap=overlap)\n \n # Subsample the gait sequences of the anchor/gallery set of the validation set based on the window width and the overlap.\n X_val_gal, y_val_gal = subsample(cfg, X_val_gal, y_val_gal, window_width=window_width, overlap=overlap)\n X_val_anchor, y_val_anchor = subsample(cfg, X_val_anchor, y_val_anchor, window_width=window_width, overlap=overlap)\n \n # Concatenate the gallery and anchor set of the validation data and label as a whole. This is just to maintain the train-val uniformity and \n # is not used anywhere in the project.\n X_val, y_val = np.concatenate((X_val_gal, X_val_anchor)), np.concatenate((y_val_gal, y_val_anchor))\n \n return X_train, X_val, X_train_gal, X_train_anchor, X_val_gal, X_val_anchor, y_train, y_val, y_train_gal, y_train_anchor, y_val_gal, y_val_anchor", "def train(self, dataset) -> None:\n raise NotImplementedError()", "def test_multiclass_balance(self):\n dataset = make_fixture(binary=False, split=False)\n\n oz = ClassBalance()\n assert oz.fit(dataset.y) is oz\n assert oz._mode == BALANCE\n\n # oz.finalize()\n self.assert_images_similar(oz)", "def __init__(self, learning_rate=0.1):\n self.learning_rate = learning_rate\n self._b = 0.0 # y-intercept\n self._w = None # weights assigned to input features\n # count of errors during each iteration\n self.misclassified_samples = []", "def setUp(self):\r\n # Single sample, 6 observations, one of which isn't observed in sample.\r\n self.biom_table1 = parse_biom_table(biom_table_str1)\r\n self.estimator1 = ObservationRichnessEstimator(self.biom_table1,\r\n Chao1MultinomialPointEstimator)" ]
[ "0.62204057", "0.61685914", "0.5826256", "0.5715278", "0.5707387", "0.5623797", "0.561231", "0.55672514", "0.5549182", "0.54543215", "0.5443967", "0.5442228", "0.5435967", "0.54301053", "0.5364784", "0.53321433", "0.53314674", "0.5326067", "0.5314146", "0.5309174", "0.53041863", "0.52870476", "0.5278337", "0.52631384", "0.5234448", "0.5223024", "0.5210587", "0.5210587", "0.5202301", "0.51957947", "0.5192575", "0.51890016", "0.5177264", "0.5157087", "0.51391363", "0.5138633", "0.5137445", "0.51052076", "0.5100113", "0.50981116", "0.509234", "0.50845957", "0.5081525", "0.50767666", "0.50690764", "0.5056726", "0.5052208", "0.50517493", "0.5045796", "0.5045412", "0.50323075", "0.5025166", "0.5018344", "0.50161934", "0.501079", "0.5007035", "0.5003505", "0.49916053", "0.49796742", "0.4972548", "0.4967955", "0.49652842", "0.49646786", "0.49584466", "0.49543694", "0.49500865", "0.49380344", "0.49334908", "0.49297762", "0.49297762", "0.49280125", "0.490627", "0.4903564", "0.49021912", "0.48970738", "0.4886009", "0.48703218", "0.48675308", "0.48660427", "0.48621482", "0.48496193", "0.4848768", "0.48467147", "0.48439085", "0.48432603", "0.48383403", "0.48332515", "0.48310694", "0.48270845", "0.4820164", "0.48188683", "0.4815954", "0.48155198", "0.48131293", "0.4811451", "0.48101753", "0.48096058", "0.48045263", "0.48022583", "0.47983557" ]
0.6245745
0
Load user with specified user ID.
def load_user(user_id): return User.query.get(int(user_id))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def load_user(user_id):\n return User.get_by_id(int(user_id))", "def load_user(user_id):\n return User.get_by_id(int(user_id))", "def load_user(user_id):\n return User.get_by_id(int(user_id))", "def load_user(user_id):\r\n return User.query.get(int(user_id))", "def load_user(id):\n return User.get_by_id(int(id))", "def load_user(user_id):\n\n return User.query.get(int(user_id))", "def load_user(id):\n\treturn User.query.get(int(id))", "def load_user(self, user_id):\n return self.get_user_by_id(int(user_id))", "def load_user(user_id):\n return User.query.get(user_id)", "def load_user(user_id):\n return User.query.get(user_id)", "def load_user(id):\n return User.query.get(int(id))", "def load_user(id):\n return User.query.get(int(id))", "def load_user(id):\n return User.query.get(int(id))", "def load_user(id):\n return User.query.get(int(id))", "def load_user(user_id):\n return Users.query.get(user_id)", "def load_user(id):\n\n return User.query.get(int(id))", "def load_user (userid):\n return User.from_id(userid)", "def load_user(user_id):\n return models.UserModel.query.get(int(user_id))", "def load_user():\n\n return User.query.get(int(id))", "def load_user(user_id):\n if user_id is not None:\n return Users.query.get(user_id)\n return None", "def load_user(id):\n return Users.query.get(id)", "def load_user(user_id):\n if user_id:\n return User.query.get(user_id)\n return None", "def load_user(id):\r\n\r\n\tuser = User.query.get(int(id))\r\n\tif user is not None:\r\n\t\tuser.id = session['user_id']\r\n\t\treturn user\r\n\telse:\r\n\t\treturn None", "def load_user(user_id):\r\n if user_id is not None:\r\n return User.query.get(user_id)\r\n return None", "def load_user(user_id):\n if user_id is not None:\n return User.query.get(user_id)\n return None", "def load_user(user_id):\n if user_id is not None:\n return User.query.get(user_id)\n return None", "def load_user(user_id):\n if user_id is not None:\n return User.query.get(user_id)\n return None", "def load_user(user_id):\n return session.query(User).get(user_id)", "def load_user(user_id):\n return app.user_models.query.get(int(user_id))", "def user_loader(user_id):\n if user_id != app.config['ADMIN_USERNAME']:\n return None\n user = User()\n user.id = user_id\n return user", "def load_user(userid):\n try:\n user = Users.get(userid)\n except:\n return None\n return user", "def load_user(id):\n user = db.session.query(User).filter(User.id == id).first()\n return user", "def user_loader(user_id):\r\n return User.query.get(user_id)", "def load_user(uid):\n return User.query.get(uid)", "def load_user(user_id):\n\n # user = User()\n user = UserMixin()\n user.id = user_id\n return user", "def load_user(_id):\n if utils.api_preflight():\n return User(_id=_id)", "def user_loader(user_id):\n return User.query.get(user_id)", "def user_loader(user_id):\n return User.query.get(user_id)", "def user_loader(user_id):\n return User.query.get(user_id)", "def user_loader(user_id):\n return User.query.get(user_id)", "def load_user(userid):\n client = get_db_client(app, g)\n d = client.is_driver(userid)\n return User(userid, d)", "def user_loader(user_id):\n return Users.query.get(user_id)", "def load_user(user_id: str) -> Optional[User]:\n if user_id is not None:\n return User.query.get(user_id)", "def load_user(id):\n return Admin.query.get(int(id))", "def user_loader(user_id):\n return db.session.query(User).filter(User.id.__eq__(user_id)).first()", "def load_user(character_id):\n return User.query.get(character_id)", "def load_user():\n if 
session.get(\"user_id\"):\n user = User.query.filter_by(user_id=session[\"user_id\"]).first()\n else: \n user = None\n g.user = user", "def load_user(user_id):\n if user_id > 0:\n user = BeUser()\n user.set(\"id\", user_id)\n user.load()\n session = Session()\n session.set_user_id(user.get_id())\n if session.load():\n session_user = user.create_session_user()\n session_user.ip_address = request.remote_addr\n session_user.user_agent = request.user_agent\n session_user.token = session.get_token()\n session_user.timestamp = session.get_timestamp()\n hash = session.get_user_hash_string(session_user)\n if session.is_valid(session.encryption.get_generic_hash(hash)):\n return session_user\n else:\n session.delete()\n return None", "def read(self, user_id):\n if not user_id in self.user_ids:\n raise KeyError(\"Invalid user ID. Check the read-only property \" \\\n \"user_ids for a list of valid IDs.\")\n user_load = self._store[\"id_\" + str(user_id)]\n self._loads[user_id] = user_load\n return user_load", "def load_logged_in_user():\n user_id = session.get(\"user_id\")\n\n if user_id is None:\n g.user = None\n else:\n g.user = (\n get_db().execute(\"SELECT * FROM user WHERE id = ?\", (user_id,)).fetchone()\n )", "def load_logged_in_user():\n user_id = session.get(\"user_id\")\n\n if user_id is None:\n g.user = None\n else:\n g.user = (\n get_db().execute(\"SELECT * FROM user WHERE id = ?\", (user_id,)).fetchone()\n )", "def load_user(userid):\n user = User.query.get(userid)\n\n #On update la derniere connection du user\n if user is not None:\n \tuser.update_last_connection()\n return user", "def load_logged_in_user():\n\n user_id = session.get('user_id')\n\n if user_id is not None:\n g.user = get_db().execute(\n 'SELECT * FROM user WHERE id = ?', [user_id]\n ).fetchone()\n else:\n g.user = None", "def load_user(student_id):\n return Student.query.get(student_id)", "def user_loader(payload):\n user = payload[\"user\"]\n return db.query(User).filter(User.id == user[\"id\"]).first()", "def load_logged_in_user():\n user_id = session.get(\"user_id\")\n if user_id is None:\n g.user = None\n else:\n g.user = {\n \"user_id\": user_id,\n \"user_name\": session.get(\"user_name\")\n }", "def user_loader(self, email):\n if not self.um.check_file_exists(\"users.json\"):\n return\n\n if email not in self.users:\n return\n\n user = User()\n user.id = email\n return user", "def load_logged_in_user():\n user_id = session.get(\"user_id\")\n\n if user_id is None:\n g.user = None\n g.user_json = None\n else:\n g.user = User.query.filter_by(id=user_id).first()\n g.user_json = user_safe_schema.dump(g.user)", "def load_user(self, _user):\n self.conn_cursor.execute(\"SELECT bank FROM users WHERE id=?\", (_user.id,))\n data = self.conn_cursor.fetchone()\n if data:\n _user.set_bank(int(data[0]))", "def _load_user():\n user = session.get('user')\n\n if user is None:\n g.user = None\n else:\n g.user = user", "def load_user_object(user_id):\n logger.debug(\"entering function load_user_object\")\n\n if user_id in local_user_cache:\n time_diff = time.time() - local_user_cache[user_id][\"prev_time\"]\n if time_diff < config.USER_CACHE_TIME:\n logger.info(\"got user object from local cache for user id %s\", user_id)\n return local_user_cache[user_id][\"obj\"]\n\n find_query = {\"user_id\": user_id}\n project_query = {\"_id\": 0, \"user_id\": 1, \"email\": 1}\n result = run_find_one_query(config.USERS_COL, find_query, project_query, error=False)\n if result is not None:\n logger.info(\"loaded the user obj for user id = %s from 
db\", user_id)\n user_obj = User(user_id, result[\"email\"])\n local_user_cache[user_id] = {\"prev_time\": time.time(), \"obj\": user_obj}\n result = user_obj\n\n logger.debug(\"exiting function load_user_object\")\n return result", "def user_loader(username):\n\n users = app.users\n if username not in users:\n return\n\n user = User()\n user.id = username\n user.registrations = app.load_config(username)\n return user", "def load_user(user_email):\n return User.query.get(user_email)", "def read_user_by_id(\n user_id: int = Path(description=\"User id\", example=1),\n) -> schemas.User:\n user = next((usr for usr in users if usr.id == user_id), None)\n if not user:\n raise HTTPException(\n status_code=status.HTTP_404_NOT_FOUND,\n detail=f\"User with id={user_id} doesn't exist\",\n )\n return user", "def retrieve_user_from_id(self, user_id):\n if self.database is None:\n raise Exception(\"No database.\")\n if user_id is None:\n raise Exception(\"Bad parameter.\")\n return self.database.retrieve_user_from_id(user_id)", "def getUser(self, id):\n if not isinstance(id, int):\n # Must be a string. Get the UserId first.\n id = self.getUserId(id)\n u = self.users[id]\n while isinstance(u, int):\n id = u\n u = self.users[id]\n u.id = id\n return u", "def read_user_by_id(\n user_id: PyObjectId,\n current_user: Users = Depends(deps.get_current_active_user),\n) -> Optional[User]:\n user = dao.user.get_user_by_id(id=user_id)\n if user == current_user:\n return user\n if not user:\n raise HTTPException(\n status_code=400, detail=\"The user doesn't exist.\"\n )\n # TODO: toggle this if we only want admins to be able to see profiles other than their own.\n # if not dao.user.is_superuser(current_user):\n # raise HTTPException(\n # status_code=400, detail=\"The user doesn't have enough privileges\"\n # )\n return user", "def get_user(cls, user_id):\n try:\n return User.objects.get(pk=user_id)\n except User.DoesNotExist:\n return None", "def get_user_by_id(self, id):\n\t\treturn self.users.get(id)", "async def fetch_user(self, id: str):\n user = await self.http.get_user(id)\n return User(state=self.http, data=user)", "def loadUser(self):\n if os.path.isfile(self.user_file):\n logging.info(\"Found User File\")\n with open(self.user_file, \"r\") as json_file:\n self.user = json.load(json_file)\n self.checkFiles()\n else:\n self.user = {\n \"Save\": \"\",\n \"Of\": \"\",\n \"Depth\": \"\",\n \"Video\": \"\",\n \"Colour\": \"#1a1a1b\",\n \"GT\": \"\",\n }\n self.saveUser()", "def get_user(self, user_id):\n User = get_user_model()\n try:\n return User.objects.get(pk=user_id)\n except User.DoesNotExist:\n return None", "def get(self, user_id):\n return User.get(user_id)", "def get_user(user_id):\n user = storage.get(\"User\", user_id)\n if user is None:\n abort(404)\n return user.to_dict()", "def load(self):\n\n self.refresh_token()\n\n endpoint = app.config['API']['url'] + 'user/get/' + self._id\n response = requests.get(\n endpoint,\n verify = app.config['API']['verify_ssl'],\n headers = {\n 'Authorization': self.token,\n 'API-Key': app.config['API_KEY']\n },\n )\n\n if response.status_code != 200:\n self.logger.error('[%s] %s' % (\n response.status_code, response.reason)\n )\n err = \"Could not retrieve user %s from KDM API @ %s\"\n raise utils.Logout(err % (self._id, app.config['API']['url']))\n\n user_attribs = utils.convert_json_dict(response.json()['user'])\n for attrib in user_attribs.keys():\n if attrib not in ['_id', 'password']:\n setattr(self, attrib, user_attribs[attrib])", "async def read_user(user_id: 
str = Path(..., description=\"ID value of the desired user\"), db_handler: DBHandler = Depends(database_dependency)):\n user_record = await db_handler.select_user(user_id=user_id)\n user_record = init_BaseUser(user_record)\n\n return user_record", "def get_user(self, user_id):\n try:\n User = get_user_model()\n return User.objects.get(pk=user_id)\n except User.DoesNotExist:\n return None", "def get_user(id):\n pass", "def user_by_id(self, user_id):\n\n cur = self.db.cursor()\n cur.execute(\n \"\"\"SELECT user_id, username, password, phone, email, role\n FROM users WHERE user_id = %s\"\"\", (user_id, ))\n \n user_from_db = cur.fetchone()\n if cur.rowcount == 1: \n user_id, username, password, phone, email, role = user_from_db\n resp = dict(user_id=user_id, username=username, password=password, phone=phone, email=email, role=role)\n \n return resp\n return None", "def __setitem__(self, user_id, user_load):\n if not user_id in self.user_ids:\n raise KeyError(\"Invalid user ID. Check the read-only property \" \\\n \"user_ids for a list of valid IDs.\")\n self._loads[user_id] = user_load", "def load_user(user_id, current_user=None):\r\n logging.warn(\"Loading user {}\".format(user_id))\r\n user = User.query.get(user_id)\r\n\r\n user_followers, user_following = [], []\r\n\r\n # FIXME: This try/except block is ugly as hell. Refactor please!\r\n try:\r\n followers = [f.follower_id for f in UserFollower.filter(user_id=user_id).limit(20)]\r\n following = [f.following_id for f in UserFollowing.filter(user_id=user_id).limit(20)]\r\n\r\n cassa_users = user_list_from_ids(set(followers + following), dict=True)\r\n\r\n def expand(o):\r\n extras = o.extended\r\n dict_val = o._as_dict()\r\n dict_val.update(**extras)\r\n return dict_val\r\n\r\n user_followers = [filter(lambda x: x.get('id') == u, cassa_users)[0] for u in followers]\r\n user_following = [filter(lambda x: x.get('id') == u, cassa_users)[0] for u in following]\r\n\r\n except Exception, ex:\r\n logging.warn(ex)\r\n sentry_client.captureException()\r\n\r\n return user, user_followers, user_following", "async def get_user_by_id(self, roblox_id: int) -> User:\n r = await self.request.request(url=f'https://api.roblox.com/users/{roblox_id}', method=\"GET\", noerror=True)\n json = r.json()\n if r.status_code != 200 or not json.get('Id') or not json.get('Username'):\n return None\n return User(self.request, json['Id'], json['Username'])", "def get_user_from_id(user_id):\n return Users.query.filter_by(id=user_id).first()", "def user_get_by_id(user_id):\n obj = storage.get(\"User\", user_id)\n if obj is None:\n abort(404)\n else:\n return jsonify(obj.to_dict())", "def lookup(self, user_id):\n return self.users.get(str(user_id))", "def get(self, id):\n\t\ttry:\n\t\t\tflask_app.logger.debug('We are getting the user: %d', id)\n\t\t\treturn user_service.get(id)\n\t\texcept AssertionError as e:\n\t\t\tuser_space.abort(400, e.args[0], status = \"Could not get user\", statusCode = \"400\")\n\t\texcept Exception as e:\n\t\t\tuser_space.abort(500, e.args[0], status = \"Could not get user\", statusCode = \"500\")", "def get_user(self, user_id=None):\n raise NotImplementedError", "def get(cls,id):\n result = execute_query(\"\"\"SELECT * FROM Users Where username = ?\n \"\"\",\n [id])\n try:\n user = User(id,result[0][1])\n except Exception as e:\n return None\n \n return user", "def load_test_user() -> User:\n new_user = User.generate_new_user(Driver.user_test_data)\n return new_user", "def get_user_by_id(cls, userid):\n\n user = 
User.query.filter_by(user_id=userid).one()\n\n return user", "def lookupUser_byID(self, user_id):\n sql = \"SELECT * FROM Users WHERE id='%s'\"\\\n % (user_id)\n res = self.execute(sql)\n reslist = res.fetchall()\n if reslist == []:\n return None\n else:\n return reslist[0]", "def get_user(user_id):\n try:\n return UserModel.objects.get(id=user_id)\n except UserModel.DoesNotExist:\n return None", "def get_user(self, user_id=None, nick=None):\n if user_id in self:\n return self[user_id]\n else:\n return User(self, user_id, nick=nick)", "def get_user(self, user_id):\n return self.my_get_user(self.get_all_dbusers(), user_id)", "def get_user_by_id(user_id):\n return User.query.get(user_id)" ]
[ "0.8794154", "0.8794154", "0.8794154", "0.86955374", "0.8691932", "0.8611956", "0.857735", "0.8548633", "0.8518415", "0.8518415", "0.8510647", "0.8510647", "0.8510647", "0.8510647", "0.8504149", "0.84574586", "0.8455236", "0.84108514", "0.83591974", "0.8340407", "0.83372307", "0.8331845", "0.82702106", "0.82557803", "0.8234937", "0.8234937", "0.8234937", "0.8230656", "0.82221943", "0.8166227", "0.80994153", "0.8020958", "0.7983261", "0.7983078", "0.79823786", "0.79799277", "0.7936105", "0.7936105", "0.7936105", "0.7936105", "0.7901871", "0.77996105", "0.7788959", "0.7711273", "0.76757187", "0.76513505", "0.765079", "0.7564491", "0.74471885", "0.7431388", "0.7431388", "0.7415195", "0.7374087", "0.73203576", "0.71087426", "0.70991844", "0.70444256", "0.69956106", "0.6967408", "0.69225323", "0.6897659", "0.6871405", "0.68637425", "0.68166363", "0.68076277", "0.6776304", "0.6741053", "0.6740598", "0.6740228", "0.6739956", "0.6739679", "0.67352057", "0.6726002", "0.6714216", "0.6703225", "0.6696668", "0.6690278", "0.6671699", "0.6658323", "0.6651263", "0.66455907", "0.66340846", "0.66330475", "0.662902", "0.6607955", "0.6604293", "0.659927", "0.6593556", "0.65605223", "0.65521187", "0.6551982", "0.65446305", "0.6543667", "0.6533179", "0.6528455" ]
0.8645434
10
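The pair above matches the docstring "Load user with specified user ID." to a one-line Flask-SQLAlchemy lookup, and most of its negatives are variations on the same Flask-Login loader callback. A minimal, self-contained sketch of that pattern, with a hypothetical `User` model and `login_manager` (names assumed for illustration, not taken from any row), could look like this:

```python
# Illustrative sketch only: assumes Flask-Login and Flask-SQLAlchemy are
# installed; the User model and login_manager names are hypothetical.
from flask import Flask
from flask_login import LoginManager, UserMixin
from flask_sqlalchemy import SQLAlchemy

app = Flask(__name__)
app.config["SQLALCHEMY_DATABASE_URI"] = "sqlite:///app.db"
db = SQLAlchemy(app)
login_manager = LoginManager(app)


class User(UserMixin, db.Model):
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(64))


@login_manager.user_loader
def load_user(user_id):
    # Flask-Login hands the ID back as a string, hence the int() cast
    # before the primary-key lookup.
    return User.query.get(int(user_id))
```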
Get the selected locale from user settings.
def get_locale(): setting = Setting.query.filter(Setting.name == 'default_language').first() if setting is not None: return setting.value # Return default language when none found return 'en'
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_locale():\n localLang = request.args.get('locale')\n supportLang = app.config['LANGUAGES']\n if localLang in supportLang:\n return localLang\n userId = request.args.get('login_as')\n if userId:\n localLang = users[int(userId)]['locale']\n if localLang in supportLang:\n return localLang\n localLang = request.headers.get('locale')\n if localLang in supportLang:\n return localLang\n return request.accept_languages.best_match(app.config['LANGUAGES'])", "def get_current_locale(self, req):\n if req.view_args and 'locale' in req.view_args:\n for locale in self.locales:\n if locale.code == req.view_args['locale']:\n return locale\n\n # Return the default locale\n return self.default_locale", "def get_locale_for_user(self):\n return 'en_US' # TODO(psimakov): choose proper locale from profile", "def get_locale(self):\n return self.locale", "async def get_user_locale(self, action: str, args: Tuple[Any]) -> str:\n\n tg_user = types.User.get_current()\n user = await get_user(tg_user.id)\n super_locale = await super().get_user_locale(action, args)\n\n if user.locale is not None: # if user set his locale\n return user.locale\n else:\n if super_locale in LANGUAGES:\n return super_locale\n if tg_user.locale in LANGUAGES:\n return tg_user.locale\n else: # else, return default\n return DEFAULT_USER_LOCALE", "def get_locale():\n return babel.Locale.parse(_get_locale())", "def get_current_locale(self) -> str:\n return self.locale", "def get_locale():\n if (session.get(\"language\") is not None):\n return session.get('language')['charcode']\n return request.accept_languages.best_match(app.config['LANGUAGES'].keys())", "def locale(self):\n return self.__locale", "def get_locale():\n return \"he\"", "def _get_locale() -> str:\n languages = flask.current_app.config['LANGUAGES'].keys()\n locale = flask.request.accept_languages.best_match(languages)\n\n # If no locale could be determined, fall back to the default.\n if locale is None:\n locale = flask.current_app.config['BABEL_DEFAULT_LOCALE']\n\n return locale", "def default_locale(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"default_locale\")", "def default_locale(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"default_locale\")", "def get_locale(self):\n raise Unimplemented()", "def init_language(self):\n\n if 'HTTP_COOKIE' in os.environ:\n cookies = os.environ['HTTP_COOKIE'].split(';')\n for cookie in cookies:\n (key, value) = cookie.split('=')\n if key == Intuition.COOKIE_USERLANG:\n return value\n \n return self.default_language", "def get_default_language():\n return getattr(thread_locals, 'DEFAULT_LANGUAGE',\n settings.DEFAULT_LANGUAGE)", "def get_language(self):\n return self.language if self.language is not None else get_language()", "def get_language(self):\r\n return self.language", "def get_language(self):\n return self.lang", "def get_locale(lang):\n lang = babel_format_locale_map.get(lang) or lang\n try:\n return Locale.parse(lang, sep='-')\n except (UnknownLocaleError, ValueError):\n return Locale(*settings.LANGUAGE_CODE.split('-'))", "def get_language():\n try:\n from leaves.middleware import request_context\n return request_context.language\n except:\n return get_site().preferences.default_language", "def get_lang(self):\n return self.langs.lang", "def get_language(self) -> str:\n return settings.LANGUAGE_CODE", "def requestLanguage(request):\n # Return the user language preferences for registered users\n if request.user.valid and request.user.language:\n return request.user.language\n\n # Or try to 
return one of the user browser accepted languages, if it\n # is available on this wiki...\n available = wikiLanguages()\n if not request.cfg.language_ignore_browser:\n for lang in browserLanguages(request):\n if lang in available:\n return lang\n \n # Or return the wiki default language...\n if request.cfg.language_default in available:\n lang = request.cfg.language_default\n # If everything else fails, read the manual... or return 'en'\n else:\n lang = 'en'\n return lang", "def getVKBLanguage(self):\r\n\r\n return self.phone.sx('(send (send (get-input-locale-manager) get-current-locale) get-iso)', convertToString=False)", "def get_locale(self):\n\n return to_locale(settings.LANGUAGE_CODE).replace(\"_\", \"-\")", "def getLanguage(self):\n return self.getOrDefault(self.language)", "def GetMUILanguage(self):\n mui_resource = self.GetMUIResource()\n if not mui_resource:\n return None\n\n return mui_resource.language", "def get_user_lang(user: str = None) -> str:\n\tuser = user or frappe.session.user\n\tlang = frappe.cache.hget(\"lang\", user)\n\n\tif not lang:\n\t\t# User.language => Session Defaults => frappe.local.lang => 'en'\n\t\tlang = (\n\t\t\tfrappe.db.get_value(\"User\", user, \"language\")\n\t\t\tor frappe.db.get_default(\"lang\")\n\t\t\tor frappe.local.lang\n\t\t\tor \"en\"\n\t\t)\n\n\t\tfrappe.cache.hset(\"lang\", user, lang)\n\n\treturn lang", "def get_user_lang(user=None):\n\tif not user:\n\t\tuser = frappe.session.user\n\n\t# via cache\n\tlang = frappe.cache().hget(\"lang\", user)\n\n\tif not lang:\n\n\t\t# if defined in user profile\n\t\tlang = frappe.db.get_value(\"User\", user, \"language\")\n\t\tif not lang:\n\t\t\tlang = frappe.db.get_default(\"lang\")\n\n\t\tif not lang:\n\t\t\tlang = frappe.local.lang or 'en'\n\n\t\tfrappe.cache().hset(\"lang\", user, lang)\n\n\treturn lang", "def get_localization(self):\n return self._request_data(\"/lokarria/localization\")", "def get_locale_from_accept_header(request):\n header = request.headers.get(\"Accept-Language\", '')\n parsed = parse_accept_language_header(header)\n if parsed is None:\n return None\n locale_list_sorted_by_q = sorted(parsed.iterkeys(), reverse=True)\n locale = Locale.negotiate(locale_list_sorted_by_q, config.locales, sep='_')\n return str(locale)", "def get_user_language() -> str:\n languages = {\n \"arabic\": \"arb\",\n \"chinese\": \"cmn-CN\",\n \"danish\": \"da-DK\",\n \"english\": \"en-GB\",\n \"french\": \"fr-FR\",\n \"german\": \"de-DE\",\n \"portuguese\": \"pl-PT\",\n \"spanish\": \"es-ES\"\n }\n textlang = input(\"What language do you want to hear?\")\n try:\n return languages[textlang.lower()]\n except KeyError as e:\n print(\"Enter a valid language.\")\n sys.exit(1)", "def language(self):\r\n return self._get('language', {})", "def get_language(lang_list: list = None) -> str:\n\tis_logged_in = frappe.session.user != \"Guest\"\n\n\t# fetch language from form_dict\n\tif frappe.form_dict._lang:\n\t\tlanguage = get_lang_code(frappe.form_dict._lang or get_parent_language(frappe.form_dict._lang))\n\t\tif language:\n\t\t\treturn language\n\n\t# use language set in User or System Settings if user is logged in\n\tif is_logged_in:\n\t\treturn frappe.local.lang\n\n\tlang_set = set(lang_list or get_all_languages() or [])\n\n\t# fetch language from cookie\n\tpreferred_language_cookie = get_preferred_language_cookie()\n\n\tif preferred_language_cookie:\n\t\tif preferred_language_cookie in lang_set:\n\t\t\treturn preferred_language_cookie\n\n\t\tparent_language = get_parent_language(language)\n\t\tif parent_language 
in lang_set:\n\t\t\treturn parent_language\n\n\t# fetch language from request headers\n\taccept_language = list(frappe.request.accept_languages.values())\n\n\tfor language in accept_language:\n\t\tif language in lang_set:\n\t\t\treturn language\n\n\t\tparent_language = get_parent_language(language)\n\t\tif parent_language in lang_set:\n\t\t\treturn parent_language\n\n\t# fallback to language set in System Settings or \"en\"\n\treturn frappe.db.get_default(\"lang\") or \"en\"", "def Language(self, default=None):\n return self.data.get('language', default)", "def get_language(self) -> str:\n return self.language", "def get_lang(self):\n props = getToolByName(self.context,\n 'portal_properties')\n return props.site_properties.getProperty('default_language') or 'en'", "def language(self):\n if self.consent:\n self.consent.language\n translation.activate(self.consent.language)\n self._language = translation.get_language()\n else:\n self._language = settings.LANGUAGE_CODE\n return self._language", "def get_lang(self):\n\n path = self.get_lang_path()\n for language in self.languages:\n if language in path:\n return language", "def locale(self) -> \"Locale\":\n raise NotImplementedError", "def logon_language(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"logon_language\")", "def get_notification_language(user):\r\n if getattr(settings, 'NOTIFICATION_LANGUAGE_MODULE', False):\r\n try:\r\n app_label, model_name = settings.NOTIFICATION_LANGUAGE_MODULE.split('.')\r\n model = models.get_model(app_label, model_name)\r\n language_model = model._default_manager.get(user__id__exact=user.id)\r\n if hasattr(language_model, 'language'):\r\n return language_model.language\r\n except (ImportError, ImproperlyConfigured, model.DoesNotExist):\r\n raise LanguageStoreNotAvailable\r\n raise LanguageStoreNotAvailable", "def select_language(preset_value: str = None) -> str:\n\tkb_lang = list_keyboard_languages()\n\t# sort alphabetically and then by length\n\tsorted_kb_lang = sorted(sorted(list(kb_lang)), key=len)\n\n\tselected_lang = Menu(\n\t\t_('Select keyboard layout'),\n\t\tsorted_kb_lang,\n\t\tpreset_values=preset_value,\n\t\tsort=False\n\t).run()\n\n\tif selected_lang.value is None:\n\t\treturn preset_value\n\n\treturn selected_lang.value", "def _get_theme_selected():\n\n try:\n theme_selected = Configuration.objects.filter(group='theme', key='selected')[0]\n theme_name = theme_selected.value\n except:\n theme_name = 'default'\n\n return theme_name", "def get_default_language():\n utility = queryUtility(ILanguageAvailability)\n if utility is not None:\n return utility.getDefaultLanguage()\n return DEFAULT_LANGUAGE", "def lang(self):\n return self._lang", "def get_language(chat_id):\n db_connection = DatabaseConnection()\n language = db_connection.get_setting(chat_id=chat_id, setting=LANGUAGE_SETTING)\n db_connection.close()\n\n return language", "def _getLang(self, language):\n if language == None:\n language = self.getDefaultLanguage()\n\n return language", "def language(self):\n if \"language\" in self._prop_dict:\n return self._prop_dict[\"language\"]\n else:\n return None", "def get_language_name(self, locale: Locale | str | None = None) -> str | None:\n if locale is None:\n locale = self\n locale = Locale.parse(locale)\n return locale.languages.get(self.language)", "def get_language_key(host, domain, user):\n\n # Get lang from authenticated user\n if not user.is_anonymous():\n value = user.language\n\n # Get lang based on request host and global language settings\n else:\n 
current_subdomain = host[:-len(domain) - 1]\n default_language = settings.LANGUAGE_CODE\n valid_languages = [l[0] for l in settings.LANGUAGES]\n valid_subdomains = list(settings.SUBDOMAIN_URLCONFS)\n default_language_domains = []\n\n for d in valid_subdomains:\n if (d is default_language) or (d not in valid_languages):\n default_language_domains.append(d)\n\n if current_subdomain in default_language_domains:\n value = default_language\n else:\n value = current_subdomain\n\n return value", "def get_proper_language():\n lang = config['summernote'].get('lang')\n\n if not lang:\n return config['lang_matches'].get(get_language(), 'en-US')\n\n return lang", "def default_language(self):\n return self._default_language", "def _get_lang(self, *args, **kwargs):\n if \"lang\" in kwargs:\n if kwargs[\"lang\"] in self._available_languages:\n self.lang = kwargs[\"lang\"]", "def bot_locales(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['BotLocaleArgs']]]]:\n return pulumi.get(self, \"bot_locales\")", "def __translationLanguage(self):\n return self.transLanguageComboBox.itemData(\n self.transLanguageComboBox.currentIndex())", "def set_locale(cls, force=None):\n # disable i18n if config.locales array is empty or None\n if not config.locales:\n return None\n # 1. force locale if provided\n locale = force\n if locale not in config.locales:\n # 2. retrieve locale from url query string\n locale = cls.request.get(\"hl\", None)\n if locale not in config.locales:\n # 3. retrieve locale from cookie\n locale = cls.request.cookies.get('hl', None)\n if locale not in config.locales:\n # 4. retrieve locale from accept language header\n locale = get_locale_from_accept_header(cls.request)\n if locale not in config.locales:\n # 5. detect locale from IP address location\n territory = get_territory_from_ip(cls) or 'ZZ'\n locale = str(Locale.negotiate(territory, config.locales))\n if locale not in config.locales:\n # 6. 
use default locale\n locale = i18n.get_store().default_locale\n i18n.get_i18n().set_locale(locale)\n # save locale in cookie with 26 weeks expiration (in seconds)\n cls.response.set_cookie('hl', locale, max_age = 15724800)\n return locale", "def get_localization(self, language: str) -> Localization:\n ...", "def gpwDefaultLanguage(self):\n parent = self.getFolderWhenPortalFactory()\n if hasattr(parent, 'getRawLanguage') and parent.getRawLanguage():\n return parent.getRawLanguage()\n tool = getToolByName(self, 'portal_languages', None)\n if tool is not None:\n return tool.getDefaultLanguage()\n return config.LANGUAGE_DEFAULT", "def getDefaultLocaleLanguage():\n # Setup textdomain\n try:\n locale.bindtextdomain(TEXT_DOMAIN, DEFAULT_LOCALE_PATH)\n except AttributeError:\n log_func.warning(u'Locale module not support text domain')\n\n language = locale.getlocale()[0]\n\n if sys_func.isWindowsPlatform():\n if language in WINDOWS2UNIX_LANGUAGE:\n language = WINDOWS2UNIX_LANGUAGE.get(language, DEFAULT_LOCALE)\n else:\n try:\n item1, item2 = language.split('_')\n language = '_'.join((item1[:2].lower(), item2[:2].upper()))\n except:\n log_func.fatal(u'Error get language')\n language = DEFAULT_LOCALE\n return language", "def language(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"language\")", "def default_locale(category: str | None = None, aliases: Mapping[str, str] = LOCALE_ALIASES) -> str | None:\n varnames = (category, 'LANGUAGE', 'LC_ALL', 'LC_CTYPE', 'LANG')\n for name in filter(None, varnames):\n locale = os.getenv(name)\n if locale:\n if name == 'LANGUAGE' and ':' in locale:\n # the LANGUAGE variable may contain a colon-separated list of\n # language codes; we just pick the language on the list\n locale = locale.split(':')[0]\n if locale.split('.')[0] in ('C', 'POSIX'):\n locale = 'en_US_POSIX'\n elif aliases and locale in aliases:\n locale = aliases[locale]\n try:\n return get_locale_identifier(parse_locale(locale))\n except ValueError:\n pass\n return None", "def locale(value):\r\n return '{}, {}'.format(QLocale.languageToString(value.language()), \r\n QLocale.countryToString(value.country()))", "def default_language(self) -> str:\n return self.raw_config.get(\"default_language\", \"en\")", "def language(self):\n portal_state = self.context.unrestrictedTraverse(\"@@plone_portal_state\")\n return aq_inner(self.context).Language() or portal_state.default_language()", "def GetLanguageInfo(*args, **kwargs):\n return _gdi_.Locale_GetLanguageInfo(*args, **kwargs)", "def language(self):\n return self._language", "def language(self):\n return self._language", "def language(self):\n return self._language", "def language(self):\n return self._language", "def activate(locale, path=None):\r\n if path is None:\r\n path = _DEFAULT_LOCALE_PATH\r\n if locale not in _TRANSLATIONS:\r\n translation = gettext_module.translation('humanize', path, [locale])\r\n _TRANSLATIONS[locale] = translation\r\n _CURRENT.locale = locale\r\n return _TRANSLATIONS[locale]", "def getLang(lang, localedir=os.path.expanduser(\"~\") + \"/share/locale\"):\n return gettext.translation(\"bridgedb\", localedir=localedir, \n languages=[lang], fallback=\"en\")", "def Locale_GetLanguageInfo(*args, **kwargs):\n return _gdi_.Locale_GetLanguageInfo(*args, **kwargs)", "def Locale_GetInfo(*args, **kwargs):\n return _gdi_.Locale_GetInfo(*args, **kwargs)", "def get_display_name(self, locale: Locale | str | None = None) -> str | None:\n if locale is None:\n locale = self\n locale = Locale.parse(locale)\n retval = 
locale.languages.get(self.language)\n if retval and (self.territory or self.script or self.variant):\n details = []\n if self.script:\n details.append(locale.scripts.get(self.script))\n if self.territory:\n details.append(locale.territories.get(self.territory))\n if self.variant:\n details.append(locale.variants.get(self.variant))\n if self.modifier:\n details.append(self.modifier)\n detail_string = ', '.join(atom for atom in details if atom)\n if detail_string:\n retval += f\" ({detail_string})\"\n return retval", "def getLocales(self):\n pass", "def get_default_culture(self):\n if \"default_culture\" in self.config:\n return self.config.default_culture\n return configuration.default_culture", "def getTRSLanguage():\n try:\n return sys.argv[1]\n except IndexError as error:\n print(\"No language argument\\n\")\n sys.exit()", "def preferred_language(self):\n if \"preferredLanguage\" in self._prop_dict:\n return self._prop_dict[\"preferredLanguage\"]\n else:\n return None", "def preferred_language(self):\n if \"preferredLanguage\" in self._prop_dict:\n return self._prop_dict[\"preferredLanguage\"]\n else:\n return None", "def preferred_language(self):\n if \"preferredLanguage\" in self._prop_dict:\n return self._prop_dict[\"preferredLanguage\"]\n else:\n return None", "def bot_locales(self) -> pulumi.Output[Optional[Sequence['outputs.BotLocale']]]:\n return pulumi.get(self, \"bot_locales\")", "def language_selector(context):\r\n output = \"\"\r\n from django.conf import settings\r\n i18 = getattr(settings, 'USE_I18N', False)\r\n if i18:\r\n template = \"admin/language_selector.html\"\r\n context['i18n_is_set'] = True\r\n try:\r\n output = render_to_string(template, context)\r\n except:\r\n pass\r\n return output", "def get_setting(which, default=None):\n settings = QSettings('USGS', 'guanoeditor')\n if default is None:\n return settings.value(which)\n else:\n return settings.value(which, default)", "def get_language(mgroups):\n\n if mgroups:\n lang = mgroups[0].strip('[').strip(']')\n return lang.lower().strip()\n return None", "def _try_to_get_an_english_value(self, localized_values):\n if not localized_values:\n return None\n\n for localized_value in localized_values:\n if localized_value.language in self.ENGLISH_LANGUAGE_CODES:\n return localized_value.value\n\n return first_or_default(localized_values).value", "def get_locale_name(code):\n language_map = dict(django.conf.global_settings.LANGUAGES)\n\n # check for exact match\n if code in language_map:\n return language_map[code]\n\n # try for the language, fall back to just using the code\n language = code.split(\"-\")[0]\n return language_map.get(language, code)", "def get_setting(self, name, default=None):\n w = self.choices['which']\n if w == 'global_default':\n return self.settings.get_global_default(name, default)\n elif w == 'project_default':\n return self.settings.get_project_default(name, default)\n elif w == 'global_variant':\n return self.settings.get_global_variant(self.choices['variant'],\n name, default)\n elif w == 'project_variant':\n return self.settings.get_project_variant(self.choices['variant'],\n name, default)\n elif w == 'project_package_default':\n return self.settings.get_project_package_default(\n self.choices['package'], name, default)\n elif w == 'project_package_variant':\n return self.settings.get_project_package_variant(\n self.choices['package'], self.choices['variant'], name, default)\n elif w == 'project_package_target':\n return self.settings.get_project_package_target(\n self.choices['package'], 
self.choices['target'], name, default)\n else:\n raise AssertionError(w)", "def get_gettext():\n local_path = os.path.realpath(os.path.dirname(sys.argv[0])) + \\\n '/translations'\n langs = []\n lc, encoding = locale.getdefaultlocale()\n if (lc):\n langs = [lc]\n osLanguage = os.environ.get('LANGUAGE', None)\n if (osLanguage):\n langs += osLanguage.split(\":\")\n langs += [\"en_US\"]\n lang = gettext.translation('wicd', local_path, languages=langs, \n fallback=True)\n _ = lang.gettext\n return _", "def get_lang_path(self):\n\n sql = \"SELECT current_lang_path FROM Users WHERE username = '\" + self.username + \"'\"\n self.cursor.execute(sql, )\n return self.cursor.fetchall()[0][0]", "def language(self):\n # type: () -> string_types\n return self._language", "def language(self):\n # type: () -> string_types\n return self._language", "def _get_local_preference(self):\n return self.__local_preference", "def settings():\n return _get_settings()[1]", "def get_language_name(self):\n return self.language_name", "def user_country(self):\n return utils.to_country(lib.sp_session_user_country(self._sp_session))", "def guess_language(lang_list=None):\n\tlang_codes = frappe.request.accept_languages.values()\n\tif not lang_codes:\n\t\treturn frappe.local.lang\n\n\tguess = None\n\tif not lang_list:\n\t\tlang_list = get_all_languages() or []\n\n\tfor l in lang_codes:\n\t\tcode = l.strip()\n\t\tif not isinstance(code, text_type):\n\t\t\tcode = text_type(code, 'utf-8')\n\t\tif code in lang_list or code == \"en\":\n\t\t\tguess = code\n\t\t\tbreak\n\n\t\t# check if parent language (pt) is setup, if variant (pt-BR)\n\t\tif \"-\" in code:\n\t\t\tcode = code.split(\"-\")[0]\n\t\t\tif code in lang_list:\n\t\t\t\tguess = code\n\t\t\t\tbreak\n\n\treturn guess or frappe.local.lang", "def Locale_GetLanguageName(*args, **kwargs):\n return _gdi_.Locale_GetLanguageName(*args, **kwargs)", "def setPortalLocale( self ):\n info = getLanguageInfo( self )\n\n # find default and effective locale settings\n def_locale = info.get( sys.platform + '_locale' ) or info.get( os.name + '_locale' )\n cur_locale = getlocale()\n cur_locale = None not in cur_locale and '.'.join( cur_locale ) or ''\n\n # check whether locale is already ok\n if def_locale is None or cur_locale.lower() == def_locale.lower():\n return\n\n # change effective locale\n try:\n setlocale( LC_ALL, def_locale )\n except Exceptions.LocaleError:\n pass" ]
[ "0.7412174", "0.7108176", "0.69538045", "0.69421023", "0.68359756", "0.67491263", "0.67384523", "0.6731309", "0.650032", "0.63808835", "0.6361166", "0.63441944", "0.63441944", "0.6340159", "0.62725174", "0.61259097", "0.6097418", "0.6096102", "0.60663605", "0.60374147", "0.6036436", "0.6031369", "0.60306174", "0.60046834", "0.5967092", "0.5957959", "0.5913882", "0.59019977", "0.587678", "0.58416796", "0.5803752", "0.5803198", "0.57916236", "0.57715046", "0.57507586", "0.5729792", "0.57146895", "0.5705824", "0.567902", "0.56787616", "0.5636775", "0.5628542", "0.56254506", "0.561564", "0.5610895", "0.557504", "0.5572497", "0.5569761", "0.553677", "0.5535012", "0.5528296", "0.54974294", "0.5495751", "0.54953045", "0.5471301", "0.54627067", "0.5459039", "0.544239", "0.5407262", "0.53888476", "0.53744924", "0.5373949", "0.5314767", "0.530426", "0.529806", "0.5272397", "0.526758", "0.52640086", "0.52640086", "0.52640086", "0.52640086", "0.52483827", "0.52480304", "0.5244365", "0.5240376", "0.52178365", "0.5213127", "0.52030116", "0.5186868", "0.5184599", "0.5184599", "0.5184599", "0.51744294", "0.5162153", "0.5161248", "0.51514167", "0.5140219", "0.5115806", "0.51117927", "0.51061547", "0.5098513", "0.5095186", "0.5095186", "0.50842893", "0.5081917", "0.5073803", "0.5066917", "0.5064049", "0.5061692", "0.5052451" ]
0.73380387
1
Jinja2 filter to slugify text.
def slug(value): return slugify(value)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def slugified(text):\n return re.sub(\"-+\", \"-\", re.sub(r\"\\W\", \"-\", text.lower()))", "def slugify(text: str) -> str:\n return text.strip().replace(', ', '-').replace(' ', '_').lower()", "def slugify(text):\n concatenated = re.sub('\\s+', '-', text.lower())\n return re.sub('[^A-Za-z0-9_-]', '', concatenated)", "def slugify(text):\n non_safe = [c for c in text if c in non_url_safe]\n if non_safe:\n for c in non_safe:\n text = text.replace(c, '')\n # Strip leading, trailing and multiple whitespace, convert remaining whitespace to _\n text = u'_'.join(text.split())\n return text", "def format_slug(text):\n allowed_chars = (\n \"abcdefghijklmnopqrstuvxyzABCDEFGHIJKLMNOPQRSTUVWXYZ\" # Alphabet\n \"01234567890\" # Numbers\n \"_-\" # Symbols\n )\n # Replace seperators with dash\n seperators = [\" \", \",\", \".\"]\n for sep in seperators:\n text = text.replace(sep, \"-\")\n # Strip unacceptable characters\n text = \"\".join([c for c in text if c in allowed_chars])\n # Enforce max length\n return truncate(text, max_len=50).lower()", "def format_slug(text: str) -> str:\n allowed_chars = (\n string.ascii_lowercase # Alphabet\n + string.digits # Numbers\n + \"_-\" # Symbols\n )\n # Convert to lowercase\n text = text.lower()\n # Replace separators with dash\n text = text.translate({ord(sep): ord('-') for sep in \" ,.\"})\n # Strip unacceptable characters\n text = ''.join(c for c in text if c in allowed_chars)\n # Enforce max length\n return truncate(text, max_len=50)", "def namify(text):\n return slugify(text).replace('-','_')", "def slugify(value):\n return '_' + value.replace(' ', '-').lower()", "def slugify(s):\n from future.builtins import str\n return django_slugify(unidecode(str(s)))", "def slugify(s: str) -> str:\n\n return re.sub(re_forbidden, ' ', s.strip()).strip().replace(' ', '-')", "def slugify(text):\n # Sorry: This is really goofy, but it was really easy to write and\n # \"good enough\".\n text = ''.join([c if c.isalnum() else '-' for c in text.lower()])\n\n while '--' in text:\n text = text.replace('--', '-')\n\n text = text.strip('-')\n\n return text", "def slugify(value, substitutions=()):\r\n # TODO Maybe steal again from current Django 1.5dev\r\n value = Markup(value).striptags()\r\n # value must be unicode per se\r\n import unicodedata\r\n from unidecode import unidecode\r\n # unidecode returns str in Py2 and 3, so in Py2 we have to make\r\n # it unicode again\r\n value = unidecode(value)\r\n if isinstance(value, six.binary_type):\r\n value = value.decode('ascii')\r\n # still unicode\r\n value = unicodedata.normalize('NFKD', value).lower()\r\n for src, dst in substitutions:\r\n value = value.replace(src.lower(), dst.lower())\r\n value = re.sub('[^\\w\\s-]', '', value).strip()\r\n value = re.sub('[-\\s]+', '-', value)\r\n # we want only ASCII chars\r\n value = value.encode('ascii', 'ignore')\r\n # but Pelican should generally use only unicode\r\n return value.decode('ascii')", "def create_slug(val):\n slug = slugify(unidecode(val))\n for s in settings.DJCAT_SLUG_RESERVED:\n slug = slug.replace(s, '')\n return slug", "def slugify2(value):\n try:\n value = unicodedata.normalize('NFC', value)\n value = downcode(value)\n value = unicodedata.normalize('NFD', value).encode('ascii', 'ignore')\n value = unicode(re.sub('[^\\w\\s-]', '', value).strip().lower())\n return safestring.mark_safe(re.sub('[-\\s]+', '-', value))\n except:\n if settings.TEMPLATE_DEBUG:\n raise\n else:\n return u''", "def slugify(value):\n value = re.sub('[^\\w\\s-]', '', value).strip()\n return 
re.sub('[-\\s]+', '-', value)", "def slugify(value):\n #import unicodedata\n #value = unicodedata.normalize('NFKD', value).encode('ascii', 'ignore')\n value = unicode(re.sub('[^\\w\\s-]', '', value).strip().lower())\n value = re.sub('[-\\s]+', '-', value)\n return value", "def slugify(value: str) -> str:\n value = re.sub(r\"[^\\w\\s-]\", \"\", value.lower())\n return re.sub(r\"[-\\s]+\", \"-\", value).strip(\"-_\")", "def slugify(value):\n import unicodedata\n import re\n from django.utils.safestring import mark_safe\n value = unicodedata.normalize('NFKD', value).encode('ascii', 'ignore')\n value = unicode(re.sub('[^\\w\\s-]', '', value).strip().lower())\n return mark_safe(re.sub('[-\\s]+', '-', value))", "def slugify(name):\n return name.lower().strip().replace(' ', '-').replace('+', '_')", "def slugify(value):\n value = unicode(str(value))\n value = unicodedata.normalize('NFKD', value).encode('ascii', 'ignore').decode('ascii')\n value = re.sub(r'[^\\w\\s-]', '', value).strip().lower()\n return re.sub(r'[-\\s]+', '-', value)", "def sluggify(cls, name: str) -> str:\n return \"-\".join([word for word in cls.split_on_non_word.split(name.strip().lower()) if word])", "def slugify(s):\n s = reg_nonchar.sub('-', s).lower()\n s = reg_dashes.sub('-', s)\n s = reg_outer_dashes.sub('', s)\n return s", "def slugify(s):\n slug = s.lower() # force to lower case\n slug = re.sub('[^a-z0-9 -]', '', slug) # remove invalid chars\n slug = re.sub(r'\\s+', '-', slug) # collapse whitespace and replace by -\n slug = re.sub('-+', '-', slug) # collapse dashes\n if not slug:\n slug = 'untitled'\n return slug", "def _slugify(value):\n import unicodedata\n if not isinstance(value, unicode):\n value = unicode(value)\n value = unicodedata.normalize('NFKD', value).encode('ascii', 'ignore')\n value = unicode(_slugify_strip_re.sub('', value).strip().lower())\n return _slugify_hyphenate_re.sub('-', value)", "def slugify(s):\n\treturn re.sub('[!@#$%^&*()\\\\\\\\/:.\"\"]+', '', s).replace(' ', '-').replace('--', '-').lower()", "def _slugify(value):\r\n try:\r\n import unicodedata\r\n value = unicodedata.normalize('NFKD', value)\r\n except ImportError:\r\n pass\r\n value = value.encode('ascii', 'ignore').decode()\r\n value = _slugify_strip_re.sub('', value).strip().lower()\r\n return _slugify_hyphenate_re.sub('-', value)", "def slugify(value, delimiter='-'):\n words = slug_pattern.split(value)\n return delimiter.join(filter(truth, words)).lower()", "def slugify(value):\n if type(value) == unicode:\n import unicodedata\n value = unicodedata.normalize('NFKD', value).encode('ascii', 'ignore')\n value = unicode(re.sub('[^\\w\\s-]', '', value).strip().lower())\n return re.sub('[-\\s]+', '-', value)", "def slugify(value):\n # import unicodedata\n # value = unicodedata.normalize('NFKD', value).encode('ascii', 'ignore')\n # value = unicode(re.sub('[^\\w\\s-]', '', value).strip().lower())\n # value = unicode(re.sub('[-\\s]+', '-', value))\n value = re.sub('[^\\w\\s-]', '', value).strip().lower()\n value = re.sub('[-\\s]+', '-', value)\n return value", "def deslugify(_slug):\n return string.capwords(_slug.replace('-', ' '))", "def _slugify(value):\n value = unicodedata.normalize('NFKD', value).encode('ascii', 'ignore')\n value = re.sub(r'[^\\w\\s-]', '', value.decode('utf-8', 'ignore'))\n value = value.strip().lower()\n value = re.sub(r'[-\\s]+', '-', value)\n return value", "def slugify(value):\n value = unicode(value)\n value = unicodedata.normalize('NFKD', value).encode('ascii', 'ignore').decode('ascii')\n value = 
re.sub('[^\\w\\s-]', '', value).strip().lower()\n return re.sub('[-\\s]+', '-', value)", "def get_slug(self, headline):\n exclude = set(string.punctuation)\n s = ''.join(ch for ch in headline if ch not in exclude)\n return s.lower().replace(\" \", \"-\")", "def slugify(value):\n import unicodedata\n value = unicode(value)\n value = unicodedata.normalize('NFKD', value).encode('ascii', 'ignore')\n value = unicode(re.sub('[^\\w\\s-]', '', value).strip().lower())\n value = unicode(re.sub('[-\\s]+', '-', value))\n return value", "def slugify(value):\n value = unicodedata.normalize('NFKD', value).encode('ascii', 'ignore')\n value = str(re.sub('[^\\w\\s-]', '', str(value)).strip().lower())\n return re.sub('[-\\s]+', '-', str(value))", "def slugify(text, delim=u\"-\"):\n result = []\n for word in _punct_re.split(text.lower()):\n result.extend(unidecode(word).split())\n result = unicode(delim.join(result))\n return \"untitled\" if not result else result", "def slug(self):\n slug = slugify(self.text)\n return \"%s/%s\" % (self.uid, slug[:36]) # max 24 characters", "def _slug(self, value):\n if isinstance(value, basestring):\n try:\n converted = value\n except Exception, exception:\n logger.error(exception)\n raise\n elif isinstance(value, str):\n try:\n converted = unicode(value, \"utf-8\")\n except Exception, exception:\n logger.error(exception)\n raise\n elif isinstance(value, (int, long, float)):\n self.assertNotIsInstance(value, basestring)\n try:\n converted = str(value)\n converted = unicode(converted)\n except Exception, exception:\n logger.error(exception)\n raise\n else:\n self.assertNotIsInstance(value, basestring)\n try:\n converted = unicode(value)\n except Exception, exception:\n logger.error(exception)\n raise\n output = converted.lower().strip().replace(\" \", \"-\")\n output = re.sub(r'[^a-z0-9]+', '-', output).strip('-')\n output = re.sub(r'[-]+', '-', output)\n output = re.sub(r\"[^\\w-]\", \"\", output)\n if isinstance(output, basestring):\n number_of_spaces = output.count(\" \")\n if number_of_spaces == 0:\n return output\n else:\n return False", "def slugify(text, delimitation=u'-'):\n result = []\n for word in _punctuation_regex.split(text.lower()):\n result.extend(unidecode(word).split())\n return str(delimitation.join(result))", "def slugify(self, value, allow_unicode=False):\n value = str(value)\n if allow_unicode:\n value = unicodedata.normalize('NFKC', value)\n else:\n value = unicodedata.normalize('NFKD', value).encode('ascii', 'ignore').decode('ascii')\n value = re.sub(r'[^\\w\\s-]', '', value.lower()).strip()\n return re.sub(r'[-\\s]+', '-', value)", "def xslugify(value):\n value = unicodedata.normalize('NFKD', value).encode('ascii', 'ignore').decode('ascii')\n value = re.sub('[^\\w\\s/-]', '', value).strip().lower()\n return mark_safe(re.sub('[-\\s/]+', '-', value))", "def slugify(self, space='-'):\n #strtitle = unicode(self.title)\n strtitle = self.title\n import unicodedata\n import re\n strtitle = unicodedata.normalize('NFKD', strtitle).encode('ascii', 'ignore')\n strtitle = unicode(re.sub('[^\\w\\s-]', '', strtitle).strip().lower())\n return re.sub('[-\\s]+', space, strtitle)", "def super_flat(cls, s):\n if s is None:\n return ''\n\n return cls.sql_safe(cls.slugify(s).upper().replace('-', ''))", "def unique_slugify(text: str, slugs_in_use: list):\n slug = slugify(text)\n if not slug:\n raise ValueError(\"Unable to slugify input\")\n if slug in slugs_in_use:\n i = 2\n slug = f\"{slug}-{i}\"\n while slug in slugs_in_use:\n i += 1\n slug = f\"{slug[:-2]}-{i}\"\n 
return slug", "def slugify(value, delim=\"-\"):\n value = re.sub(r\"[^\\w\\s-]\", \"\", value).strip()\n value = re.sub(r\"[-\\s]+\", delim, value)\n return value", "def slugify(text, delim=u'-'):\n result = []\n for word in _punct_re.split(text.lower()):\n word = word.encode('translit/long')\n if word:\n result.append(word)\n return unicode(delim.join(result))", "def slugify(text, delim=u'-'):\n result = []\n for word in _punct_re.split(text.lower()):\n word = word.encode('translit/long')\n if word:\n result.append(word)\n return unicode(delim.join(result))", "def generate_slug(self):\n return slugify(self.name)", "def slugify(text, delim=u\"-\"):\n _punct_re = re.compile(r'[\\t !\"#$%&\\'()*\\-/<=>?@\\[\\\\\\]^_`{|},.]+')\n result = []\n for word in _punct_re.split(text.lower()):\n word = normalize(\"NFKD\", word).encode(\"ascii\", \"ignore\")\n if word:\n result.append(word)\n return unicode(delim.join(result))", "def slugify(value):\n import unicodedata\n try:\n unicode_type = unicode\n except NameError:\n unicode_type = str\n if not isinstance(value, unicode_type):\n value = unicode_type(value)\n value = unicodedata.normalize('NFKD', value).encode('ascii', 'ignore').decode('ascii')\n value = unicode_type(_slugify_strip_re.sub('', value).strip())\n return _slugify_hyphenate_re.sub('_', value)", "def normalize_hospital_name(name):\n normalized_name = name.lower()\n\n stopword_list = stopwords.words('english')\n filtered_words = [word for word in wordpunct_tokenize(normalized_name) if word not in stopword_list]\n\n slug = slugify(' '.join(filtered_words))\n\n return slug", "def slugify(value):\n import unicodedata\n value = unicodedata.normalize('NFKD', value).encode('ascii', 'ignore').decode('ascii')\n value = re.sub(r'(?u)[^-\\w .]', '', value).strip()\n value = value.replace(\":\", \"\")\n return value", "def slugify(text, delim=u'-'):\n result = []\n for word in _punct_re.split(text.lower()):\n result.extend(unidecode(word).split())\n return unicode(delim.join(result))", "def slugify(value: Any, sep: str = \"-\") -> Optional[str]:\n text = stringify(value)\n if text is None:\n return None\n text = text.replace(sep, WS)\n # run this first because it'll give better results on special\n # characters.\n text = category_replace(text, SLUG_CATEGORIES)\n text = latinize_text(text, ascii=True)\n if text is None:\n return None\n text = text.lower()\n text = \"\".join([c for c in text if c in VALID_CHARS])\n text = collapse_spaces(text)\n if text is None or len(text) == 0:\n return None\n return text.replace(WS, sep)", "def slugify(value):\n import unicodedata\n import re\n # value = unicodedata.normalize('NFKD', value).encode('ascii', 'ignore')\n value = str(re.sub('[^\\w\\s-]', '', value).strip().lower())\n\n value = str(re.sub('[-\\s]+', '-', value))\n rstr = r\"[\\/\\\\\\:\\*\\?\\\"\\<\\>\\|]\" # '/ \\ : * ? 
\" < > |'\n new_title = re.sub(rstr, \"_\", value) # 替换为下划线\n if new_title is None or len(new_title) == 0:\n random_title = get_random_string(5)\n print('文件名称:{}非法,将生成随机名称:{}'.format(new_title, random_title))\n return random_title\n return new_title\n # return value", "def slugify(s):\n s = s.lower()\n\n for c in [' ', '-', '.', '/']:\n s = s.replace(c, '_')\n\n s = re.sub('\\W', '', s)\n s = s.replace('_', ' ')\n s = re.sub('\\s+', ' ', s)\n s = s.strip()\n s = s.replace(' ', '-')\n\n return s", "def slugify(value, allow_unicode=False):\n value = str(value)\n if allow_unicode:\n value = unicodedata.normalize('NFKC', value)\n else:\n value = unicodedata.normalize('NFKD', value).encode('ascii', 'ignore').decode('ascii')\n value = re.sub(r'[^\\w\\s-]', '', value).strip().lower()\n return re.sub(r'[-\\s]+', '-', value)", "def slugify(value, allow_unicode=False):\n value = str(value)\n if allow_unicode:\n value = unicodedata.normalize('NFKC', value)\n else:\n value = unicodedata.normalize('NFKD', value).encode('ascii', 'ignore').decode('ascii')\n value = re.sub(r'[^\\w\\s-]', '', value).strip().lower()\n return re.sub(r'[-\\s]+', '-', value)", "def slugify(text: str, delim: str=u'-'):\n result = []\n for word in _punct_re.split(text.lower()):\n word = codecs.encode(word, 'translit/long')\n if word:\n result.append(word)\n return str(delim.join(result))", "def _slug_strip(self,\n value,\n separator='-'):\n separator = separator or ''\n if separator == '-' or not separator:\n re_sep = '-'\n else:\n re_sep = '(?:-|%s)' % re.escape(separator)\n # Remove multiple instances and if an alternate separator is provided,\n # replace the default '-' separator.\n if separator != re_sep:\n value = re.sub('%s+' % re_sep, separator, value)\n # Remove separator from the beginning and end of the slug.\n if separator:\n if separator != '-':\n re_sep = re.escape(separator)\n value = re.sub(r'^%s+|%s+$' % (re_sep, re_sep), '', value)\n return value\n\n # TODO implement the unique_slugify so the slug can be used as user url.\n # def unique_slugify(self):\n # slug = slugify(self.name)\n #\n # user = CustomUser.objects.get(slug=slug)\n #\n # while user is not None:\n # slug += \"-%i\" % random.randint(1,2000)\n # user = CustomUser.objects.get(slug=slug)\n #\n # self.slug = slug\n\n # TODO test this code found online", "def slugify(text, delim=u'-'):\n result = []\n for word in _punct_re.split(text.lower()):\n word = normalize('NFKD', word).encode('ascii', 'ignore')\n if word:\n result.append(word)\n return unicode(delim.join(result))", "def slugify(text, delim=u'-'):\n result = []\n for word in _punct_re.split(text.lower()):\n word = normalize('NFKD', word).encode('ascii', 'ignore')\n if word:\n result.append(word)\n return unicode(delim.join(result))", "def slugify(value, allow_unicode=False):\n value = str(value)\n if allow_unicode:\n value = unicodedata.normalize('NFKC', value)\n else:\n value = unicodedata.normalize('NFKD', value).encode(\n 'ascii', 'ignore').decode('ascii')\n value = re.sub(r'[^\\w\\s-]', '', value.lower()).strip()\n return re.sub(r'[-\\s]+', '-', value)", "def __calculate_slug(self):\n\n return slugify.slugify(self.metadata['title'])", "def slugify(value, allow_unicode=False):\n value\n if allow_unicode:\n value = unicodedata.normalize('NFKC', value)\n else:\n value = unicodedata.normalize('NFKD', value).encode('ascii', 'ignore').decode('ascii')\n value = re.sub(r'[^\\w\\s-]', '', value).strip().lower()\n\n return re.sub(r'[-\\s]+', '-', value)", "def slugify(text, delim=u'-'):\n result = []\n 
for word in _punct_re.split(text.lower()):\n word = normalize('NFKD', unicode(word)).encode('ascii', 'ignore')\n if word:\n result.append(word)\n return unicode(delim.join(result))", "def slugify2(text, delim=u'-'):\n result = []\n for word in _punct_re.split(text.lower()):\n word = normalize('NFKD', word).encode('ascii', 'ignore')\n if word:\n result.append(word)\n return unicode(delim.join(result))", "def slugify(value, allow_unicode=False):\n try: # PY2\n value = unicode(value)\n except NameError: # PY3\n value = str(value)\n if allow_unicode:\n value = unicodedata.normalize(\"NFKC\", value)\n else:\n value = (\n unicodedata.normalize(\"NFKD\", value)\n .encode(\"ascii\", \"ignore\")\n .decode(\"ascii\")\n )\n value = re.sub(r\"[^\\w\\s-]\", \"\", value).strip().lower()\n return re.sub(r\"[-\\s]+\", \"-\", value)", "def slugify(text, delim=b'-'):\n result = []\n for word in _punct_re.split(text.lower()):\n word = normalize('NFKD', word).encode('ascii', 'ignore')\n if word:\n result.append(word)\n return delim.join(result).decode(\"utf-8\", \"strict\")", "def slugify(value, allow_unicode=False):\n file, ext = splitext(value)\n value = str(file)\n if allow_unicode:\n value = unicodedata.normalize('NFKC', value)\n else:\n value = unicodedata.normalize('NFKD', value).encode('ascii', 'ignore').decode('ascii')\n value = re.sub(r'[^\\w\\s-]', '', value).strip().lower()\n return re.sub(r'[-\\s]+', '-', value) + ext", "def to_slug(value, incoming=None, errors=\"strict\"):\r\n value = safe_decode(value, incoming, errors)\r\n # NOTE(aababilov): no need to use safe_(encode|decode) here:\r\n # encodings are always \"ascii\", error handling is always \"ignore\"\r\n # and types are always known (first: unicode; second: str)\r\n value = unicodedata.normalize(\"NFKD\", value).encode(\r\n \"ascii\", \"ignore\").decode(\"ascii\")\r\n value = SLUGIFY_STRIP_RE.sub(\"\", value).strip().lower()\r\n return SLUGIFY_HYPHENATE_RE.sub(\"-\", value)", "def _get_slug(str):\n return str.replace(\" \", \"_\")", "def slugify(_string):\n valid_chars = frozenset(\n \"-_.{}{}\".format(string.ascii_letters, string.digits))\n return ''.join(c if c in valid_chars else '_' for c in _string)", "def slugify(filename):\n filename = os.path.relpath(filename, \"\") # The relative path from current folder to the yaml file.\n # Remove \"experiments\" in filename.\n if filename.startswith(\"experiments/\"):\n filename = filename[len(\"experiments/\"):]\n return os.path.splitext(filename)[0].lower().replace(\"/\", \"_\").replace(\".\", \"_\")", "def uglify(text):\n return text.lower().replace(' ', '_')", "def transform(text: str) -> str:\n return text.title()", "def slugify(string):\n string = unicodedata.normalize('NFKC', string)\n # remove all non-word characters (except '-')\n string = re.sub(r'[^\\s\\w-]', '', string).strip().lower()\n # replace spaces (or groups of spaces and dashes) with dashes\n string = re.sub(r'[-\\s]+', '-', string)\n return string", "def slugify3(text, delim=u'-'):\n result = []\n for word in _punct_re.split(text.lower()):\n result.extend(unidecode(word).split())\n return unicode(delim.join(result))", "def test_slug():\n slugify = fields.SlugField().adapt\n\n for input, expect in [\n ('01 HDR test', '01-hdr-test'),\n ('--&*$#(8$jjsdsd77-----test phrase12 123--', '8jjsdsd77-test-phrase12-123'),\n ('1234', '1234'),\n ('abcdEFG', 'abcdefg'),\n ]:\n assert slugify(input) == expect", "def generate_slug(string: str) -> str:\n string = string.replace(\" \", \"-\").lower()\n # Remove any non letters, numbers, and 
non-dashes\n return re.sub(r\"[^a-zA-Z0-9\\-\\+]+\", \"\", string)", "def get_slug(self):\n slug = self.kwargs.get(self.slug_url_kwarg, None)\n if slug is not None:\n return slug.strip()\n else:\n return slug", "def sanitize_txt(x):\n return '_'.join(smart_split(x.lower()))", "def clean_slug(self):\n slug = self.cleaned_data.get('slug', None)\n if slug is None or len(slug) == 0 and 'title' in self.cleaned_data:\n slug = slugify(self.cleaned_data['title'])\n return slug", "def create_slug_from_tag_name(sender, instance, *args, **kwargs):\n if not instance.slug:\n instance.slug = slugify(instance.name)", "def camel_to_slug(self, name):\n name = re.sub(r'([a-z])([A-Z])', r'\\1 \\2', name).title().replace(\" \", \"\").replace(\"_\", \"\")\n s1 = re.sub('(.)([A-Z][a-z]+)', r'\\1_\\2', name)\n slug = re.sub('([a-z0-9])([A-Z])', r'\\1_\\2', s1).lower()\n return slug", "def slug(self) -> str:\n return pulumi.get(self, \"slug\")", "def slugify_title(title, datetimeon):\n year, month = datetimeon.year, datetimeon.month\n the_slug_start = f'{year}/{month}/'\n max_length = 200 - len(the_slug_start)\n return the_slug_start + slugify(title, max_length=max_length)", "def sanitize(cls, query_item):\n\n return cls.__RE_SANITIZE.sub(\"\", query_item)", "def _slug_strip(value, separator='-'):\n separator = separator or ''\n if separator == '-' or not separator:\n re_sep = '-'\n else:\n re_sep = '(?:-|%s)' % re.escape(separator)\n # Remove multiple instances and if an alternate separator is provided,\n # replace the default '-' separator.\n if separator != re_sep:\n value = re.sub('%s+' % re_sep, separator, value)\n # Remove separator from the beginning and end of the slug.\n if separator:\n if separator != '-':\n re_sep = re.escape(separator)\n value = re.sub(r'^%s+|%s+$' % (re_sep, re_sep), '', value)\n return value", "def clean_word(self, word):\n return self.filter_pattern.sub(u'', word.lower())", "def clean(self):\n for lang in tuple(x[0] for x in settings.LANGUAGES):\n slug_field = 'slug_{}'.format(lang)\n slug = getattr(self, slug_field)\n if slug:\n slug_stripped = re.sub(r'[^\\w\\s-]', '', slug).strip(\"-\")\n slug_valid = slugify(slug_stripped)\n setattr(self, slug_field, slug_valid)\n super(AbstractBasePage, self).clean()", "def escape_django_tags(txt):\n for source, dest in ENTITIES.iteritems():\n txt = txt.replace(source, dest)\n return txt", "def encoded_name(self):\n return slugify(str(self).lower())", "def sanitize(text):\n #text = re.sub(r'[*]',r'\\*',text) \n text = re.sub(r'~',r'\\~',text) \n #text = re.sub(r'<',r'\\textless',text) \n #text = re.sub(r'>',r'\\textgreater',text) \n text = re.sub(r'\\|',r'\\|',text) \n text = re.sub(r'_',r'\\\\_',text) \n return text", "def set_slug(self):\n if not self.slug:\n self.slug = slugify(self.name)[:50]", "def __filter( self, text ):\n return text", "def smart_slugify(name, min_length=3):\n url_slug = slugify(name)\n if len(url_slug) >= min_length:\n return url_slug\n # try converting to latin characters:\n for lang in ('ru', 'ka', 'hy', 'el', 'bg', 'uk'):\n trans_url_slug = slugify(translit(name, lang, reversed=True))\n if len(trans_url_slug) >= min_length:\n return trans_url_slug\n # if the url is random lets make it slightly longer\n min_length = 6\n return url_slug + ''.join([random.choice(string.ascii_lowercase) for _ in range(min_length - len(url_slug))])", "def tweet_sanitize(tweet: str) -> str:\n pipeline = [strip_links, strip_mentions, strip_hashtags, strip_all_entities,\n remove_special_characters]\n for fun in pipeline:\n tweet = 
fun(tweet)\n return tweet", "def _apply_filters(self, text, tag):\n\n # The order of the filters below is important\n # and should not be changed\n\n # intial_quotes needs to happen at this point so that\n # attribute values introduced later on do not get affected\n text = self.initial_quotes(text)\n text = self.smarty_pants(text)\n text = self.amp(text)\n text = self.caps(text)\n\n return text", "def normalize_for_url(text: str) -> str:\n\n # German is our main language, so we are extra considerate about it\n # (unidecode turns ü into u)\n text = text.replace(\"ü\", \"ue\")\n text = text.replace(\"ä\", \"ae\")\n text = text.replace(\"ö\", \"oe\")\n clean = _unwanted_url_chars.sub('-', unidecode(text).strip(' ').lower())\n clean = _double_dash.sub('-', clean)\n clean = clean.strip('-')\n\n return clean" ]
[ "0.7214419", "0.71937776", "0.70771027", "0.6860676", "0.6856058", "0.6818637", "0.68092954", "0.67907774", "0.6787663", "0.67330647", "0.67148125", "0.66974", "0.66480976", "0.6628109", "0.6619934", "0.6606258", "0.6593922", "0.6590047", "0.6588239", "0.65801233", "0.65548736", "0.6517051", "0.6512793", "0.65075135", "0.64812416", "0.64625907", "0.64620703", "0.6454177", "0.6445361", "0.6415703", "0.64010495", "0.6397689", "0.63609385", "0.6356682", "0.6338212", "0.6334522", "0.6308917", "0.62924534", "0.6267139", "0.6261368", "0.62609696", "0.6191994", "0.6142937", "0.61343193", "0.6120595", "0.61185366", "0.61185366", "0.61118615", "0.6055944", "0.60555005", "0.60409063", "0.600391", "0.59895754", "0.59872967", "0.5980476", "0.5968606", "0.59446543", "0.59446543", "0.5943009", "0.59418124", "0.59363145", "0.59363145", "0.59275514", "0.5922086", "0.5915711", "0.5905621", "0.5884752", "0.5839212", "0.582551", "0.581774", "0.57922727", "0.57741725", "0.5746825", "0.5718308", "0.5685384", "0.5637411", "0.5620026", "0.5604579", "0.5599677", "0.55577224", "0.5530197", "0.55026084", "0.5501628", "0.54996824", "0.5498156", "0.5485053", "0.5484116", "0.5472054", "0.5443046", "0.5431918", "0.54117966", "0.54104924", "0.5407495", "0.5394127", "0.53937876", "0.53667665", "0.5353019", "0.53133905", "0.5303413", "0.5295212" ]
0.7127615
2
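The positive document for this query is a one-line custom Jinja2 filter. A minimal usage sketch, assuming the slugify callable comes from the python-slugify package (the snippet itself does not show its import):

    # Hypothetical wiring of the retrieved filter into a Jinja2 environment.
    from jinja2 import Environment
    from slugify import slugify  # assumption: python-slugify supplies slugify()

    def slug(value):
        """Jinja2 filter to slugify text."""
        return slugify(value)

    env = Environment()
    env.filters["slug"] = slug  # register under the template filter name "slug"

    # Renders "hello-world"
    print(env.from_string("{{ 'Hello, World!' | slug }}").render())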
Jinja2 filter to get language object from language code.
def language_name(value): return pycountry.languages.get(alpha_2=value)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_lang(context, field):\n lang = json.load(open(\"json/lang.json\", \"r\"))\n conf = json.load(open(\"json/serverconfig.json\", \"r\"))\n return lang[conf[str(context)][\"lang\"]][field]", "def language_code(self) -> str:\n return pulumi.get(self, \"language_code\")", "def get_language(self, language):\n found_lang = None\n for lang in self.catalog['languages']:\n if lang['identifier'] == language['identifier']:\n found_lang = lang\n break\n if not found_lang:\n self.catalog['languages'].append(language)\n else:\n language = found_lang\n if 'resources' not in language:\n language['resources'] = []\n return language", "def get_language(lang_code) -> str:\n langs = defaultdict(lambda: \"en\", {\"ru\": \"ru\"})\n return langs[lang_code.split(\"-\")[0]] if lang_code else \"en\"", "def language(self):\r\n return self._get('language', {})", "def get_for_language(self, language):\r\n assert isinstance(language, str)\r\n\r\n language = language.strip().lower()\r\n if language in self.__languages:\r\n code = self.__languages[language]\r\n return code\r\n return None", "def get_localization(self, language: str) -> Localization:\n ...", "def get_locale():\n if (session.get(\"language\") is not None):\n return session.get('language')['charcode']\n return request.accept_languages.best_match(app.config['LANGUAGES'].keys())", "def language(self):\n if \"language\" in self._prop_dict:\n return self._prop_dict[\"language\"]\n else:\n return None", "def language(self):\n lang = None\n if self.__dict__['TAG:language']:\n lang = self.__dict__['TAG:language']\n return lang", "def get_language_from_object(self, odata):\n try:\n if odata.get('lxp', {}).get('language', None):\n language = odata['lxp']['language']\n elif odata.get('meta', {}).get('ecl', {}).get('lang', None):\n language = odata['meta']['ecl']['lang']\n else:\n language = 'en'\n except (AttributeError, KeyError):\n print(\"Exception getting language\")\n language = 'en'\n\n return language", "def language(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"language\")", "def validate_language(language):\n\n try:\n lang_code = language_dict[language]\n except KeyError:\n lang_code = None\n return lang_code", "def get_language_name(iso_code):\n if iso_code not in LANGUAGES_BY_CODE:\n try:\n lang = iso639.languages.get(part3=iso_code)\n except KeyError:\n lang = None\n\n if lang:\n # we only show up to the first semi or paren\n lang = re.split(r\";|\\(\", lang.name)[0].strip()\n\n LANGUAGES_BY_CODE[iso_code] = lang\n\n return LANGUAGES_BY_CODE[iso_code]", "def get_language():\n try:\n from leaves.middleware import request_context\n return request_context.language\n except:\n return get_site().preferences.default_language", "def getLanguage(self):\n return self.getOrDefault(self.language)", "def getObject(language=None):", "def lang_to_fieldname(l):\r\n global searchable_langs\r\n\r\n code = l[:2]\r\n\r\n if code in searchable_langs:\r\n return (\"contents_%s\" % code)\r\n else:\r\n return \"contents\"", "def get_language(self):\r\n return self.language", "def get_language(self):\n return self.lang", "def get_language(self):\n return self.language if self.language is not None else get_language()", "def get_lang(ix):\n\tlang = None\n\tif ix == 0:\n\t\tlang = setting.TLA_ENG\n\telif ix == 1:\n\t\tlang = setting.TLA_JP\n\telse:\n\t\tlang = setting.TLA_VN\n\n\tf = open (f\"lang\\\\{lang}.json\", encoding=setting.TLA_UTF8)\n\tglobal data_json\n\tdata_json = json.load(f)\n\n\treturn lang", "def make_language(iso639row):\n return 
Language (name=iso639row['Ref_Name'], code=iso639row['Id'])", "def get_language_code(language_id):\n\n # mapping language id to it's code\n if language_id in language_id_to_code_mapper:\n return language_id_to_code_mapper[language_id]\n\n raise Exception('Unknown language id received.')", "def language(self) -> str:\n if self.language_code in CODE_TO_LANGUAGE:\n return CODE_TO_LANGUAGE[self.language_code]\n\n return self.language_code", "def Language(self, default=None):\n return self.data.get('language', default)", "def get_language(language_id):\n\n api = (api_name, 'language')\n args_params = (str(language_id), )\n \n response = make_request(*args_params, api=api, action='get', **{})\n status_code = response.status_code\n content = response.text\n\n msg = str(status_code) + ' : ' + content\n \n if status_code >= 300:\n\n click.echo(\"response error message: %s \" % msg)\n raise click.Abort()\n \n\n logger.debug(\"response from spanglish get_language: {}\".format(response))\n logger.debug(\"response msg from spanglish get_language: {}\".format(msg))\n\n click.echo(\"response message: %s \" % msg)", "def language_code(self):\n return self._language_code", "def language_selector(context):\r\n output = \"\"\r\n from django.conf import settings\r\n i18 = getattr(settings, 'USE_I18N', False)\r\n if i18:\r\n template = \"admin/language_selector.html\"\r\n context['i18n_is_set'] = True\r\n try:\r\n output = render_to_string(template, context)\r\n except:\r\n pass\r\n return output", "def get_lang(self):\n\n path = self.get_lang_path()\n for language in self.languages:\n if language in path:\n return language", "def translate(self, language=None):", "def get_translation(obj, language_code):\n if not obj or not hasattr(obj, \"get_translation\"):\n return None\n return obj.get_translation(language_code)", "def _getLang(self, language):\n if language == None:\n language = self.getDefaultLanguage()\n\n return language", "def find(lang):\n try:\n return as_dict(pycountry.languages.lookup(lang))\n except LookupError:\n return {}", "def get_locale_name(code):\n language_map = dict(django.conf.global_settings.LANGUAGES)\n\n # check for exact match\n if code in language_map:\n return language_map[code]\n\n # try for the language, fall back to just using the code\n language = code.split(\"-\")[0]\n return language_map.get(language, code)", "def language_framework(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"language_framework\")", "def language():\r\n\r\n cursor.execute('SELECT name from languages order by RANDOM() limit 1;')\r\n return cursor.fetchone()[0]", "def get_language(self) -> str:\n return self.language", "def renderer_globals_factory(system):\n return {'current_language': get_localizer(system['request']).locale_name,}", "def code(self):\n return self.language()", "def get_full_language(self, language):\n if language:\n language = pycountry.languages.get(alpha_2=language)\n if language:\n language = language.name\n return language.title()", "def get_lang(self):\n return self.langs.lang", "def GetLangId(self):\n return self._code['lang_id']", "def create(language, namespace, templatepath):\n\n lang = None\n\n if language == \"Java\":\n lang = Java(namespace, templatepath)\n else:\n raise ModelProcessingError(\n \"Invalid language: %s\" % language)\n\n return lang", "def get_langs(id):", "def get_text(name, language_code) -> MLWText:\n if language_code not in data:\n # default language code\n language_code = 'ru'\n retval = data.get(language_code).get(name)\n if retval is 
None:\n raise ValueError(f'Undefined text name: {name}')\n return retval", "def get_locale():\n localLang = request.args.get('locale')\n supportLang = app.config['LANGUAGES']\n if localLang in supportLang:\n return localLang\n userId = request.args.get('login_as')\n if userId:\n localLang = users[int(userId)]['locale']\n if localLang in supportLang:\n return localLang\n localLang = request.headers.get('locale')\n if localLang in supportLang:\n return localLang\n return request.accept_languages.best_match(app.config['LANGUAGES'])", "def __expandLanguage(self, language):\n\n # Priority Chain:\n # de_DE => de => C (default language) => code\n\n all = [language]\n if \"_\" in language:\n all.append(language[:language.index(\"_\")])\n all.append(\"C\")\n\n return all", "def get_word(key: str, language: str):\n if key not in word_keys:\n return \"LOCALIZATION KEY {} NOT FOUND FOR LANGUAGE {}\".format(key, language)\n words = word_keys[key]\n\n # If the word doesn't exist, just show word in English\n if language not in words or words[language] == \"\":\n return words[EN]\n else:\n return words[language]", "def add_game_language_subscriber(event):\n request = event.request\n # TODO: look up game language from a cookie or something\n en = db.get_by_identifier_query(db.t.Language, u'en').first()\n request.tmpl_context.game_language = en", "def get_language(self) -> str:\n return settings.LANGUAGE_CODE", "def get_locale(lang):\n lang = babel_format_locale_map.get(lang) or lang\n try:\n return Locale.parse(lang, sep='-')\n except (UnknownLocaleError, ValueError):\n return Locale(*settings.LANGUAGE_CODE.split('-'))", "def get_language(chat_id):\n db_connection = DatabaseConnection()\n language = db_connection.get_setting(chat_id=chat_id, setting=LANGUAGE_SETTING)\n db_connection.close()\n\n return language", "def getLang(lang, localedir=os.path.expanduser(\"~\") + \"/share/locale\"):\n return gettext.translation(\"bridgedb\", localedir=localedir, \n languages=[lang], fallback=\"en\")", "def __init__(self, language=None):\n self.language = language\n self.translations = {}", "def get_language_name(self):\n return self.language_name", "def requestLanguage(request):\n # Return the user language preferences for registered users\n if request.user.valid and request.user.language:\n return request.user.language\n\n # Or try to return one of the user browser accepted languages, if it\n # is available on this wiki...\n available = wikiLanguages()\n if not request.cfg.language_ignore_browser:\n for lang in browserLanguages(request):\n if lang in available:\n return lang\n \n # Or return the wiki default language...\n if request.cfg.language_default in available:\n lang = request.cfg.language_default\n # If everything else fails, read the manual... 
or return 'en'\n else:\n lang = 'en'\n return lang", "def language(self):\n # type: () -> string_types\n return self._language", "def language(self):\n # type: () -> string_types\n return self._language", "def snippets_by_language(request, slug):\n language = get_object_or_404(Language, slug__exact=slug)\n return list_detail.object_list(request,\n queryset=Snippet.objects.get_by_language(slug),\n extra_context={ 'object': language },\n template_name='cab/language_detail.html',\n **base_generic_dict)", "def wikiLanguages():\n return languages", "def language(self):\n return self._language", "def language(self):\n return self._language", "def language(self):\n return self._language", "def language(self):\n return self._language", "def lang(self):\n return self._lang", "def getVKBLanguage(self):\r\n\r\n return self.phone.sx('(send (send (get-input-locale-manager) get-current-locale) get-iso)', convertToString=False)", "def get_language(mgroups):\n\n if mgroups:\n lang = mgroups[0].strip('[').strip(']')\n return lang.lower().strip()\n return None", "def language(self):\n portal_state = self.context.unrestrictedTraverse(\"@@plone_portal_state\")\n return aq_inner(self.context).Language() or portal_state.default_language()", "def book_language_list(request):\n languages = Language.objects.all().order_by('-name')\n return render(request, 'library/book_language_list.html', {\"languages\": languages, })", "def languages(self):\r\n url = '{0}/{1}'.format(self.get_url(), 'languages')\r\n\r\n return http.Request('GET', url), parsers.parse_json", "def get_locale():\n setting = Setting.query.filter(Setting.name == 'default_language').first()\n\n if setting is not None:\n return setting.value\n\n # Return default language when none found\n return 'en'", "def get_discussion_language(matchdict, params, session, current_issue_uid=None):\n if not current_issue_uid:\n current_issue = DBDiscussionSession.query(Issue).filter(Issue.is_disabled == False,\n Issue.is_private == False).first()\n current_issue_uid = current_issue.uid if current_issue else None\n\n # first matchdict, then params, then session, afterwards fallback\n issue = matchdict['issue'] if 'issue' in matchdict \\\n else params['issue'] if 'issue' in params \\\n else session['issue'] if 'issue' in session \\\n else current_issue_uid\n\n db_issue = DBDiscussionSession.query(Issue).get(issue)\n\n return db_issue.lang if db_issue else 'en'", "async def getLanguageFile(self, language: str):\n try:\n if language not in SUPPORTED_LANGUAGES:\n filename = \"translations/en.json\"\n else:\n filename = f\"translations/{language.lower()}.json\"\n\n with open(filename, \"r\") as json_file:\n return json.load(json_file)\n\n except FileNotFoundError as e:\n _LOGGER.debug(\"Could not read language file. Error message: %s\", e)\n return None\n except Exception as e:\n _LOGGER.debug(\"Could not read language file. 
Error message: %s\", e)\n return None", "def get_language_script(script):\n languages_scripts = {\n 'arab': ('ara', 'per'),\n 'cyrl': ('bel', 'chu', 'mac', 'rus', 'srp', 'ukr'),\n 'grek': ('grc', 'gre'),\n 'hani': ('chi', 'jpn'),\n 'hebr': ('heb', 'lad', 'yid'),\n 'jpan': ('jpn', ),\n 'kore': ('kor', ),\n 'zyyy': ('chi', )\n }\n if script in languages_scripts:\n languages = ([marc21.lang_from_008] +\n marc21.langs_from_041_a +\n marc21.langs_from_041_h)\n for lang in languages:\n if lang in languages_scripts[script]:\n return '-'.join([lang, script])\n error_print('WARNING LANGUAGE SCRIPTS:', marc21.bib_id,\n script, '008:', marc21.lang_from_008,\n '041$a:', marc21.langs_from_041_a,\n '041$h:', marc21.langs_from_041_h)\n return '-'.join(['und', script])", "def translate_url(context, lang_code):\n # thanks to https://stackoverflow.com/a/51974042\n path = context.get(\"request\").get_full_path()\n return django_translate_url(path, lang_code)", "def to_language(arg: str) -> Tuple[Union[str, None], str]: \n if (low:= arg.lower()) in LANGUAGES:\n return arg\n else:\n return LANGCODES.get(low, None)", "def languageHelperFactory(intLanguageName, debugMode):\r\n return THE_LANGUAGE_HELPER", "def init_language(self):\n\n if 'HTTP_COOKIE' in os.environ:\n cookies = os.environ['HTTP_COOKIE'].split(';')\n for cookie in cookies:\n (key, value) = cookie.split('=')\n if key == Intuition.COOKIE_USERLANG:\n return value\n \n return self.default_language", "def get_language_key(host, domain, user):\n\n # Get lang from authenticated user\n if not user.is_anonymous():\n value = user.language\n\n # Get lang based on request host and global language settings\n else:\n current_subdomain = host[:-len(domain) - 1]\n default_language = settings.LANGUAGE_CODE\n valid_languages = [l[0] for l in settings.LANGUAGES]\n valid_subdomains = list(settings.SUBDOMAIN_URLCONFS)\n default_language_domains = []\n\n for d in valid_subdomains:\n if (d is default_language) or (d not in valid_languages):\n default_language_domains.append(d)\n\n if current_subdomain in default_language_domains:\n value = default_language\n else:\n value = current_subdomain\n\n return value", "def get_template(self):\n return self.sep.join([self.htmls[html] for html in self.lang]).format(**self.fields)", "def get_language(benchmark):\n config = benchmark_config.get_config(benchmark)\n return config.get('language', 'c++')", "def get_version(self, language):\n\n if isinstance(language, basestring):\n language = Language.get(language)\n\n for version in self.versions:\n if version.language == language:\n return version\n\n return None", "def _get_lang(self, *args, **kwargs):\n if \"lang\" in kwargs:\n if kwargs[\"lang\"] in self._available_languages:\n self.lang = kwargs[\"lang\"]", "def get(tag: {str, 'Language'}, normalize=True) -> 'Language':\n if isinstance(tag, Language):\n if not normalize:\n # shortcut: we have the tag already\n return tag\n\n # We might need to normalize this tag. Convert it back into a\n # string tag, to cover all the edge cases of normalization in a\n # way that we've already solved.\n tag = tag.to_tag()\n\n if (tag, normalize) in Language._PARSE_CACHE:\n return Language._PARSE_CACHE[tag, normalize]\n\n data = {}\n\n # If the complete tag appears as something to normalize, do the\n # normalization right away. 
Smash case and convert underscores to\n # hyphens when checking, because the case normalization that comes from\n # parse_tag() hasn't been applied yet.\n\n tag_lower = normalize_characters(tag)\n if normalize and tag_lower in LANGUAGE_REPLACEMENTS:\n tag = LANGUAGE_REPLACEMENTS[tag_lower]\n\n components = parse_tag(tag)\n\n for typ, value in components:\n if typ == 'extlang' and normalize and 'language' in data:\n # smash extlangs when possible\n minitag = '%s-%s' % (data['language'], value)\n norm = LANGUAGE_REPLACEMENTS.get(normalize_characters(minitag))\n if norm is not None:\n data.update(\n Language.get(norm, normalize).to_dict()\n )\n else:\n data.setdefault('extlangs', []).append(value)\n elif typ in {'extlang', 'variant', 'extension'}:\n data.setdefault(typ + 's', []).append(value)\n elif typ == 'language':\n if value == 'und':\n pass\n elif normalize:\n replacement = LANGUAGE_REPLACEMENTS.get(value.lower())\n if replacement is not None:\n # parse the replacement if necessary -- this helps with\n # Serbian and Moldovan\n data.update(\n Language.get(replacement, normalize).to_dict()\n )\n else:\n data['language'] = value\n else:\n data['language'] = value\n elif typ == 'territory':\n if normalize:\n data['territory'] = TERRITORY_REPLACEMENTS.get(value.lower(), value)\n else:\n data['territory'] = value\n elif typ == 'grandfathered':\n # If we got here, we got a grandfathered tag but we were asked\n # not to normalize it, or the CLDR data doesn't know how to\n # normalize it. The best we can do is set the entire tag as the\n # language.\n data['language'] = value\n else:\n data[typ] = value\n\n result = Language.make(**data)\n Language._PARSE_CACHE[tag, normalize] = result\n return result", "def get_language_data(self, object, data):\n if \"lang_materials\" in object:\n if object.get(\"lang_materials\") in [\"\", [], {}]:\n data[\"lang_materials\"] = closest_parent_value(object, \"lang_materials\")\n else:\n data[\"language\"] = closest_parent_value(object, \"language\")\n return data", "def __translationLanguage(self):\n return self.transLanguageComboBox.itemData(\n self.transLanguageComboBox.currentIndex())", "def get_language(lang_list: list = None) -> str:\n\tis_logged_in = frappe.session.user != \"Guest\"\n\n\t# fetch language from form_dict\n\tif frappe.form_dict._lang:\n\t\tlanguage = get_lang_code(frappe.form_dict._lang or get_parent_language(frappe.form_dict._lang))\n\t\tif language:\n\t\t\treturn language\n\n\t# use language set in User or System Settings if user is logged in\n\tif is_logged_in:\n\t\treturn frappe.local.lang\n\n\tlang_set = set(lang_list or get_all_languages() or [])\n\n\t# fetch language from cookie\n\tpreferred_language_cookie = get_preferred_language_cookie()\n\n\tif preferred_language_cookie:\n\t\tif preferred_language_cookie in lang_set:\n\t\t\treturn preferred_language_cookie\n\n\t\tparent_language = get_parent_language(language)\n\t\tif parent_language in lang_set:\n\t\t\treturn parent_language\n\n\t# fetch language from request headers\n\taccept_language = list(frappe.request.accept_languages.values())\n\n\tfor language in accept_language:\n\t\tif language in lang_set:\n\t\t\treturn language\n\n\t\tparent_language = get_parent_language(language)\n\t\tif parent_language in lang_set:\n\t\t\treturn parent_language\n\n\t# fallback to language set in System Settings or \"en\"\n\treturn frappe.db.get_default(\"lang\") or \"en\"", "def show(self, id):\n lm = h.eagerload_morpheme_language_model(Session.query(MorphemeLanguageModel)).get(id)\n if lm:\n 
return lm\n else:\n response.status_int = 404\n return {'error': 'There is no morpheme language model with id %s' % id}", "def _get_locale() -> str:\n languages = flask.current_app.config['LANGUAGES'].keys()\n locale = flask.request.accept_languages.best_match(languages)\n\n # If no locale could be determined, fall back to the default.\n if locale is None:\n locale = flask.current_app.config['BABEL_DEFAULT_LOCALE']\n\n return locale", "def to_language(self):\n return self.language()", "def language(self) -> str:\n return self._language", "def language(self) -> str:\n return self._language", "def get_language(self, word, lang=None):\n lang = lang or self.cfg.get('lang', 'en')\n # let's retrieve the word from configuration dict.\n try:\n return self.cfg['words_' + lang][word]\n except StandardError:\n return 'Do not know how to \"{}\" in \"{}\"'.format(word, lang)", "def get_localization(self):\n return self._request_data(\"/lokarria/localization\")", "def get_lang(self):\n props = getToolByName(self.context,\n 'portal_properties')\n return props.site_properties.getProperty('default_language') or 'en'", "def get_parent_language(lang: str) -> str:\n\tis_language_variant = \"-\" in lang\n\tif is_language_variant:\n\t\treturn lang[: lang.index(\"-\")]", "def language(self, max_length=None, **kwargs):\n T = self.with_output()\n return T.language(max_length)", "def __init__(self, language=None):\n self._language = language", "def fetchTranslation(self, language):\n pass" ]
[ "0.6488083", "0.6458506", "0.64521635", "0.6425052", "0.6333819", "0.63314784", "0.62535805", "0.6156458", "0.61025834", "0.60967314", "0.60901237", "0.60439485", "0.602857", "0.60120213", "0.59964323", "0.5989398", "0.5981419", "0.5978724", "0.59718126", "0.59527946", "0.59374666", "0.5934484", "0.592589", "0.59172606", "0.5902493", "0.583714", "0.5834787", "0.5830548", "0.5829775", "0.58215046", "0.5816021", "0.5808136", "0.58058095", "0.580194", "0.57906187", "0.5770688", "0.5756154", "0.5740944", "0.57406586", "0.5726123", "0.5716362", "0.5703403", "0.5699291", "0.56940776", "0.5683305", "0.56436276", "0.56369334", "0.56337726", "0.5632933", "0.5600715", "0.5592188", "0.5583371", "0.5574441", "0.55596054", "0.5548096", "0.5537277", "0.5528164", "0.5520366", "0.5520366", "0.5493657", "0.5480115", "0.54744613", "0.54744613", "0.54744613", "0.54744613", "0.5465684", "0.54478043", "0.54331124", "0.54182094", "0.54153043", "0.54138815", "0.5412134", "0.5411998", "0.53948706", "0.5390647", "0.53888345", "0.5378781", "0.53661734", "0.53597385", "0.5349519", "0.53393763", "0.5336828", "0.5334934", "0.5320622", "0.53162664", "0.5310395", "0.53059846", "0.5305475", "0.5286773", "0.5283448", "0.52831876", "0.5278394", "0.5278394", "0.5257537", "0.5255797", "0.5244406", "0.5243393", "0.5238481", "0.5235198", "0.5205506" ]
0.6110068
8
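This query's positive document is likewise a Jinja2 filter, returning a pycountry language record rather than a plain string. A short sketch of registering and using it, assuming the pycountry package is available (languages.get returns None for unknown codes):

    # Hypothetical registration of the retrieved filter.
    import pycountry
    from jinja2 import Environment

    def language_name(value):
        """Jinja2 filter to get language object from language code."""
        return pycountry.languages.get(alpha_2=value)

    env = Environment()
    env.filters["language_name"] = language_name

    # The filter yields a pycountry object; .name holds the English name.
    template = env.from_string("{{ ('de' | language_name).name }}")
    print(template.render())  # German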
Decodes a Base58Check encoded key.
def from_b58check(key): return HDKey.from_bytes(base58.b58decode_check(key))[0]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def base58_decode(v: bytes) -> bytes:\n try:\n prefix_len = next(\n len(encoding[2])\n for encoding in base58_encodings\n if len(v) == encoding[1] and v.startswith(encoding[0])\n )\n except StopIteration:\n raise ValueError('Invalid encoding, prefix or length mismatch.')\n\n return base58.b58decode_check(v)[prefix_len:]", "def decode(self, crypto):", "def _decode_key(self, key):\n return key if not key or isinstance(key, str) else key.decode()", "def decode(s):\n try:\n if not s:\n return b''\n\n # Convert the string to an integer\n n = 0\n for c in s:\n n *= 58\n if c not in b58_digits:\n raise InvalidBase58Error('Character %r is not a valid base58 character' % c)\n digit = b58_digits.index(c)\n n += digit\n\n # Convert the integer to bytes\n h = '%x' % n\n if len(h) % 2:\n h = '0' + h\n res = unhexlify(h.encode('utf8'))\n\n # Add padding back.\n pad = 0\n for c in s[:-1]:\n if c == b58_digits[0]: pad += 1\n else: break\n return hexlify(b'\\x00' * pad + res).decode('utf8')", "def decode_key(as_bytes: typing.List[int]) -> str:\n raise NotImplementedError()", "def decode_base58(v):\n prefix = b''\n while v.startswith(B58[0]):\n prefix += b'\\0' \n v = v[1:]\n if v:\n return prefix + bytes(changebase(map(B58.index,v),58,256))\n else:\n return prefix", "def base58_decode(s):\n if not s:\n return b''\n alphabet = '123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz'\n # Convert the string to an integer\n n = 0\n for c in s:\n n *= 58\n if c not in alphabet:\n raise Exception('Character %r is not a valid base58 character' % c)\n digit = alphabet.index(c)\n n += digit\n\n # Convert the integer to bytes\n h = '%x' % n\n if len(h) % 2:\n h = '0' + h\n # res = \"\"\n res = bytearray.fromhex(h)\n\n # Add padding back.\n pad = 0\n for c in s[:-1]:\n if c == alphabet[0]: pad += 1\n else: break\n return b'\\x00' * pad + res", "def decodeBase58(s):\n if not s:\n return b''\n\n # Convert the string to an integer\n n = 0\n for c in s:\n n *= 58\n if c not in B58_DIGITS:\n raise InvalidBase58Error('Character %r is not a valid base58 character' % c)\n digit = B58_DIGITS.index(c)\n n += digit\n\n # Convert the integer to bytes\n h = '%x' % n\n if len(h) % 2:\n h = '0' + h\n res = unhexlify(h.encode('utf8'))\n\n # Add padding back.\n pad = 0\n for c in s[:-1]:\n if c == B58_DIGITS[0]:\n pad += 1\n else:\n break\n return b'\\x00' * pad + res", "def deserialize_key(key: bytes) -> str:\n return key.decode()", "def rc4_decode(data, key, decode=base64.b64decode, salt_length=16):\n if decode:\n data = decode(data)\n salt = data[:salt_length]\n return crypt(data[salt_length:], sha1(key + salt).digest())", "def parse_key(raw_key):\n raw_key_bytes = raw_key.encode('ascii')\n try:\n validate_cmek(raw_key)\n key_type = KeyType.CMEK\n sha256 = None\n except errors.Error:\n if len(raw_key) != 44:\n raise\n key_type = KeyType.CSEK\n sha256 = hash_util.get_base64_hash_digest_string(\n hashlib.sha256(base64.b64decode(raw_key_bytes)))\n return EncryptionKey(key=raw_key, sha256=sha256, type=key_type)", "def decode(key: str, enc: str) -> str:\n\n dec = []\n enc = base64.urlsafe_b64decode(enc).decode()\n for i in range(len(enc)):\n key_c = key[i % len(key)]\n dec_c = chr((256 + ord(enc[i]) - ord(key_c)) % 256)\n dec.append(dec_c)\n return \"\".join(dec)", "def decode_public_key(as_bytes: typing.List[int]) -> PublicKey:\n raise NotImplementedError()", "def decode_and_decrypt(encoded_data, key):\r\n return aes_decrypt(base64.urlsafe_b64decode(encoded_data), key)", "def parse_signature(data: bytes):\n return 
base58_encode(data, b'sig').decode()", "def decode_base58(smartAddress, length):\n n = 0\n for char in smartAddress:\n try:\n n = n * 58 + digits58.index(char)\n except:\n msg = u\"Character not part of SmartCashs's base58: '%s'\"\n raise ValueError(msg % (char,))\n\n return n.to_bytes(length, 'big')", "def b58decode(v, length):\n long_value = 0L\n for (i, c) in enumerate(v[::-1]):\n long_value += __b58chars.find(c) * (__b58base**i)\n \n result = ''\n while long_value >= 256:\n div, mod = divmod(long_value, 256)\n result = chr(mod) + result\n long_value = div\n result = chr(long_value) + result\n \n nPad = 0\n for c in v:\n if c == __b58chars[0]: nPad += 1\n else: break\n \n result = chr(0)*nPad + result\n if length is not None and len(result) != length:\n return None\n \n return result", "def b58decode(v, length):\n\tlong_value = 0L\n\tfor (i, c) in enumerate(v[::-1]):\n\t\tlong_value += __b58chars.find(c) * (__b58base**i)\n\tresult = ''\n\twhile long_value >= 256:\n\t\tdiv, mod = divmod(long_value, 256)\n\t\tresult = chr(mod) + result\n\t\tlong_value = div\n\tresult = chr(long_value) + result\n\tnPad = 0\n\tfor c in v:\n\t\tif c == __b58chars[0]: nPad += 1\n\t\telse: break\n\tresult = chr(0)*nPad + result\n\tif length is not None and len(result) != length:\n\t\treturn None\n\treturn result", "def b58decode(v, length):\n long_value = 0\n for (i, c) in enumerate(v[::-1]):\n long_value += b58_chars.find(c) * (b58_base ** i)\n\n result = ''\n while long_value >= 256:\n div, mod = divmod(long_value, 256)\n result = chr(mod) + result\n long_value = div\n result = chr(long_value) + result\n\n nPad = 0\n for c in v:\n if c == b58_chars[0]:\n nPad += 1\n else:\n break\n\n result = chr(0) * nPad + result\n if length is not None and len(result) != length:\n return None\n\n return result", "def b2a_base58check(data):\n\n return encoding.b2a_hashed_base58(data)", "def from_base58(cls, seed: str) -> 'PrivateKey':\n return cls(base58.b58decode(seed))", "def decipher_raw(s, key):\n assert struct.calcsize('I') == 4\n assert len(s) % 8 == 0, len(s)\n u = struct.unpack('%dI' % (len(s) / 4), s)\n e = [decrypt(u[i], u[i + 1], key) for i in range(len(u))[::2]]\n return b''.join([struct.pack('2I', ee, ef) for ee, ef in e])", "def test_decode(self):\n self.assertEqual(\n hex_to_b64(self.hex_string),\n self.expect_result\n )", "def b58decode(v, length):\r\n long_value = 0L\r\n for (i, c) in enumerate(v[::-1]):\r\n long_value += __b58chars.find(c) * (__b58base**i)\r\n\r\n result = ''\r\n while long_value >= 256:\r\n div, mod = divmod(long_value, 256)\r\n result = chr(mod) + result\r\n long_value = div\r\n result = chr(long_value) + result\r\n\r\n nPad = 0\r\n for c in v:\r\n if c == __b58chars[0]: nPad += 1\r\n else: break\r\n\r\n result = chr(0)*nPad + result\r\n if length is not None and len(result) != length:\r\n return None\r\n\r\n return result", "def forge_base58(value: str) -> bytes:\n return base58_decode(value.encode())", "def parse_public_key(data: bytes) -> str:\n key_prefix = {\n b'\\x00': b'edpk',\n b'\\x01': b'sppk',\n b'\\x02': b'p2pk'\n }\n return base58_encode(data[1:], key_prefix[data[:1]]).decode()", "def decode_key(key: str) -> Tuple[int, int]:\n try:\n mod, exp = key.split(\".\")\n except ValueError:\n raise ValueError(f\"`{key}` is not a valid key\")\n\n return (\n int.from_bytes(base64.urlsafe_b64decode(mod), config.BYTEORDER),\n int.from_bytes(base64.urlsafe_b64decode(exp), config.BYTEORDER, signed=True),\n )", "def decode(self, encoded):", "def decode_base58(bitcoin_address, length):\n n 
= 0\n for char in bitcoin_address:\n try:\n n = n * 58 + digits58.index(char)\n except:\n msg = u\"Character not part of Bitcoin's base58: '%s'\"\n raise IllegalCharacterError(msg % char)\n try:\n return n.to_bytes(length, 'big')\n except AttributeError:\n # Python version < 3.2\n return _long_to_bytes(n, length, 'big')", "def decrypt_key(data, key):\n data = MegaCrypto.base64_decode(data)\n return sum((MegaCrypto.str_to_a32(MegaCrypto.cbc_decrypt(data[_i:_i + 16], key))\n for _i in range(0, len(data), 16)), ())", "def Read(key):\n mac = json.loads(key)\n return HmacKey(mac['hmacKeyString'], mac['size'])", "def Read(key):\n mac = json.loads(key)\n return HmacKey(mac['hmacKeyString'], mac['size'])", "def decode_secret(secret, encoding=SecretEncoding.BASE32):\n return _decoding_map[encoding](secret)", "def decode(k, key_length):\n key = k[:key_length]\n val_length, ber_length = decode_ber(k[key_length:])\n value = k[key_length + ber_length : key_length + ber_length + val_length]\n return key, value", "def get_verifying_key(private_key):\n return private_key.get_verifying_key().to_pem().decode('ascii')", "def decrypt(self, input_u8):\n if self.__prev_key == self.__new_key:\n self.__randomize()\n key_map = {b:i for i, b in enumerate(self.cipher)}\n i = 0\n while i < len(input_u8):\n input_u8[i] = key_map[input_u8[i] ^ self.cipher[i%256]]\n i += 1\n return input_u8.decode(\"utf-8\")", "def parse_key(key: RSA.RsaKey) -> str:\n\n return binascii.hexlify(key.exportKey(\n format='DER')).decode('ascii')", "def test_decoder(self):\n from sosbeacon.utils import number_decode\n\n encoded = 'b6'\n number = number_decode(encoded)\n self.assertEqual(number, 123)", "def _decode_encrypted_part(self, value):\n\n return encoding_utils.base64_to_bytes(value)", "def base32_decode(encoded_bytes: bytes) -> str:\n\n # decode the bytes from base32\n # then, decode the bytes-like object to return as a string\n return base64.b32decode(encoded_bytes).decode(\"utf-8\")", "def xor_decode(data, key):\n if not data:\n return \"\"\n if not key:\n raise exceptions.DecryptError\n data = binascii.a2b_hex(data.encode(\"utf-8\")).decode(\"utf-8\")\n return ''.join(chr(ord(a) ^ ord(b)) for a, b in zip(data, key))", "def decode_base64(self, s):\n return self.transcode(struct.unpack('!L', base64.b64decode(s + '==', self.extra_chars))[0])", "def encode_key(self, key):\n return key.private_bytes(\n encoding=serialization.Encoding.PEM,\n format=serialization.PrivateFormat.TraditionalOpenSSL,\n encryption_algorithm=serialization.NoEncryption(),\n ).decode(encoding='UTF-8')", "def rawPubkey(self):\n # note the first byte determines what type of address\n # and the last four are checksums\n return a2b_base58(self.pubkey)[1:-4]", "def decrypt(self, b):\n decrypted = self.__aes.ecbDecrypt(b)\n return unpadPkcs7(decrypted, 16)", "def b2a_base58(s):\n v, prefix = to_long(256, lambda x: x, iterbytes(s))\n s = from_long(v, prefix, BASE58_BASE, lambda v: BASE58_ALPHABET[v])\n return s.decode(\"utf8\")", "def decode(self, data):\n return self.__cipher.decrypt(data)", "def _b64_decode(data):\n encoded = data.translate(B64_FROM_BCRYPT)\n padding = '=' * (4 - len(data) % 4) if len(data) % 4 else ''\n return base64.b64decode(encoded + padding)", "def decode(data): #@NoSelf", "def decode(encoded_key, encoded_string, size):\n\t\n\tdecoded_string = ''\n\ti = 0\n\tfor i in range(0, size):\n\t\tdecoded_string = decoded_string + chr(ord(encoded_string[i]) ^ ord(encoded_key[i % len(encoded_key)]))\n\t\n\treturn str(decoded_string)", "def 
decode_symbol(self, bits):\n return self.bits_to_symbol.get(bits)", "def from_base58(cls, address: str) -> 'PublicKey':\n return cls(base58.b58decode(address))", "def private_key_from_wif(wif: str) -> bytes:\n try:\n decoded_key: bytes = base58.b58decode_check(wif)\n except Exception:\n raise ValueError(\"Base58decode failure of wif\")\n\n if len(decoded_key) != 34:\n raise ValueError(f\"The decoded wif length should be \"\n f\"{len(WIF_PREFIX) + PRIVATE_KEY_LENGTH + len(WIF_SUFFIX)}, while the given wif \"\n f\"length is {len(decoded_key)}\")\n elif decoded_key[:1] != WIF_PREFIX:\n raise ValueError(f\"The decoded wif first byte should be {str(WIF_PREFIX)}\")\n elif decoded_key[-1:] != WIF_SUFFIX:\n raise ValueError(f\"The decoded wif last byte should be {str(WIF_SUFFIX)}\")\n\n private_key = decoded_key[1: 33]\n\n return private_key", "def decode(self, data: bytes) -> bytes:\n ...", "def Decrypt(self, input_bytes):\n ciph_bytes = input_bytes[keyczar.HEADER_SIZE:]\n decrypted = self.key.decrypt(ciph_bytes)\n return self.__Decode(decrypted)", "def address_to_pkb_hash(b58check_address: str) -> bytes:\n byte_address = b58decode(b58check_address)\n # validate the checksum\n assert byte_address[-4:] == sha256(sha256(byte_address[:-4]))[:4]\n # strip the version in front and the checksum at tail\n pkb_hash = byte_address[1:-4]\n return pkb_hash", "def decode(cls, b: bytes):\n assert isinstance(b, bytes)\n\n # the uncompressed version is straight forward\n if b[0] == 4:\n x = int.from_bytes(b[1:33], 'big')\n y = int.from_bytes(b[33:65], 'big')\n return Point(BITCOIN.gen.G.curve, x, y)\n\n # for compressed version uncompress the full public key Point\n # first recover the y-evenness and the full x\n assert b[0] in [2, 3]\n is_even = b[0] == 2\n x = int.from_bytes(b[1:], 'big')\n\n # solve y^2 = x^3 + 7 for y, but mod p\n p = BITCOIN.gen.G.curve.p\n y2 = (pow(x, 3, p) + 7) % p\n y = pow(y2, (p + 1) // 4, p)\n y = y if ((y % 2 == 0) == is_even) else p - y # flip if needed to make the evenness agree\n return cls(BITCOIN.gen.G.curve, x, y)", "def load_private_key_bytes(self, private_key):\n if not self.curve:\n raise NoCurveError(\"Curve must be set prior to key load.\")\n return self.load_private_key(\n SigningKey.from_string(private_key, curve=self.curve))", "def _decode(data: BencodedString) -> Union[bytes, dict, int, list]:\n if not data.bytes:\n raise ValueError(\"Cannot decode an empty bencoded string.\")\n\n if data.bytes[0] == START_DICT:\n return _decode_dict(data)\n\n if data.bytes[0] == START_LIST:\n return _decode_list(data)\n\n if data.bytes[0] == START_INTEGER:\n return _decode_int(data)\n\n if chr(data.bytes[0]).isdigit():\n return _decode_bytes(data)\n\n raise ValueError(\n \"Cannot decode data, expected the first byte to be one of \"\n f\"'d', 'i', 'l' or a digit, got {chr(data.bytes[0])!r} instead.\"\n )", "def decode(eVal):\n return pickle.loads(zlib.decompress(base64.b64decode(eVal)))", "def decode_base32(data: str, altchars='+/') -> bytes:\n data = re.sub(rf'[^a-zA-Z0-9{altchars}]+', '', data) # normalize\n missing_padding = len(data) % 8\n if missing_padding:\n data += '=' * (8 - missing_padding)\n return b32decode(data, altchars)", "def decipher2(s, key): # s = message\n return decipher_raw2(s, key).rstrip(bytes('\\x00'.encode('utf-8')))", "def forge_public_key(value) -> bytes:\n prefix = value[:4]\n res = base58.b58decode_check(value)[4:]\n\n if prefix == 'edpk':\n return b'\\x00' + res\n elif prefix == 'sppk':\n return b'\\x01' + res\n elif prefix == 'p2pk':\n return b'\\x02' 
+ res\n\n raise ValueError(f'Unrecognized key type: #{prefix}')", "def decode_sig(sig):\n table = maketrans(\"-._\", \"+/=\")\n sig = str(sig).translate(table)\n try:\n return b64decode(sig)\n except TypeError:\n raise MalformedResponseError(\"Signature is not a valid base-64 \"\n \"encoded string\")", "def rawPrivkey(self):\n # note the first byte determines what type of address\n # and the last four are checksums\n return a2b_base58(self.privkey)[1:-4]", "def a2b_base58check(string):\n\n try:\n return encoding.a2b_hashed_base58(string)\n except encoding.EncodingError:\n raise Error('Invalid base58check string')", "def decode(encoded):\n if encoded is None:\n return None\n\n try:\n s = decode(APP.config['SECRET_KEY'], encoded)\n return json.loads(s)\n except Exception as err:\n LOGGER.error('Error decoding auth: %s' % str(err))\n raise err", "def raw(self) -> bytes:\n return bytes(self._verify_key)", "def rsa_private_key_pkcs8_to_pkcs1(pkcs8_key):\n decoded_values = decoder.decode(pkcs8_key, asn1Spec=PKCS8PrivateKey())\n\n try:\n decoded_key = decoded_values[0]\n except IndexError:\n raise ValueError(\"Invalid private key encoding\")\n\n return decoded_key[\"privateKey\"]", "def decode(self, data):\n\n # Tested:\n # types: z, T, a\n # nested_structure\n # repeated\n if not hasattr(data, 'read'):\n data = io.BytesIO(data)\n\n if self._kv_fmt:\n return dict(self._decode_wire(data))\n else:\n return tuple(self._decode_wire(data))", "def parse_chain_id(data: bytes):\n return base58_encode(data, b'Net').decode()", "def a2b_base58(s):\n v, prefix = to_long(BASE58_BASE, lambda c: BASE58_LOOKUP[c], s.encode(\"utf8\"))\n return from_long(v, prefix, 256, lambda x: x)", "def test_encode_decode(self):\n assert self._test == pybinn.loads(pybinn.dumps(self._test))", "def a2b_hashed_base58(s):\n data = a2b_base58(s)\n data, the_hash = data[:-4], data[-4:]\n if double_sha256(data)[:4] == the_hash:\n return data\n raise EncodingError(\"hashed base58 has bad checksum %s\" % s)", "def decrypt(encoded_data: Union[bytes, str]) -> Tuple[Tuple[Any], Dict[str, Any]]:\n if isinstance(encoded_data, str):\n encoded_data = encoded_data.encode('utf8')\n\n key = pad(settings.PTRACK_SECRET).encode('utf8')\n box = nacl.secret.SecretBox(key)\n\n encrypted = base64.urlsafe_b64decode(encoded_data)\n data = box.decrypt(encrypted)\n # json.loads expects a str, so we convert bytes to str\n data = data.decode('utf8')\n return json.loads(data)", "def base_decode(v: Union[bytes, str], length: Optional[int], base: int) -> Optional[bytes]:\n # assert_bytes(v)\n v = to_bytes(v, 'ascii')\n if base not in (58, 43):\n raise ValueError('not supported base: {}'.format(base))\n chars = __b58chars\n if base == 43:\n chars = __b43chars\n long_value = 0\n for (i, c) in enumerate(v[::-1]):\n digit = chars.find(bytes([c]))\n if digit == -1:\n raise ValueError('Forbidden character {} for base {}'.format(c, base))\n long_value += digit * (base**i)\n result = bytearray()\n while long_value >= 256:\n div, mod = divmod(long_value, 256)\n result.append(mod)\n long_value = div\n result.append(long_value)\n nPad = 0\n for c in v:\n if c == chars[0]:\n nPad += 1\n else:\n break\n result.extend(b'\\x00' * nPad)\n if length is not None and len(result) != length:\n return None\n result.reverse()\n return bytes(result)", "def bip38(priv, passphrase, quiet=False):\n prnt('\\nCalculating encrypted private key...\\n', quiet)\n addr = o_pub_to_addr(o_priv_to_pub(priv))\n#1 Compute the Bitcoin address (ASCII), and take the first four bytes of SHA256(SHA256()) 
of it.\n addrhash = hashlib.sha256(hashlib.sha256(addr).digest()).digest()[:4] # salt\n\n#2. Derive a key from the passphrase using scrypt\n# a. Parameters: passphrase is the passphrase itself encoded in UTF-8.\n# addresshash came from the earlier step, n=16384, r=8, p=8, length=64\n# (n, r, p are provisional and subject to consensus)\n# b. Let's split the resulting 64 bytes in half, and call them derivedhalf1 and derivedhalf2.\n # scrypt(password, salt, n, r, p, buflen):\n scryptedkey = scrypt(passphrase, addrhash, 16384, 8, 8, 64, quiet)\n half1 = scryptedkey[0:32]\n half2 = scryptedkey[32:64]\n\n#3 AES encryptedhalf1 = AES256Encrypt(bitcoinprivkey[0...15] xor derivedhalf1[0...15], derivedhalf2)\n priv256 = encode(priv, 256, 32)\n aes4b38 = Aes(half2) # set AES object key\n ehalf1 = aes4b38.enc(sxor(priv256[:16], half1[:16]))\n\n#4 AES encryptedhalf2 = AES256Encrypt(bitcoinprivkey[16...31] xor derivedhalf1[16...31], derivedhalf2)\n ehalf2 = aes4b38.enc(sxor(priv256[16:32], half1[16:32]))\n\n#5 Base58 ( 0x01 0x42 + flagbyte + salt + encryptedhalf1 + encryptedhalf2 )\n fbyte = chr(0b11100000) # 11 noec 1 compressedpub 00 future 0 ec only 00 future\n encrypted_privkey = ('\\x01\\x42' + fbyte + addrhash + ehalf1 + ehalf2)\n encrypted_check = hashlib.sha256(hashlib.sha256(encrypted_privkey).digest()).digest()[:4]\n return b58encode(encrypted_privkey + encrypted_check)", "def _decode_bytes(data: BencodedString) -> bytes:\n # Get byte string length\n delimiter_index = data.bytes.find(COLON)\n\n if delimiter_index > 0:\n length_prefix = data.get_prefix(delimiter_index)\n string_length = int(length_prefix.decode(\"ascii\"))\n data.del_prefix(delimiter_index + 1)\n else:\n raise ValueError(\n \"Cannot decode a byte string, it doesn't contain a delimiter. \"\n \"Most likely the bencoded string is incomplete or incorrect.\"\n )\n\n # Get byte string data\n if len(data.bytes) >= string_length:\n result_bytes = data.get_prefix(string_length)\n data.del_prefix(string_length)\n else:\n raise ValueError(\n f\"Cannot decode a byte string (prefix length \"\n f\"- {string_length}, real_length - {len(data.bytes)}. 
\"\n \"Most likely the bencoded string is incomplete or incorrect.\"\n )\n\n return result_bytes", "def decipher(s, key): # s = message\n return decipher_raw(s, key).rstrip(bytes('\\x00'.encode('utf-8')))", "def _decode_public_key_identifier(identifier):\n\n return JWTAuth._get_identifier_cypher().decrypt(base64.b64decode(identifier)).decode('utf-8')", "def decode_data ( data ) :\n cipher = get_cipher( data )\n index = 0\n firstpass = []\n datalen = len( data )\n while index < datalen :\n if index % 2 == 0 :\n firstpass.append( chr( ord( data[ index ] ) - cipher ) )\n else :\n firstpass.append( chr( ord( data[ index ] ) + cipher ) )\n index += 1\n\n firstpass[ 0 ] = data[ 0 ]\n firstpass[ -1 ] = data[ -1 ]\n firstpass[ -2 ] = data[ -2 ]\n decoded_data = ''.join( firstpass )\n return base64.b64decode( decoded_data )", "def text2PrivateKey(text:str):\n return RSA.importKey(b58decode(text))", "def parse_dict(self, data):\n self.public_key = VerifyingKey(data['public_key'])\n self.signature = base58.b58decode(data['signature']) if data['signature'] else None", "def to_b58check(self, testnet=False):\n b = self.testnet_bytes if testnet else bytes(self)\n return base58.b58encode_check(b)", "def load_received_public_key_bytes(self, public_key_str):\n return self.load_received_public_key(\n VerifyingKey.from_string(public_key_str, self.curve))", "def _decode_key(self, key):\n if hasattr(key, \"char\"):\n return str(key.char).lower()\n elif hasattr(key, \"name\"):\n return str(key.name).lower()", "def decode_b64(s):\n if isinstance(s, dict):\n ret = {}\n for k,v in s.items():\n ret[k] = decode_b64(v)\n return ret\n elif isinstance(s, list) or isinstance(s, tuple):\n ret = []\n for v in s:\n ret.append(decode_b64(v))\n return ret\n elif isinstance(s, str) and s.beginswith('\\0'):\n return standard_b64decode(s[1:])\n else:\n return s", "def decode(b):\n\n if b.startswith(\"0z\"):\n b = b[2:]\n\n l, i, v = len(b), 0, 0\n for x in b:\n v += _value(x) * (BASE ** (l - (i + 1)))\n i += 1\n\n return v", "def from_bytes(b):\n if len(b) < 78:\n raise ValueError(\"b must be at least 78 bytes long.\")\n\n version = int.from_bytes(b[:4], 'big')\n depth = b[4]\n parent_fingerprint = b[5:9]\n index = int.from_bytes(b[9:13], 'big')\n chain_code = b[13:45]\n key_bytes = b[45:78]\n\n rv = None\n if version == HDPrivateKey.MAINNET_VERSION or version == HDPrivateKey.TESTNET_VERSION:\n if key_bytes[0] != 0:\n raise ValueError(\"First byte of private key must be 0x00!\")\n\n private_key = int.from_bytes(key_bytes[1:], 'big')\n rv = HDPrivateKey(key=private_key,\n chain_code=chain_code,\n index=index,\n depth=depth,\n parent_fingerprint=parent_fingerprint)\n elif version == HDPublicKey.MAINNET_VERSION or version == HDPublicKey.TESTNET_VERSION:\n if key_bytes[0] != 0x02 and key_bytes[0] != 0x03:\n raise ValueError(\"First byte of public key must be 0x02 or 0x03!\")\n\n public_key = PublicKey.from_bytes(key_bytes)\n rv = HDPublicKey(x=public_key.point.x,\n y=public_key.point.y,\n chain_code=chain_code,\n index=index,\n depth=depth,\n parent_fingerprint=parent_fingerprint)\n else:\n raise ValueError(\"incorrect encoding.\")\n\n return (rv, b[78:])", "def decrypt(key, encoded):\n \n if isinstance(encoded, str):\n encoded = map(ord, encoded)\n key = _key_array(key)\n aes = mxit.aes.AES()\n \n parts = _split(encoded, 16)\n decoded = []\n for part in parts:\n decoded += aes.decrypt(part, key, aes.keySize[\"SIZE_128\"]) \n return _get_text(decoded)", "def b64_decode(value: bytes) -> bytes:\n pad = b\"=\" * (-len(value) % 4)\n 
return base64.urlsafe_b64decode(value + pad)", "def decode(self, s):", "def decode(self, s):", "def base64_pub_decode(self, base64_pub):\n base64_pub_bytes = self.base64_pub.encode('utf-8')\n pubkey_text = base64.b64decode(base64_pub_bytes)\n pubkey_vars = pubkey_text.split(\":\")\n y = int(pubkey_vars[0])\n g = int(pubkey_vars[1])\n p = int(pubkey_vars[2])\n q = int(pubkey_vars[3])\n return DSA.construct((y,g,p,q))", "def decode_key(key):\n if '-tags=' in key:\n key_name, tags_json = key.split('-tags=')\n return key_name, json.loads(tags_json)\n return key, None", "def from_hex(x):\n return base64.b16decode(x, True)", "def decipher_raw2(s, key):\n assert struct.calcsize('I') == 4\n assert len(s) % 8 == 0, len(s)\n u = struct.unpack('%dI' % (len(s) / 4), s)\n stringa = str(b'\\xff\\xd8\\xff').replace('\\'', '')\n for i in range(len(u))[::2]:\n e = [decrypt2(u[i], u[i + 1], key)]\n i = b''.join([struct.pack('2I', ee, ef) for ee, ef in e])\n\n prova = str(i).replace('\\'', '')\n\n #lel = prova.find(stringa)\n\n if prova.find(stringa) != -1:\n print(\"detect format file: JPG\")\n return 0\n else:\n return 1", "def extract_aes_key(self) -> bytes:\r\n log(\"extract_aes_key start\")\r\n try:\r\n key_base64_raw: bytes = self.file_lines[0]\r\n except IndexError:\r\n # shouldn't be reachable due to test for emptiness prior in code, keep around anyway.\r\n log(\"extract_aes_key fail 1\")\r\n raise DecryptionKeyInvalidError(\"There was no decryption key.\")\r\n \r\n # Test that every byte in the byte-string of the raw key is a valid url-safe base64\r\n # character this also cuts down some junk files.\r\n for c in key_base64_raw:\r\n if c not in URLSAFE_BASE64_CHARACTERS:\r\n log(f\"extract_aes_key fail 2: '{key_base64_raw.decode()}' character: '{chr(c)}'\")\r\n raise DecryptionKeyInvalidError(f\"Key not base64 encoded: {str(key_base64_raw)}\")\r\n \r\n # handle the various cases that can occur when extracting from base64.\r\n try:\r\n decoded_key: bytes = decode_base64(key_base64_raw)\r\n except (TypeError, PaddingException, Base64LengthException) as decode_error:\r\n log(\"extract_aes_key fail 3\")\r\n raise DecryptionKeyInvalidError(f\"Invalid decryption key: {decode_error}\")\r\n \r\n base64_key = self.rsa_decrypt(decoded_key)\r\n \r\n try:\r\n decrypted_key: bytes = decode_base64(base64_key)\r\n if not decrypted_key:\r\n log(\"extract_aes_key fail 4\")\r\n raise TypeError(f\"decoded key was '{decrypted_key}'\")\r\n except (TypeError, IndexError, PaddingException, Base64LengthException) as decr_error:\r\n log(\"extract_aes_key fail 5\")\r\n raise DecryptionKeyInvalidError(f\"Invalid decryption key: {decr_error}\")\r\n \r\n # If the decoded bits of the key is not exactly 128 bits (16 bytes) that probably means that\r\n # the RSA encryption failed - this occurs when the first byte of the encrypted blob is all\r\n # zeros. Apps require an update to solve this (in a future rewrite we should use a correct\r\n # padding algorithm).\r\n if len(decrypted_key) != 16:\r\n log(\"extract_aes_key 6\")\r\n raise DecryptionKeyInvalidError(f\"Decryption key not 128 bits: {decrypted_key}\")\r\n \r\n if self.participant.os_type == IOS_API:\r\n self.populate_ios_decryption_key(base64_key)\r\n \r\n log(\"extract_aes_key success\")\r\n return decrypted_key", "def decode_with_base32(self, upper_case_secret: str) -> bytes:\n\n return base64.b32decode(upper_case_secret)", "def test_decode(self):\n pass # TODO(tlarsen)" ]
[ "0.65961087", "0.6403686", "0.6391344", "0.63413113", "0.6321229", "0.6299899", "0.61630493", "0.6157103", "0.61495435", "0.60469747", "0.600818", "0.59588367", "0.5914103", "0.5791583", "0.57892734", "0.5774782", "0.5729094", "0.5713923", "0.5695626", "0.5672472", "0.5671971", "0.56648386", "0.56602347", "0.5640567", "0.56206065", "0.5570051", "0.55683273", "0.5536966", "0.55233926", "0.5521278", "0.5519576", "0.551094", "0.5494396", "0.54811954", "0.54727185", "0.5472688", "0.5472175", "0.54682016", "0.5425398", "0.54231125", "0.5420805", "0.541795", "0.5406817", "0.539157", "0.5389481", "0.5377367", "0.5354878", "0.5349486", "0.53479254", "0.53440267", "0.53404975", "0.53395796", "0.5321315", "0.5317715", "0.5314885", "0.5302957", "0.530149", "0.5298291", "0.5295711", "0.5292985", "0.5288348", "0.5272262", "0.5266521", "0.52472997", "0.524198", "0.5222525", "0.5218216", "0.5206912", "0.51959294", "0.5187832", "0.516887", "0.5153519", "0.5153326", "0.51522285", "0.51412636", "0.51402944", "0.51327795", "0.51273376", "0.51238143", "0.5106382", "0.50965154", "0.50960433", "0.5089916", "0.507916", "0.50738925", "0.5073865", "0.50673753", "0.5061772", "0.5057208", "0.50551385", "0.5047832", "0.50463504", "0.50463504", "0.50431967", "0.5040826", "0.5033425", "0.5033293", "0.5024688", "0.50068206", "0.4996736" ]
0.74329513
0
Generates either a HDPrivateKey or HDPublicKey from the underlying bytes.
def from_bytes(b): if len(b) < 78: raise ValueError("b must be at least 78 bytes long.") version = int.from_bytes(b[:4], 'big') depth = b[4] parent_fingerprint = b[5:9] index = int.from_bytes(b[9:13], 'big') chain_code = b[13:45] key_bytes = b[45:78] rv = None if version == HDPrivateKey.MAINNET_VERSION or version == HDPrivateKey.TESTNET_VERSION: if key_bytes[0] != 0: raise ValueError("First byte of private key must be 0x00!") private_key = int.from_bytes(key_bytes[1:], 'big') rv = HDPrivateKey(key=private_key, chain_code=chain_code, index=index, depth=depth, parent_fingerprint=parent_fingerprint) elif version == HDPublicKey.MAINNET_VERSION or version == HDPublicKey.TESTNET_VERSION: if key_bytes[0] != 0x02 and key_bytes[0] != 0x03: raise ValueError("First byte of public key must be 0x02 or 0x03!") public_key = PublicKey.from_bytes(key_bytes) rv = HDPublicKey(x=public_key.point.x, y=public_key.point.y, chain_code=chain_code, index=index, depth=depth, parent_fingerprint=parent_fingerprint) else: raise ValueError("incorrect encoding.") return (rv, b[78:])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def generate_ecdh_key_pair() -> tuple[X25519PrivateKey, bytes]:\n private_key = X25519PrivateKey.generate()\n public_key_raw = private_key.public_key().public_bytes(\n serialization.Encoding.Raw, serialization.PublicFormat.Raw\n )\n return private_key, public_key_raw", "def mk_keyobj_from_private_key_der(self, derdat):\n self.private_key_obj = serialization.load_der_private_key(derdat, password=None, backend=default_backend())\n self._get_naive_private_key_bytes()\n self.public_key_obj = self.private_key_obj.public_key()\n self._get_naive_public_key_bytes()", "def from_public_parts(self, x: bytes, y: bytes):\n return asymmetric.ec.EllipticCurvePublicNumbers(\n int.from_bytes(x, 'big'),\n int.from_bytes(y, 'big'),\n asymmetric.ec.SECP256R1()\n ).public_key()", "def gen_private_key():\n return DH.b2i(Random.new().read(DH_SIZE))", "def decode_public_key(as_bytes: typing.List[int]) -> PublicKey:\n raise NotImplementedError()", "def generate_ecc_public_key(private_key: EllipticCurvePrivateKeyWithSerialization) -> EllipticCurvePublicKey:\n return private_key.public_key()", "def derive_public_key(private_key):\r\n\r\n Q = int.from_bytes(private_key, byteorder='big') * BIP32_CURVE.generator\r\n xstr = Q.x().to_bytes(32, byteorder='big')\r\n parity = Q.y() & 1\r\n return (2 + parity).to_bytes(1, byteorder='big') + xstr", "def load_pub_key_bytes(bs: bytes) -> rsa.RSAPublicKey:\n k = serialization.load_pem_public_key(bs)\n assert isinstance(k, rsa.RSAPublicKey)\n return k", "def decode_credential_public_key(\n key: bytes,\n) -> Union[DecodedOKPPublicKey, DecodedEC2PublicKey, DecodedRSAPublicKey]:\n # Occassionally we might be given a public key in an \"uncompressed\" format,\n # typically from older U2F security keys. As per the FIDO spec this is indicated by\n # a leading 0x04 \"uncompressed point compression method\" format byte. 
In that case\n # we need to fill in some blanks to turn it into a full EC2 key for signature\n # verification\n #\n # See https://fidoalliance.org/specs/fido-v2.0-id-20180227/fido-registry-v2.0-id-20180227.html#public-key-representation-formats\n if key[0] == 0x04:\n return DecodedEC2PublicKey(\n kty=COSEKTY.EC2,\n alg=COSEAlgorithmIdentifier.ECDSA_SHA_256,\n crv=COSECRV.P256,\n x=key[1:33],\n y=key[33:65],\n )\n\n decoded_key: dict = decoder.loads(key)\n\n kty = decoded_key[COSEKey.KTY]\n alg = decoded_key[COSEKey.ALG]\n\n if not kty:\n raise InvalidPublicKeyStructure(\"Credential public key missing kty\")\n if not alg:\n raise InvalidPublicKeyStructure(\"Credential public key missing alg\")\n\n if kty == COSEKTY.OKP:\n crv = decoded_key[COSEKey.CRV]\n x = decoded_key[COSEKey.X]\n\n if not crv:\n raise InvalidPublicKeyStructure(\"OKP credential public key missing crv\")\n if not x:\n raise InvalidPublicKeyStructure(\"OKP credential public key missing x\")\n\n return DecodedOKPPublicKey(\n kty=kty,\n alg=alg,\n crv=crv,\n x=x,\n )\n elif kty == COSEKTY.EC2:\n crv = decoded_key[COSEKey.CRV]\n x = decoded_key[COSEKey.X]\n y = decoded_key[COSEKey.Y]\n\n if not crv:\n raise InvalidPublicKeyStructure(\"EC2 credential public key missing crv\")\n if not x:\n raise InvalidPublicKeyStructure(\"EC2 credential public key missing x\")\n if not y:\n raise InvalidPublicKeyStructure(\"EC2 credential public key missing y\")\n\n return DecodedEC2PublicKey(\n kty=kty,\n alg=alg,\n crv=crv,\n x=x,\n y=y,\n )\n elif kty == COSEKTY.RSA:\n n = decoded_key[COSEKey.N]\n e = decoded_key[COSEKey.E]\n\n if not n:\n raise InvalidPublicKeyStructure(\"RSA credential public key missing n\")\n if not e:\n raise InvalidPublicKeyStructure(\"RSA credential public key missing e\")\n\n return DecodedRSAPublicKey(\n kty=kty,\n alg=alg,\n n=n,\n e=e,\n )\n\n raise UnsupportedPublicKeyType(f'Unsupported credential public key type \"{kty}\"')", "def generate_key(seed):\n private_key = sha256(seed)\n public_key = privtopub(private_key)\n return {\"private\": private_key, \"public\": public_key}", "def _get_pubkey_from_der_private_key(filedata: bytes, backend: Any) -> Tuple[Any, None]:\n try:\n privkey = serialization.load_der_private_key(filedata, None, backend=backend)\n return privkey.public_key(), None\n except Exception:\n return None, None", "def mk_keyobj_from_private_key_pem(self, pemdat_string):\n if isinstance(pemdat_string, str):\n pemdat_string = pemdat_string.encode()\n self.private_key_obj = serialization.load_pem_private_key(pemdat_string, password=None, backend=default_backend())\n self._get_naive_private_key_bytes()\n self.public_key_obj = self.private_key_obj.public_key()\n self._get_naive_public_key_bytes()", "def dh_get_key():\n G = EcGroup()\n priv_dec = G.order().random()\n pub_enc = priv_dec * G.generator()\n return (G, priv_dec, pub_enc)", "def createKeyPair(type, bits):\n pkey = crypto.PKey()\n pkey.generate_key(type, bits)\n return pkey", "def generate_keys() -> tuple:\n private_key = ecdsa.SigningKey.generate(curve=curve)\n public_key = private_key.get_verifying_key()\n\n private_key = encode_private_key(private_key)\n public_key = encode_public_key(public_key)\n\n return public_key, private_key", "def parse_public_key(data: bytes) -> str:\n key_prefix = {\n b'\\x00': b'edpk',\n b'\\x01': b'sppk',\n b'\\x02': b'p2pk'\n }\n return base58_encode(data[1:], key_prefix[data[:1]]).decode()", "def mk_keyobj_from_private_key(self, privkey):\n bn = BACKEND_KP.private_key_obj._backend._ffi.NULL\n bn_ptr = 
BACKEND_KP.private_key_obj._backend._lib.BN_bin2bn(privkey, len(privkey), bn)\n secret_val = BACKEND_KP.private_key_obj._backend._bn_to_int(bn_ptr)\n\n if self.curvetype == KeyType.ECDSA_P256v1:\n self.private_key_obj = ec.derive_private_key(secret_val, ec.SECP256R1(), default_backend())\n elif self.curvetype == KeyType.ECDSA_SECP256k1:\n self.private_key_obj = ec.derive_private_key(secret_val, ec.SECP256K1(), default_backend())\n self._get_naive_private_key_bytes()\n self.public_key_obj = self.private_key_obj.public_key()\n self._get_naive_public_key_bytes()", "def get_key_object(self):\n key_type, data = self.key_data()\n data = base64.b64decode(data)\n\n if key_type == \"ssh-rsa\":\n key = rsakey.RSAKey(data=data)\n elif key_type == \"ssh-dss\":\n key = dsskey.DSSKey(data=data)\n else:\n raise Exception(\"Invalid key type\")\n\n return key", "def get_key_pair() -> typing.Tuple[bytes, bytes]: \n return _get_key_pair_from_sk(ecdsa.SigningKey.generate(curve=CURVE))", "def unwrap(self):\n\n if self.algorithm == 'rsa':\n return self.asn1['private_key'].parsed\n\n if self.algorithm == 'dsa':\n params = self.asn1['private_key_algorithm']['parameters']\n return DSAPrivateKey({\n 'version': 0,\n 'p': params['p'],\n 'q': params['q'],\n 'g': params['g'],\n 'public_key': self.public_key.unwrap(),\n 'private_key': self.asn1['private_key'].parsed,\n })\n\n if self.algorithm == 'ec':\n output = self.asn1['private_key'].parsed\n output['parameters'] = self.asn1['private_key_algorithm']['parameters']\n output['public_key'] = self.public_key.unwrap()\n return output", "def forge_public_key(value) -> bytes:\n prefix = value[:4]\n res = base58.b58decode_check(value)[4:]\n\n if prefix == 'edpk':\n return b'\\x00' + res\n elif prefix == 'sppk':\n return b'\\x01' + res\n elif prefix == 'p2pk':\n return b'\\x02' + res\n\n raise ValueError(f'Unrecognized key type: #{prefix}')", "def solve(key_data: bytes) -> PublicKey:\n return { # type: ignore\n Encoding.PEM: load_pem_public_key,\n Encoding.DER: load_der_public_key\n }[real_encoding](key_data, default_backend())", "def _get_pubkey_from_der_public_key(filedata: bytes, backend: Any) -> Tuple[Any, None]:\n try:\n return serialization.load_der_public_key(filedata, backend=backend), None\n except Exception:\n return None, None", "def parse_public(data):\n\n if not isinstance(data, byte_cls):\n raise TypeError(pretty_message(\n '''\n data must be a byte string, not %s\n ''',\n type_name(data)\n ))\n\n key_type = None\n\n # Appears to be PEM formatted\n if re.match(b'\\\\s*-----', data) is not None:\n key_type, algo, data = _unarmor_pem(data)\n\n if key_type == 'private key':\n raise ValueError(pretty_message(\n '''\n The data specified does not appear to be a public key or\n certificate, but rather a private key\n '''\n ))\n\n # When a public key returning from _unarmor_pem has a known algorithm\n # of RSA, that means the DER structure is of the type RSAPublicKey, so\n # we need to wrap it in the PublicKeyInfo structure.\n if algo == 'rsa':\n return PublicKeyInfo.wrap(data, 'rsa')\n\n if key_type is None or key_type == 'public key':\n try:\n pki = PublicKeyInfo.load(data)\n # Call .native to fully parse since asn1crypto is lazy\n pki.native\n return pki\n except (ValueError):\n pass # Data was not PublicKeyInfo\n\n try:\n rpk = RSAPublicKey.load(data)\n # Call .native to fully parse since asn1crypto is lazy\n rpk.native\n return PublicKeyInfo.wrap(rpk, 'rsa')\n except (ValueError):\n pass # Data was not an RSAPublicKey\n\n if key_type is None or key_type == 
'certificate':\n try:\n parsed_cert = Certificate.load(data)\n key_info = parsed_cert['tbs_certificate']['subject_public_key_info']\n return key_info\n except (ValueError):\n pass # Data was not a cert\n\n raise ValueError('The data specified does not appear to be a known public key or certificate format')", "def generate_rsa_public_key(private_key: RSAPrivateKeyWithSerialization) -> RSAPublicKey:\n return private_key.public_key()", "def solve(key_data: bytes) -> PrivateKey:\n return { # type: ignore\n Encoding.PEM: load_pem_private_key,\n Encoding.DER: load_der_private_key\n }[real_encoding](key_data, password, default_backend())", "def generate(self):\n if self.curvetype == KeyType.ECDSA_P256v1:\n self.private_key_obj = ec.generate_private_key(ec.SECP256R1(), default_backend())\n elif self.curvetype == KeyType.ECDSA_SECP256k1:\n self.private_key_obj = ec.generate_private_key(ec.SECP256K1(), default_backend())\n self.public_key_obj = self.private_key_obj.public_key()\n self._get_naive_private_key_bytes()\n self._get_naive_public_key_bytes()", "def derive_keypair(seed: str, validator: bool = False) -> Tuple[str, str]:\n decoded_seed, algorithm = addresscodec.decode_seed(seed)\n module = _ALGORITHM_TO_MODULE_MAP[algorithm]\n public_key, private_key = module.derive_keypair(decoded_seed, validator)\n signature = module.sign(_VERIFICATION_MESSAGE, private_key)\n if not module.is_valid_message(_VERIFICATION_MESSAGE, signature, public_key):\n raise XRPLKeypairsException(\n \"Derived keypair did not generate verifiable signature\",\n )\n return public_key, private_key", "def generate_private_key():\n\treturn binascii.hexlify(os.urandom(32)).decode('utf-8').upper()", "def private_key_to_public_key(private_key):\n\tpk = PrivateKey().fromString(bytes.fromhex(private_key))\n\treturn '04' + pk.publicKey().toString().hex().upper()", "def _unarmor_pem(data, password=None):\n\n object_type, headers, der_bytes = unarmor(data)\n\n type_regex = '^((DSA|EC|RSA) PRIVATE KEY|ENCRYPTED PRIVATE KEY|PRIVATE KEY|PUBLIC KEY|RSA PUBLIC KEY|CERTIFICATE)'\n armor_type = re.match(type_regex, object_type)\n if not armor_type:\n raise ValueError(pretty_message(\n '''\n data does not seem to contain a PEM-encoded certificate, private\n key or public key\n '''\n ))\n\n pem_header = armor_type.group(1)\n\n data = data.strip()\n\n # RSA private keys are encrypted after being DER-encoded, but before base64\n # encoding, so they need to be handled specially\n if pem_header in set(['RSA PRIVATE KEY', 'DSA PRIVATE KEY', 'EC PRIVATE KEY']):\n algo = armor_type.group(2).lower()\n return ('private key', algo, _unarmor_pem_openssl_private(headers, der_bytes, password))\n\n key_type = pem_header.lower()\n algo = None\n if key_type == 'encrypted private key':\n key_type = 'private key'\n elif key_type == 'rsa public key':\n key_type = 'public key'\n algo = 'rsa'\n\n return (key_type, algo, der_bytes)", "def gen_public_key(g, private, p):\n return pow(g, private, p)", "def create_keypair(self):\n # NOTE: currently we rely on zmq for convenience, but we may use libnacl directly\n # if we want to isolate this module from zmq dependency.\n public_key, private_key = zmq.curve_keypair()\n return public_key, private_key", "def gen_key_pair():\n sk = gen_secret_key(BITCOIN.gen.n)\n pk = PublicKey.from_sk(sk)\n return sk, pk", "def get_private_key_in_der(self):\n serialized_private = self.private_key_obj.private_bytes(\n encoding=serialization.Encoding.DER,\n format=serialization.PrivateFormat.TraditionalOpenSSL,\n 
encryption_algorithm=serialization.NoEncryption()\n )\n return serialized_private", "def _get_pubkey_from_pem_private_key(filedata: bytes, backend: Any) -> Tuple[Any, None]:\n try:\n privkey = serialization.load_pem_private_key(filedata, None, backend=backend)\n return privkey.public_key(), None\n except Exception:\n return None, None", "def base64_pub_decode(self, base64_pub):\n base64_pub_bytes = self.base64_pub.encode('utf-8')\n pubkey_text = base64.b64decode(base64_pub_bytes)\n pubkey_vars = pubkey_text.split(\":\")\n y = int(pubkey_vars[0])\n g = int(pubkey_vars[1])\n p = int(pubkey_vars[2])\n q = int(pubkey_vars[3])\n return DSA.construct((y,g,p,q))", "def generate_private_key(self):\n if not self.curve:\n raise NoCurveError(\"Curve must be set prior to key generation.\")\n return self.load_private_key(SigningKey.generate(curve=self.curve))", "def import_public_key(self, hex_bytes: str) -> str:\n return self.context.post(\n \"/dsum/public_key\", {\"key\": hex_bytes}, None, \"DSum: failed importing a Curve 25519 public key\")['uid']", "def gen_public_key(n, e):\n\n # Assign key parameters\n key_params = (n, e)\n # Construct private key\n key = RSA.construct(key_params)\n\n return key.exportKey()", "def get_pubkey(filedata: bytes) -> Tuple[Optional[SupportedKeyTypes], Optional[int]]:\n default_be = backends.default_backend()\n for func in [\n _get_pubkey_from_der_x509_certificate,\n _get_pubkey_from_pem_x509_certificate,\n _get_pubkey_from_der_public_key,\n _get_pubkey_from_pem_public_key,\n _get_pubkey_from_der_private_key,\n _get_pubkey_from_pem_private_key,\n ]:\n pubkey, keyidv2 = func(filedata, default_be)\n if pubkey:\n if not isinstance(pubkey, (RSAPublicKey, EllipticCurvePublicKey)):\n raise ValueError(f\"Unsupported key type {type(pubkey).__name__}\")\n return pubkey, keyidv2\n\n return None, None", "def get_shared_key(public, private, p):\n s = pow(public, private, p)\n s_hex = hex(s)[2:]\n # Make the length of s_hex a multiple of 2\n if len(s_hex) % 2 != 0:\n s_hex = '0' + s_hex\n # Convert hex to bytes\n s_bytes = binascii.unhexlify(s_hex)\n # Hash and return the hex result\n return sha256(s_bytes).digest()", "def generate_rsa_server_keys() -> Tuple[bytes, bytes]:\n from cryptography.hazmat.primitives import serialization as crypto_serialization\n from cryptography.hazmat.primitives.asymmetric import rsa\n from cryptography.hazmat.backends import default_backend as crypto_default_backend\n\n # Generate the key\n key = rsa.generate_private_key(\n backend=crypto_default_backend(),\n public_exponent=65537,\n key_size=2048\n )\n\n # Get the private key in the standard PEM/PKCS8 format for SSH private keys.\n private_key = key.private_bytes(\n crypto_serialization.Encoding.PEM,\n crypto_serialization.PrivateFormat.OpenSSH,\n crypto_serialization.NoEncryption())\n\n # Get the public key in the standard OpenSSH format.\n public_key = key.public_key().public_bytes(\n crypto_serialization.Encoding.OpenSSH,\n crypto_serialization.PublicFormat.OpenSSH\n )\n\n # Write the two keys.\n with open('host_rsa', 'wb') as f:\n f.write(private_key)\n\n with open('host_rsa.pub', 'wb') as f:\n f.write(public_key)\n\n # Return them.\n return private_key, public_key", "def generate_keypair(bits):\n p = generate_prime(bits // 2)\n #print(p)\n q = generate_prime(bits // 2)\n #print(q)\n n = p * q\n return PrivateKey(p, q, n), PublicKey(n)", "def generateKeys(bits=256):\n #print \"generating first prime number\"\n p = generatePrime(bits/2)\n #print \"generating second prime number\"\n q = 
generatePrime(bits/2)\n \n assert p != q\n #print p, \"\\n\", q\n assert gcd(p*q, (p-1)*(q-1)) == 1\n \n priv = PrivateKey(p, q)\n pub = PublicKey(p, q)\n \n priv.saveToFile()\n pub.saveToFile()\n \n return priv, pub", "def password_to_key(password: str):\r\n curve = ec.SECP256R1() # Elliptic curve\r\n digest = hashes.Hash(hashes.SHA256())\r\n digest.update(password.encode())\r\n password_int = int.from_bytes(digest.finalize(), \"big\")\r\n return ec.derive_private_key(password_int, curve)", "def load_key(fn, psw=None):\n if not fn:\n die(\"Need private key\")\n if psw:\n psw = as_bytes(psw)\n data = load_gpg_file(fn)\n key = load_pem_private_key(data, password=psw, backend=get_backend())\n return key", "def get_public_key_in_der(self):\n serialized_public = self.public_key_obj.public_bytes(\n encoding=serialization.Encoding.DER,\n format=serialization.PublicFormat.SubjectPublicKeyInfo\n )\n return serialized_public", "def load_private_key_bytes(self, private_key):\n if not self.curve:\n raise NoCurveError(\"Curve must be set prior to key load.\")\n return self.load_private_key(\n SigningKey.from_string(private_key, curve=self.curve))", "def publickey_unsafe(sk: bytes) -> bytes:\n h = H(sk)\n a = decodecoord(h)\n A = scalarmult_B(a)\n return encodepoint(A)", "def key_for_signature(self, data, sig):\n verification = self.verify(data, sig)\n return PublicKey.objects.filter(\n fingerprint=verification.fingerprint,\n profile__verified=True,\n ).first()", "def get_pub_key(priv_key: rsa.RSAPrivateKey) -> rsa.RSAPublicKey:\n return priv_key.public_key()", "def from_b58check(key):\n return HDKey.from_bytes(base58.b58decode_check(key))[0]", "def generateIdentityKeyPair():\n keyPair = Curve.generateKeyPair()\n publicKey = IdentityKey(keyPair.getPublicKey())\n serialized = '0a21056e8936e8367f768a7bba008ade7cf58407bdc7a6aae293e2c' \\\n 'b7c06668dcd7d5e12205011524f0c15467100dd603e0d6020f4d293' \\\n 'edfbcd82129b14a88791ac81365c'\n serialized = binascii.unhexlify(serialized.encode())\n identityKeyPair = IdentityKeyPair(publicKey, keyPair.getPrivateKey())\n return identityKeyPair\n # return IdentityKeyPair(serialized=serialized)", "def load_received_public_key_bytes(self, public_key_str):\n return self.load_received_public_key(\n VerifyingKey.from_string(public_key_str, self.curve))", "def get_public_key():\n\n ssh_conf_path = os.path.expanduser('~/.ssh')\n\n dsa_public_key_path = os.path.join(ssh_conf_path, 'id_dsa.pub')\n dsa_private_key_path = os.path.join(ssh_conf_path, 'id_dsa')\n\n rsa_public_key_path = os.path.join(ssh_conf_path, 'id_rsa.pub')\n rsa_private_key_path = os.path.join(ssh_conf_path, 'id_rsa')\n\n has_dsa_keypair = os.path.isfile(dsa_public_key_path) and \\\n os.path.isfile(dsa_private_key_path)\n has_rsa_keypair = os.path.isfile(rsa_public_key_path) and \\\n os.path.isfile(rsa_private_key_path)\n\n if has_dsa_keypair:\n print 'DSA keypair found, using it'\n public_key_path = dsa_public_key_path\n\n elif has_rsa_keypair:\n print 'RSA keypair found, using it'\n public_key_path = rsa_public_key_path\n\n else:\n print 'Neither RSA nor DSA keypair found, creating DSA ssh key pair'\n system('ssh-keygen -t dsa -q -N \"\" -f %s' % dsa_private_key_path)\n public_key_path = dsa_public_key_path\n\n public_key = open(public_key_path, 'r')\n public_key_str = public_key.read()\n public_key.close()\n\n return public_key_str", "def Generate(size=keyinfo.DSA_PRIV.default_size):\n key = DSA.generate(size, util.RandBytes)\n params = { 'x': util.PadBytes(util.BigIntToBytes(key.x), 1) }\n pubkey = 
key.publickey()\n pub_params = { 'g': util.PadBytes(util.BigIntToBytes(pubkey.g), 1),\n 'p': util.PadBytes(util.BigIntToBytes(pubkey.p), 1),\n 'q': util.PadBytes(util.BigIntToBytes(pubkey.q), 1),\n 'y': util.PadBytes(util.BigIntToBytes(pubkey.y), 1)\n }\n pub = DsaPublicKey(pub_params, pubkey, size)\n return DsaPrivateKey(params, pub, key, size)", "def alt_stubbed_receiver() -> PublicKey:\n return PublicKey(\"J3dxNj7nDRRqRRXuEMynDG57DkZK4jYRuv3Garmb1i98\")", "def from_base58(cls, seed: str) -> 'PrivateKey':\n return cls(base58.b58decode(seed))", "def ecdsa_key_gen():\n G = EcGroup()\n priv_sign = G.order().random()\n pub_verify = priv_sign * G.generator()\n return (G, priv_sign, pub_verify)", "def _create_rsa_key_pair(self, length, public_exponent=65537):\n self.logger.info(\n \"Generating an RSA key pair with length: {0}, and \"\n \"public_exponent: {1}\".format(\n length, public_exponent\n )\n )\n try:\n private_key = rsa.generate_private_key(\n public_exponent=public_exponent,\n key_size=length,\n backend=default_backend())\n public_key = private_key.public_key()\n\n private_bytes = private_key.private_bytes(\n serialization.Encoding.DER,\n serialization.PrivateFormat.PKCS8,\n serialization.NoEncryption())\n public_bytes = public_key.public_bytes(\n serialization.Encoding.DER,\n serialization.PublicFormat.PKCS1)\n except Exception as e:\n self.logger.exception(e)\n raise exceptions.CryptographicFailure(\n \"An error occurred while generating the RSA key pair. \"\n \"See the server log for more information.\"\n )\n\n public_key = {\n 'value': public_bytes,\n 'format': enums.KeyFormatType.PKCS_1,\n 'public_exponent': public_exponent\n }\n private_key = {\n 'value': private_bytes,\n 'format': enums.KeyFormatType.PKCS_8,\n 'public_exponent': public_exponent\n }\n\n return public_key, private_key", "def _get_pubkey_from_der_x509_certificate(filedata: bytes, backend: Any) -> Tuple[Any, Optional[int]]:\n try:\n cert = x509.load_der_x509_certificate(filedata, backend=backend)\n return cert.public_key(), _get_keyidv2_from_cert(cert)\n except Exception:\n return None, None", "def parse_private(data, password=None):\n\n if not isinstance(data, byte_cls):\n raise TypeError(pretty_message(\n '''\n data must be a byte string, not %s\n ''',\n type_name(data)\n ))\n\n if password is not None:\n if not isinstance(password, byte_cls):\n raise TypeError(pretty_message(\n '''\n password must be a byte string, not %s\n ''',\n type_name(password)\n ))\n else:\n password = b''\n\n # Appears to be PEM formatted\n if re.match(b'\\\\s*-----', data) is not None:\n key_type, _, data = _unarmor_pem(data, password)\n\n if key_type == 'public key':\n raise ValueError(pretty_message(\n '''\n The data specified does not appear to be a private key, but\n rather a public key\n '''\n ))\n\n if key_type == 'certificate':\n raise ValueError(pretty_message(\n '''\n The data specified does not appear to be a private key, but\n rather a certificate\n '''\n ))\n\n try:\n pki = PrivateKeyInfo.load(data)\n # Call .native to fully parse since asn1crypto is lazy\n pki.native\n return pki\n except (ValueError):\n pass # Data was not PrivateKeyInfo\n\n try:\n parsed_wrapper = EncryptedPrivateKeyInfo.load(data)\n encryption_algorithm_info = parsed_wrapper['encryption_algorithm']\n encrypted_data = parsed_wrapper['encrypted_data'].native\n decrypted_data = _decrypt_encrypted_data(encryption_algorithm_info, encrypted_data, password)\n pki = PrivateKeyInfo.load(decrypted_data)\n # Call .native to fully parse since asn1crypto is lazy\n 
pki.native\n return pki\n except (ValueError):\n pass # Data was not EncryptedPrivateKeyInfo\n\n try:\n parsed = RSAPrivateKey.load(data)\n # Call .native to fully parse since asn1crypto is lazy\n parsed.native\n return PrivateKeyInfo.wrap(parsed, 'rsa')\n except (ValueError):\n pass # Data was not an RSAPrivateKey\n\n try:\n parsed = DSAPrivateKey.load(data)\n # Call .native to fully parse since asn1crypto is lazy\n parsed.native\n return PrivateKeyInfo.wrap(parsed, 'dsa')\n except (ValueError):\n pass # Data was not a DSAPrivateKey\n\n try:\n parsed = ECPrivateKey.load(data)\n # Call .native to fully parse since asn1crypto is lazy\n parsed.native\n return PrivateKeyInfo.wrap(parsed, 'ec')\n except (ValueError):\n pass # Data was not an ECPrivateKey\n\n raise ValueError(pretty_message(\n '''\n The data specified does not appear to be a known private key format\n '''\n ))", "def unwrap(self):\n\n if self.algorithm == 'ec':\n return self.asn1['public_key']\n return self.asn1['public_key'].parsed", "def __generate_key(length):\n if length % 2 != 0:\n raise ValueError(\"'length' must be a multiple of 2\")\n length_bytes = int(length / 2) # length of key in bytes\n key_bytes = os.urandom(length_bytes)\n return binascii.hexlify(key_bytes).decode()", "def from_key(self, public_id, key):\n otp = self.get_otp(key)\n from_key = modhex_encode(public_id.encode('hex')) + modhex_encode(otp.encode('hex'))\n return from_key", "def _wrap_publickey(self) -> None:\n p_der = ffi.new(\"unsigned char **\")\n der_len = lib.i2d_PublicKey(self._pkey, p_der)\n if der_len < 0:\n raise InvalidPKeyError(\"Could not serialize public key\")\n try:\n der = ffi.buffer(p_der[0], der_len)[:]\n try:\n self._key = load_der_public_key(der, backend=default_backend())\n except ValueError as exc:\n raise InvalidPKeyError from exc\n finally:\n lib.OPENSSL_free(p_der[0])", "def get_key_pair_from_pvk_b64(pvk_b64: str):\n pvk = base64.b64decode(pvk_b64)\n sk = ecdsa.SigningKey.from_string(pvk, curve=CURVE)\n\n return _get_key_pair_from_sk(sk)", "def generate_key_pair(G):\r\n\r\n global random\r\n\r\n if random == None:\r\n random = hash_drbg.HashDRBG()\r\n\r\n if G.order == None:\r\n raise RuntimeError(\"Base point must have order.\")\r\n\r\n key_size = log(ec.leftmost_bit(G.order)) / log(2)\r\n key_size = int(ceil(key_size) / 2)\r\n private_key = 1\r\n\r\n while private_key <= 1:\r\n private_key = random(key_size) #generates a random number\r\n #with twice the required bits\r\n private_key %= G.order\r\n\r\n return (private_key, G * private_key)", "def GenKey(type, size=None):\n if size is None:\n size = type.default_size\n\n if not type.IsValidSize(size):\n raise errors.KeyczarError(\"Unsupported key size %d bits.\" % size)\n\n try:\n return {keyinfo.AES: AesKey.Generate,\n keyinfo.HMAC_SHA1: HmacKey.Generate,\n keyinfo.DSA_PRIV: DsaPrivateKey.Generate,\n keyinfo.RSA_PRIV: RsaPrivateKey.Generate}[type](size)\n except KeyError:\n if type == keyinfo.DSA_PUB or type == keyinfo.RSA_PUB:\n msg = \"Public keys of type %s must be exported from private keys.\"\n else:\n msg = \"Unsupported key type: %s\"\n raise errors.KeyczarError(msg % type)", "def privatekey_to_publickey(private_key_bin: bytes) -> bytes:\n if not ishash(private_key_bin):\n raise ValueError('private_key_bin format mismatch. 
maybe hex encoded?')\n private_key = PrivateKey(private_key_bin)\n return private_key.public_key.format(compressed=False)", "def deserializePublicKey(string, bc = backend):\n\tif type(string) == str:\n\t\tstring = string.encode('utf8')\n\treturn serialization.load_pem_public_key(string , backend = bc)", "def public_from_private(self, private_key):", "def get_key_from_blob(blob):\n keys = pgpy.PGPKey.from_blob(blob)\n logging.debug(keys)\n return keys[0]", "def rsa_gen_pubpriv_keys(bitsize):\r\n\r\n rsa_implementation = RSA_RSAImplementation()\r\n \r\n # The key returned is of type RSA_RSAobj which is derived \r\n # from pubkey_pubkey, and wraps a _slowmath_RSAKey object\r\n rsa_key = rsa_implementation.generate(bitsize)\r\n \r\n return ({'e':rsa_key.e, 'n':rsa_key.n },\r\n {'d':rsa_key.d, 'p':rsa_key.p, 'q':rsa_key.q })", "def generate_key(self):\n key = rsa.generate_private_key(\n public_exponent=self.settings['key_public_exponent_size'],\n key_size=self.settings['key_size'],\n backend=default_backend()\n )\n return key", "def create_handshake_key_pair(cls) -> Tuple[bytes, bytes]:\n ...", "def public_key_from_private_key(privkey: bytes, compressed: bool) -> bytes:\n key = ECKey(privkey)\n return key.get_public_key(compressed)", "def get_rsa_asymn_keys(public_exponent = 65537, key_size = 2048, bc = backend):\n\tprivate_key = asymmetric.rsa.generate_private_key(public_exponent = public_exponent, key_size = key_size, backend = bc)\n\treturn private_key,private_key.public_key()", "def get_new_key() -> rsa.RSAPrivateKeyWithSerialization:\n\n return rsa.generate_private_key(\n backend=default_backend(), public_exponent=65537, key_size=2048\n )", "def _derive_key(\n self, passphrase: str, otp: YubikeyOTP, *args : bytes\n ) -> bytes:\n return self._context_kdf.derive(\n combine_keys(\n passphrase.encode('utf-8'),\n otp.token.private_uid,\n *args\n )\n )", "def Generate(size=keyinfo.DSA_PRIV.default_size):\n key = DSA.generate(size, util.RandBytes)\n params = {'x': util.BigIntToBytes(key.x)}\n pubkey = key.publickey()\n pub_params = {\n 'g': util.BigIntToBytes(pubkey.g),\n 'p': util.BigIntToBytes(pubkey.p),\n 'q': util.BigIntToBytes(pubkey.q),\n 'y': util.BigIntToBytes(pubkey.y)\n }\n pub = DsaPublicKey(pub_params, pubkey, size)\n return DsaPrivateKey(params, pub, key, size)", "def _get_pubkey_from_pem_public_key(filedata: bytes, backend: Any) -> Tuple[Any, None]:\n try:\n return serialization.load_pem_public_key(filedata, backend=backend), None\n except Exception:\n return None, None", "def get_key_pair_from_seed(seed: bytes) -> typing.Tuple[bytes, bytes]:\n sk = ecdsa.SigningKey.from_string(seed, curve=CURVE)\n\n return _get_key_pair_from_sk(sk)", "def read_public_key(f: IO[str]) -> Tuple[str, str, str, str]:\n data = f.read()\n try:\n kind, key, comment = data.split(\" \")\n if kind.startswith(\"ssh-\") and comment:\n base64.b64decode(key)\n return (kind, key, comment, data)\n except ValueError:\n pass\n\n raise click.ClickException(\"{} is not a valid SSH key\".format(f.name))", "def GenerateFromSeed(cls,\n seed_bytes: bytes) -> Tuple[bytes, bytes]:\n if len(seed_bytes) < Bip32Slip10MstKeyGeneratorConst.SEED_MIN_BYTE_LEN:\n raise ValueError(f\"Invalid seed length ({len(seed_bytes)})\")\n\n key_bytes = Pbkdf2HmacSha512.DeriveKey(CardanoIcarusMasterKeyGeneratorConst.PBKDF2_PASSWORD,\n seed_bytes,\n CardanoIcarusMasterKeyGeneratorConst.PBKDF2_ROUNDS,\n CardanoIcarusMasterKeyGeneratorConst.PBKDF2_OUT_BYTE_LEN)\n key_bytes = cls.__TweakMasterKeyBits(key_bytes)\n\n return 
key_bytes[:Ed25519KholawPrivateKey.Length()], key_bytes[Ed25519KholawPrivateKey.Length():]", "def generate_key():\n key = crypto.Key.generate_key()\n click.echo('Private Key (len {}):: \\n{}'.format(\n len(key.get_privkey()),\n hexlify(key.get_privkey())))\n click.echo('Public Key (len {})::\\n{}'.format(\n len(key.get_pubkey()),\n hexlify(key.get_pubkey())))", "def create_rsa_key_pair() -> Tuple[str, str]:\n key = RSA.generate(RSA_KEY_STRENGTH)\n public_key = key.publickey().export_key().decode()\n private_key = key.export_key().decode()\n return public_key, private_key", "def text2PrivateKey(text:str):\n return RSA.importKey(b58decode(text))", "def genPrivateKey(self, bits):\n return self.genRandom(bits)", "def get_pub_key_bytes(priv_key: rsa.RSAPrivateKey) -> bytes:\n k = priv_key.public_key()\n return k.public_bytes(encoding=serialization.Encoding.PEM,\n format=serialization.PublicFormat.SubjectPublicKeyInfo)", "def _create_pkey(self, commonname, serial):\n pkey = PKey()\n pkey.generate_key(crypto.TYPE_RSA, self.key_bits)\n private = crypto.dump_privatekey(crypto.FILETYPE_PEM,\n pkey).decode()\n key_path = self._get_key_path(commonname, serial)\n if os.path.exists(key_path):\n raise FileExistsError(key_path)\n with open(key_path, 'w') as private_file:\n private_file.writelines(private)\n\n key_link = self._get_key_link(commonname)\n if os.path.exists(key_link):\n os.unlink(key_link)\n os.symlink(os.path.basename(key_path), key_link)\n\n return pkey", "def gen_private_key(p, q, e):\n\n # Calculate 'n', n = p x q\n n = p * q\n # Calculate 'd', d = e^(-1) mod [(p-1)x(q-1)]\n phi = (p - 1) * (q - 1)\n # Need to use extended euclidean algorithm for 'd'\n gcd, d, b = egcd(e, phi)\n\n # Assign key parameters\n key_params = (n, e, d, p, q)\n # Construct private key\n key = RSA.construct(key_params)\n\n return key.exportKey()", "def from_base58(cls, address: str) -> 'PublicKey':\n return cls(base58.b58decode(address))", "def generate_rsa_auxiliary_key_pair() -> AuxiliaryKeyPair:\n rsa_key_pair = rsa_keypair()\n return AuxiliaryKeyPair(rsa_key_pair.private_key, rsa_key_pair.public_key)", "def from_bytes(cls, bytes):\n construct = _constructs.PreMasterSecret.parse(bytes)\n return cls(\n client_version=ProtocolVersion(\n major=construct.version.major,\n minor=construct.version.minor,\n ),\n random=construct.random_bytes,\n )", "def load_received_public_key_der(self, public_key_der):\n return self.load_received_public_key(VerifyingKey.from_der(public_key_der))", "def __init__(self, pubkey, e=65537):\n if isinstance(pubkey, int):\n self.key = RSA.RsaKey(n=pubkey, e=e)\n\n else:\n if not isinstance(pubkey, str):\n raise ValueError('pubkey must be str or int.')\n\n if '----' in pubkey:\n try:\n self.key = RSA.import_key(pubkey)\n except Exception as e:\n print(e)\n else:\n if pubkey == pubkey.lower():\n pubkey = int(pubkey, 16)\n self.key = RSA.RsaKey(n=pubkey, e=e)\n else:\n pubkey = '-----BEGIN PUBLIC KEY-----\\n' + pubkey + '\\n-----END PUBLIC KEY-----'\n try:\n self.key = RSA.import_key(pubkey)\n except Exception as e:\n print(e)", "def public_key(ctx):\n if not ctx.data:\n raise RefError(\n \"Ref error: eval_func: public key cannot be derived; try \"\n \"something like '|reveal:path/to/encrypted_private_key|publickey'\"\n )\n\n data_dec = ctx.data\n if ctx.ref_encoding == \"base64\":\n data_dec = base64.b64decode(data_dec).decode()\n\n private_key = serialization.load_pem_private_key(\n data_dec.encode(), password=None, backend=default_backend()\n )\n public_key = private_key.public_key()\n\n 
ctx.data = str(\n public_key.public_bytes(\n encoding=serialization.Encoding.PEM, format=serialization.PublicFormat.SubjectPublicKeyInfo\n ),\n \"UTF-8\",\n )", "def Generate(size=keyinfo.HMAC_SHA1.default_size):\n key_bytes = util.RandBytes(size / 8)\n key_string = util.Encode(key_bytes)\n return HmacKey(key_string, size)" ]
[ "0.6803569", "0.6298475", "0.6296541", "0.61843973", "0.6125002", "0.6038086", "0.6034926", "0.6032014", "0.6002514", "0.5997152", "0.598115", "0.59766436", "0.59530914", "0.59374905", "0.59370124", "0.5930606", "0.588195", "0.5862614", "0.5829133", "0.5813407", "0.58064556", "0.57275146", "0.5725388", "0.57161444", "0.56934845", "0.5683587", "0.56451404", "0.5638943", "0.55802584", "0.5561091", "0.5561015", "0.5550122", "0.5543565", "0.5523794", "0.5509299", "0.5486297", "0.5481985", "0.5469333", "0.5467232", "0.544382", "0.5432944", "0.5429053", "0.5425208", "0.5417378", "0.5410652", "0.53937125", "0.5390396", "0.5374689", "0.5365291", "0.5359171", "0.5355557", "0.53493714", "0.53412753", "0.53365934", "0.5331822", "0.5326463", "0.531169", "0.5298818", "0.5293861", "0.5291796", "0.5286692", "0.527589", "0.5273403", "0.5260004", "0.5247171", "0.52467465", "0.5244917", "0.52449155", "0.52385813", "0.52371514", "0.5236173", "0.52319604", "0.5227036", "0.52175677", "0.52103585", "0.5207373", "0.5200058", "0.5193268", "0.5192436", "0.5189089", "0.5186095", "0.5183279", "0.51808023", "0.5170559", "0.5168632", "0.516847", "0.51574033", "0.5156602", "0.51535845", "0.51535803", "0.51498806", "0.5148038", "0.5139113", "0.5135825", "0.5127766", "0.5124925", "0.51248634", "0.51220834", "0.5119579", "0.5111744" ]
0.7741583
0
Whether or not this is a master node.
def master(self): return self.depth == 0
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_node_master(self) -> bool:\n self._assert_local_rank_set()\n return self.local_rank == 0", "def is_master(self):\n return self._is_master", "def is_master(self):\n return MPControl.is_master", "def is_master(self) -> bool:\n return self.zone.SharedRoomID and self.zone.MasterMode", "def isMaster(self):\n logger.debug(\"Checking if %s is Cloudera Master\" % self)\n is_master = self.getClusterRole()\n logger.debug(\"Is %s master: %s\" % (self, is_master))\n return is_master", "def is_master(self, process_group: ProcessGroup = None) -> bool:\n rank = dist.get_rank(group=process_group)\n return rank == 0", "def am_I_master(self, ipdict):\n hostname = socket.gethostname()\n ip_address = socket.gethostbyname(hostname)\n return ipdict.get(ip_address).is_master", "def is_master(self): \n\n master_access = (PermissionGroups.query\n .filter_by(group_name=\"Master\")\n .first())\n if self.has_auth_access(master_access):\n return True\n else:\n return False", "def isPowerMaster(self) -> bool:\r\n if self.visprotocol is not None:\r\n return self.visprotocol.isPowerMaster()\r\n return False", "def is_master(self):\n return self.itunesAttributes.get('Master', False)", "def check_master(client, master_only=False):\n if master_only and not is_master_node(client):\n logger.info('Master-only flag detected. Connected to non-master node. Aborting.')\n sys.exit(9)", "async def async_set_is_master(self, is_master):\n self._is_master = is_master", "def am_i_root():\n if WITH_MPI:\n return not ME\n else:\n return os.getpid() == MASTER_PID", "def is_client(self) -> bool:\n return self.zone.SharedRoomID and not self.zone.MasterMode", "def remote_publishing_master():\n return remote_publishing() and hasattr(settings, 'NEWS_REMOTE_ROLE') \\\n and settings.NEWS_REMOTE_ROLE is 'MASTER'", "def master_branch(branch_name):\n\n if branch_name in MASTER_BRANCHES:\n return True\n\n return False", "def ForceMaster(node, is_testver):\n gsaport = core_utils.GSAMasterPort(is_testver)\n # ignore the result of forcemaster\n port_talker.TCPTalk(node, gsaport, 30, command='GET /forcemaster\\n')", "def is_master_version(cls):\n\n # We initiate the command we have to run in order to\n # get the branch we are currently working with.\n command = \"git branch\"\n\n # We execute and get the command output.\n command_result = PyFunceble.helpers.Command(command).execute()\n\n for branch in command_result.split(\"\\n\"):\n # We loop through each line of the command output.\n\n if branch.startswith(\"*\") and \"master\" in branch:\n # The current branch is `master`.\n\n # We return True.\n return True\n\n # The current branch is not `master`.\n\n # We return False.\n return False", "def is_active(self):\n group_names = self.get_var(\"group_names\", default=[])\n master_or_node = 'oo_masters_to_config' in group_names or 'oo_nodes_to_config' in group_names\n return super(OvsVersion, self).is_active() and master_or_node", "def master(self):\n return self._master", "def zone_master(self) -> None:\n for zone in self.coordinator.data.zones:\n if zone.MasterMode and zone.SharedRoomID == self.zone.SharedRoomID:\n return zone.ZoneID", "def master(self):\n\n return self._master", "def is_network_node():\n return config.NODE_IP == config.NETWORK_NODE_IP", "def is_current_node_active(self, device, partition):\n if self.is_version_sufficient(min_version='11.3.0') is False:\n print \"!!!!!!!!!!! WARNING !!!!!!!!!!!!!!!\"\n print \"! 
UNABLE TO VERIFY FAILOVER STATE !\"\n print \"!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\"\n stop = raw_input('Do you want to continue? [y|N]')\n if stop.strip() == \"y\" or stop.strip() == \"Y\":\n return True\n else:\n return False\n \"\"\" Determines if the connect device is the master, if not Bail with an error.\"\"\"\n try:\n self.connection.System.Session.set_active_folder(\"/Common\")\n status = self.connection.Management.Device.get_failover_state([device])\n if status == ['HA_STATE_ACTIVE']:\n self.connection.System.Session.set_active_folder(\"/\"+partition)\n return True\n else:\n return False\n except:\n raise Exception(\"Failed to determine if {} is a master\".format(device))", "def master(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"master\")", "def master(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"master\")", "def master(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"master\")", "def node_leaf(self):\r\n return self.zero_son is None and self.one_son is None", "def is_ssl_cert_master(votes=None):\n master = None\n for rid in relation_ids('cluster'):\n master = relation_get(attribute='ssl-cert-master', rid=rid,\n unit=local_unit())\n\n if master == local_unit():\n votes = votes or get_ssl_cert_master_votes()\n if not peer_units() or (len(votes) == 1 and master in votes):\n return True\n\n log(\"Did not get consensus from peers on who is ssl-cert-master \"\n \"(%s)\" % (votes), level=INFO)\n\n return False", "def is_cont_node():\n return False", "def check_connection_to_db(self):\n try:\n self._client.admin.command('ismaster')\n return True\n except Exception:\n return False", "def is_start_node():\n return False", "def master_id(self):\r\n return self._arm.master_id", "def master_host(self) -> str:\n raise NotImplementedError", "def is_root(self):\n return (self.__type & NODE_ROOT) == NODE_ROOT", "def is_root(self):\n return True", "def is_root(self):\n return True", "def is_root(self) -> bool:\n return self.parent_id is None", "def add_master_process(self, value=u\"on\"):\n path = [u\"master_process\"]\n self.add_config_item(self._nodeconfig, value, path)", "def getMasterMode(self, unitCode=0):\n resp = self.XAPCommand('MASTER', unitCode=unitCode)\n return int(resp)", "def masterIP(self):\r\n return self._masterIP", "def is_cluster_leader(self):\n return self.leader == 'self'", "def masterPort(self):\r\n return self._masterPort", "def is_root(self):\n return self.parent == None", "def is_root(self):\n return self.parent_id is None", "def node_exists(self):\n return self.oid is not None", "def print_on_node_master(self, msg: str):\n self._assert_local_rank_set()\n if self.local_rank == 0:\n print(msg)", "def is_root(self, node: object) -> bool:\n if node == self.root:\n return True\n else:\n return False", "def is_root(self):\n return self._parent == None", "def is_server_default(self):\n ...", "def ensure_ssl_cert_master():\n master_override = False\n elect = is_elected_leader(CLUSTER_RES)\n\n # If no peers we allow this unit to elect itsef as master and do\n # sync immediately.\n if not peer_units():\n elect = True\n master_override = True\n\n if elect:\n votes = get_ssl_cert_master_votes()\n # We expect all peers to echo this setting\n if not votes or 'unknown' in votes:\n log(\"Notifying peers this unit is ssl-cert-master\", level=INFO)\n for rid in relation_ids('cluster'):\n settings = {'ssl-cert-master': local_unit()}\n relation_set(relation_id=rid, relation_settings=settings)\n\n # Return now and wait for 
cluster-relation-changed (peer_echo) for\n # sync.\n return master_override\n elif not is_ssl_cert_master(votes):\n if not master_override:\n log(\"Conscensus not reached - current master will need to \"\n \"release\", level=INFO)\n\n return master_override\n\n if not is_ssl_cert_master():\n log(\"Not ssl cert master - skipping sync\", level=INFO)\n return False\n\n return True", "def get_master_address(self):\n if self.master_address:\n return self.master_address\n return super(CelerySentinelConnectionPool, self).get_master_address()", "def master():\n env.branch = 'master'", "def master():\n env.branch = 'master'", "async def sync_master(self):\n if not [entity for entity in self._casatunes_entities() if entity.is_client]:\n await self.coordinator.data.zone_master(self.zone_master, False)\n await self.coordinator.async_refresh()\n _LOGGER.debug(\"%s zone is no longer master.\", self.zone_master)", "def slave(self):\n return self._slave_mode", "def master_host(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"master_host\")", "def master_host(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"master_host\")", "def master_host(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"master_host\")", "def _createMaster(self):\n\n host = self.to_config['address']\n port = self.to_config['port']\n master = modbus_tcp.TcpMaster(host=host, port=port,\n timeout_in_sec=10.0) #@TODO: Put timeout in configuration\n return master", "def is_root(self):\n return self.root in [-1, self]", "def is_global(self) -> bool:\n return self._parent_node.is_global()", "def on_public_cluster(self) -> bool:\n return not self.on_private_cluster", "def masters(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"masters\")", "def masters(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"masters\")", "def _check_master_tablet(self, t, port=None):\n ti = utils.run_vtctl_json(['GetTablet', t.tablet_alias])\n self.assertEqual(ti['type'], topodata_pb2.MASTER)\n if port:\n self.assertEqual(ti['port_map']['vt'], port)\n\n # make sure the health stream is updated\n health = utils.run_vtctl_json(['VtTabletStreamHealth', '-count', '1',\n t.tablet_alias])\n self.assertIn('serving', health)\n self.assertEqual(health['target']['tablet_type'], topodata_pb2.MASTER)", "def get_host_master_id(self):\r\n return self._handler.get_host_master_id()", "def using_xdist(request: pytest.FixtureRequest) -> bool:\n try:\n is_master = request.getfixturevalue(\"worker_id\") == \"master\"\n return not is_master\n except pytest.FixtureLookupError:\n return False", "def master(self):\n return self.remappers[self._master_name]", "def get_sync_master(self):\n servers = self.get_sync_masters()\n assert servers, \"starter: don't have instances!\"\n return servers[0]", "def is_standalone(self) -> bool:\n if not self.name or self.fallback:\n return True\n\n return False", "def local(self):\n return self.hostname == \"localhost\" and self.user is None and self.ssh_args is None", "def is_central_server() -> bool:\n return hasattr(Config().algorithm,\n 'cross_silo') and Config().args.port is None", "def is_on(self) -> bool:\n val = bool(self._cluster_handler.cluster.get(self._zcl_attribute))\n return (not val) if self.inverted else val", "def is_root(self, node: Node) -> bool:\n return node == self._root", "def is_root(self):\n return not self.parent", "def get_master(self):\n\n def watcher(watched_event):\n if watched_event.type and watched_event.path:\n msg = \"child 
changed, try to get master again.type %s, state %s, path %s.\" % (\n watched_event.type, watched_event.state, watched_event.path)\n logger.info(\"[ %s(%s) ] %s\" % (self.path, \"master\" if self.is_master else \"slave\", msg))\n self.workers = self.get_workers()\n logger.debug(\"watcher call get_master start\")\n self.get_master()\n logger.debug(\"watcher call get_master end\")\n\n try:\n children = self.zk.get_children(self.LEADERSHIP_PATH, watcher)\n except:\n logger.error(traceback.format_exc())\n return\n\n # self register\n infos = []\n for child in children:\n data, stat = self.zk.get(self.LEADERSHIP_PATH + \"/\" + child)\n infos.append(data)\n\n # make sure leadship and services exists\n if self.info not in infos or \\\n not self.zk.exists(self.SERVICE_PATH + \"/\" + self.info):\n logger.debug(\"get_master call register start\")\n self.register_leadership()\n self.register_service()\n logger.debug(\"get_master call register end\")\n\n children.sort()\n logger.debug(\"%s's children: %s\" % (self.LEADERSHIP_PATH, children))\n # check if I'm master\n self.master = children[:self.MASTER_NUM]\n if self.path in self.master:\n self.is_master = True\n logger.info(\"[ %s(%s) ] %s\" % (self.path, \"master\" if self.is_master else \"slave\", \"I am master!\"))\n # get slave status and assign undone task to them\n online_workers = self.get_workers()\n self.assign_task(online_workers)\n self.workers = online_workers", "def master_ip(self) -> str:\n raise NotImplementedError", "def is_connected(self):\n if self.server: return True\n return False", "def configure_master_host(master_node, slave_nodes, host_type='linux'):\n if (host_type=='linux'):\n check_firewalld(master_node)\n if not key_is_present(master_node):\n generate_key(master_node)\n push_key_to_slave(master_node, slave_nodes)", "def i_am_root():\n try:\n return True if mpi_rank() == 0 else False\n except AttributeError:\n # not running MPI\n return True", "def rosnode_exists(name, ros_master_uri='http://localhost:11311'):\n import os\n import subprocess\n from ..log import PCG_ROOT_LOGGER\n env_variables = os.environ.copy()\n env_variables['ROS_MASTER_URI'] = ros_master_uri\n try:\n output = subprocess.check_output(\n ['rosnode', 'list'],\n env=env_variables\n )\n output = output.decode('utf-8')\n return name in output\n except subprocess.CalledProcessError as ex:\n PCG_ROOT_LOGGER.warning(\n 'Error testing ROS master, message={}'.format(ex))\n return False", "def is_regular(self):\n return not( self.is_master()\n or self.is_folder()\n or self.is_smart()\n or self.is_distinguished())", "def master_count(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"master_count\")", "def is_esi_node():\n\n # Fetch ACME logger and write debug message\n log = logging.getLogger(\"ACME\")\n log.debug(\"Test if hostname matches the pattern 'esi-sv*'\")\n return socket.gethostname().startswith(\"esi-sv\") and os.path.isdir(\"/cs\")", "def isRoot(self):\n\n # If I don't have a parent, I am root\n return not self._father", "def is_primary(self):\n\n return not self.parent.non_primary", "def master_instance_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"master_instance_name\")", "def master_global_access_config(self) -> Optional[pulumi.Input['PrivateClusterMasterGlobalAccessConfigArgs']]:\n return pulumi.get(self, \"master_global_access_config\")", "def remote_publishing_slave():\n return remote_publishing() and hasattr(settings, 'NEWS_REMOTE_ROLE') \\\n and settings.NEWS_REMOTE_ROLE is 'SLAVE'", "def 
master_instance_name(self) -> str:\n return pulumi.get(self, \"master_instance_name\")", "def IsRoot(self):\n return not self._parent_group", "def is_main_process(args: dict):\n\n return not is_distributed(args) or args.local_rank == 0", "def is_leaf_node(self):\n if self is None:\n return False\n if self.left is None and self.right is None:\n return True\n return False", "def series_master_id(self):\n if \"seriesMasterId\" in self._prop_dict:\n return self._prop_dict[\"seriesMasterId\"]\n else:\n return None", "def is_machinetag (self) :\n\n return self.__ismachinetag__", "def is_root(self) -> bool:\n parent_type = self.parent_id.split(\"_\", 1)[0]\n return parent_type == self._reddit.config.kinds[\"submission\"]", "def isNodeLeaf ( self ):\n return self.nodes is None or len ( self.nodes ) == 0\n # End isNodeLeaf", "def slaves_found(self):\n return not (len(self.topology) and self.topology[0][1] == [])", "def node_inode(self):\n return False" ]
[ "0.8737156", "0.8224543", "0.8093627", "0.8079041", "0.787273", "0.7678137", "0.7496142", "0.7409133", "0.7236752", "0.7206599", "0.71834207", "0.6667358", "0.6657551", "0.66142035", "0.64824533", "0.6475769", "0.6441394", "0.6434042", "0.639641", "0.6376803", "0.6363649", "0.633074", "0.6310723", "0.63028127", "0.62911737", "0.62911737", "0.6281085", "0.6235243", "0.6220278", "0.6219032", "0.62165725", "0.62018824", "0.62000227", "0.6192886", "0.6192417", "0.6159932", "0.6159932", "0.60249835", "0.6004057", "0.5998836", "0.59959024", "0.5992269", "0.5932443", "0.592668", "0.5924251", "0.5915102", "0.5889559", "0.5888883", "0.5885534", "0.5878497", "0.5856725", "0.58509547", "0.58488804", "0.58488804", "0.5821605", "0.5816877", "0.579881", "0.579881", "0.579881", "0.579785", "0.57933396", "0.5789573", "0.5786504", "0.5786293", "0.5786293", "0.5767164", "0.57653487", "0.57618994", "0.5747651", "0.574734", "0.57328767", "0.57252496", "0.572516", "0.5713427", "0.5712964", "0.57067835", "0.57048917", "0.5700912", "0.5682988", "0.56815386", "0.56795835", "0.56711", "0.5659999", "0.565616", "0.56514084", "0.564529", "0.5640696", "0.5636029", "0.56314653", "0.5629285", "0.56183565", "0.56139696", "0.5609485", "0.5604648", "0.55946994", "0.55840814", "0.5583541", "0.557171", "0.55680615", "0.5565051" ]
0.71505064
11
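For illustration only, a minimal sketch of the convention the query/document pair above describes, where the master node is the node at depth 0. The HDNode name and its fields are invented for this sketch and are not taken from the dataset:

    class HDNode:
        def __init__(self, depth=0, index=0):
            self.depth = depth    # 0 only for the master/root node
            self.index = index    # child index within the parent

        @property
        def master(self):
            # The master node is the root of the derivation tree.
            return self.depth == 0

    root = HDNode()
    child = HDNode(depth=1, index=5)
    assert root.master and not child.master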
Whether or not this is a hardened node. Hardened nodes are those with indices >= 0x80000000.
def hardened(self): # A hardened key is a key with index >= 2 ** 31, so # we check that the MSB of a uint32 is set. return self.index & 0x80000000
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def node_leaf(self):\r\n return self.zero_son is None and self.one_son is None", "def is_internal(self):\n if self.is_leaf() or self.is_semileaf():\n return False\n return True", "def node_inode(self):\n return False", "def node_inode(self):\n return False", "def is_leaf(self):\n return self.pixel_count > 0", "def _is_leaf(self, index):\r\n return 2*index+1 > self._size - 1", "def IsEulerGraph(self):\n\n for node in self.nodes:\n if ((len(node.neighbours) % 2) == 1) or (len(node.neighbours) == 0):\n return False\n return True", "def has_node(self, n):\n return n in self.node_dict", "def isWellFormedNode(self, *args):\n return _libsbml.ASTBasePlugin_isWellFormedNode(self, *args)", "def _is_left_edge(self, ndx):\n if len(self._dims)== 1:\n return ndx == 0\n return ndx < self._dims[1]", "def __nonzero__(self):\n return self.root.__nonzero__()", "def is_ghost(self):\n\t\treturn False", "def essential_node_count(self) -> int:\n return sum(\n 1 for n in self.graph.nodes() if n.kind() not in self._EXCLUDED_NODE_KINDS\n )", "def has_leaf(self) -> bool:\r\n self._logger.debug(log_message_formatter(\r\n \"get\", f\"{self}\", \"has_leaf\"))\r\n return self._hvac_mode == \"eco\"", "def is_shorter(node, n_node):\r\n return True if node.distance + n_node.value < n_node.distance else False", "def has_node(self, n):\n return n in self.dict", "def has_ghosts(self):\n return not np.all(self.mesh.discretization.ghosts == 0)", "def is_leaf(self):\n return len(self.blocks) == 0", "def has_nei(self, key: int) -> bool:\r\n return self.neighbors.__contains__(key)", "def is_ghost(self):\n return self._is_ghost", "def is_leaf(self):\r\n return self.num_children() == 0", "def is_root(self):\n return self.unpack_word(0x2) & 0x0004 > 0", "def need_neighbor(self):\n return self._need_neighbor", "def is_internal(self):\n # TODO: Check if either left child or right child has a value\n return ... or ...", "def is_leaf(self):\n return not self.children.exists()", "def isNodeLeaf ( self ):\n return self.nodes is None or len ( self.nodes ) == 0\n # End isNodeLeaf", "def _is_trivial(self):\n return self._.d <= 1", "def count_dead_node(self):\n count = 0\n for node in self.node:\n if node.energy < 0:\n count += 1\n return count", "def is_network_node():\n return config.NODE_IP == config.NETWORK_NODE_IP", "def _node_only_used_for_sym_size(node: Node, partition_nodes: List[Node]):\n if _is_sym_size_node(node):\n return True\n\n return all(\n ((user not in partition_nodes) or _is_sym_size_node(user))\n for user in node.users\n )", "def node_exists(self):\n return self.oid is not None", "def direct(self):\n return self.isleaf and not self.isExtended", "def is_node_down():\n try:\n hostname = check_command_output(\"hostname\").strip()\n host_fqdn = socket.getfqdn(hostname)\n nodes = get_compute_nodes_info(hostname_filter=hostname)\n if not any(host in nodes for host in [hostname, host_fqdn]):\n log.warning(\"Node is not attached to scheduler. Reporting as down\")\n return True\n\n node = nodes.get(host_fqdn, nodes.get(hostname))\n log.info(\"Node is in state: '{0}'\".format(node.state))\n if all(error_state not in node.state for error_state in SGE_ERROR_STATES):\n return False\n except Exception as e:\n log.error(\"Failed when checking if node is down with exception %s. 
Reporting node as down.\", e)\n\n return True", "def is_leaf(self):\n return len(self.children) == 0", "def is_exceptional(self):\n G = self.poset().hasse_diagram()\n for x in G:\n nx = list(G.neighbors_out(x))\n nx.append(x)\n if min(nx) < x and max(nx) > x:\n return False\n return True", "def is_leaf(self):\n return len(self.refs) == 0", "def is_leaf(self):\n # TODO: Check if both left child and right child have no value\n return ... and ...", "def has_next(self):\n if self.idx < len(self.nodes):\n return True\n else:\n return False", "def is_unrolled_out_leaf(self, game):\n return self._plays[game] == 0", "def is_leaf(self):\n # Has no children nodes\n return self.nodes is None or len(self.nodes) == 0", "def is_leaf(self):\n if len(self.children) == 0:\n return True\n else:\n return False", "def is_leaf(self):\n return self.__left == None and self.__right==None", "def is_red(node):\n if node is None:\n return False\n return node.colour is True", "def __nonzero__(self):\n for e in self:\n if e != 0:\n return True\n return False", "def is_leaf(self, pos):\n if pos >= (self.size//2) and pos <= self.size: \n return True\n return False", "def _isred(self, node):\n \n if node is None:\n return False\n else:\n return node.color == Color.RED", "def test_false_if_no_node(graph_no_edges):\n false_nodes = ['land submarine', 'Portland Timbers', 'tug cable scope', 100]\n for node in false_nodes:\n assert graph_no_edges.has_node(node) is False", "def next_leaf(node):\n return len(node[1][0][1]) == 0", "def is_cont_node():\n return False", "def is_leaf(self):\n return self._children == {}", "def is_leaf(self):\n return self._children == {}", "def is_leaf(self):\n return self._children == {}", "def is_leaf(self):\n return self._children == {}", "def isleaf(self):\n no_kids = super(PartitionDevice, self).isleaf\n # it is possible that the disk that originally contained this partition\n # no longer contains a disklabel, in which case we can assume that this\n # device is a leaf\n if self.disk and self.partedPartition and \\\n self.disk.format.type == \"disklabel\" and \\\n self.partedPartition in self.disk.format.partitions:\n disklabel = self.disk.format\n else:\n disklabel = None\n\n extended_has_logical = (self.isExtended and\n (disklabel and disklabel.logicalPartitions))\n return (no_kids and not extended_has_logical)", "def is_there_life(pixel):\n\treturn pixel[0] == 0", "def is_leaf(self, n):\n return self.num_children(n) == 0", "def nodes_flags(self):\n return self._nodes_flags", "def is_expanded(self) -> bool:\n return len(self._untried_edges) == 0", "def get_no_edges(self):\r\n return len(self.__cost.keys())", "def has_connection_left(tile):\n return is_kth_bit_set(tile, 1)", "def isInternal(self):\n\n\t\t# TODO optimization do we really need to look at the host attributes?\n\t\t# maybe we can just use the global attribute (faster)\n\t\tfe = self.newdb.getFrontendName()\n\t\tnetwork = self.newdb.getHostAttr(fe, 'Kickstart_PrivateNetwork')\n\t\tnetmask = self.newdb.getHostAttr(fe, 'Kickstart_PrivateNetmask')\n\n\t\t# Test based on our client's IP address.\n\t\twork = string.split(network, '.')\n\t\tmask = string.split(netmask, '.')\n\t\tip = string.split(self.clientList[-1], '.')\n\n\t\tfor i in range(0, len(ip)):\n\t\t\ta = int(ip[i]) & int(mask[i])\n\t\t\tb = int(work[i]) & int(mask[i])\n\n\t\t\tif a != b:\n\t\t\t\treturn 0\n\n\t\treturn 1", "def is_leaf(self):\n return len(self.child_list) == 0", "def es_hoja(self) -> bool:\n return len(self.children) == 0", "def has_node(self, val):\n return 
val in self._g", "def disconnected_node_acknowledged(self):\n return self._disconnected_node_acknowledged", "def node_is_edge(self, node: MazeCell) -> bool:\n return node.x == 0 or node.x == self._ncols - 1 or node.y == 0 or node.y == self._nrows - 1", "def is_full(self, node):\n if node.right != None and node.left != None:\n return True\n return False", "def is_trivial(self):\n return self.dims == 0", "def is_leaf(self):\n return len(self._children) == 0", "def h(self, node):\n\n return sum(s != g for (s, g) in zip(node.state, self.goal))", "def any_neighbor_burning(self):\n neighbors = self.world.get_four_neighbors(self, Patch.null)\n states = [patch.state for patch in neighbors]\n return \"orange\" in states", "def is_connected(self) -> bool:\n for node in self.nodes.values():\n if node.is_connected:\n return True\n return False", "def test_node_without_neighbors(graph_no_edges):\n assert graph_no_edges.neighbors(99) == {}", "def has_metal(self):\n if self.metal_indices:\n return True\n return False", "def __nonzero__(self):\n return _osgAnimation.mapVertexInfluence___nonzero__(self)", "def has_node(self, ntype, nid):\n return ntype in self._node_index and nid in self._node_index[ntype]", "def is_internal(self):\n return bool(self.is_reducing() and self.is_non_reducing())", "def is_leaf(node):\n return node.children == {}", "def is_leaf(self):\n if len(self.children) == 0: #If the Node has no children, it's a leaf\n return True\n else:\n return False", "def fn(node):\n if not node: return True, 0\n tf0, h0 = fn(node.left)\n tf1, h1 = fn(node.right)\n return tf0 and tf1 and abs(h0-h1) <= 1, 1 + max(h0, h1)", "def findWeakNode(self):\r\n weak = [0, 1] #LIste point le plus faible et nombre éléments connexes si l'on retire ce point\r\n for sommet in range(self.n):\r\n print(f\"Try NODE={sommet}\")\r\n newGraph = self.retireSommet(sommet)\r\n nGpesConnexes = newGraph.countConnexe()\r\n if weak[1] < nGpesConnexes:\r\n weak = [sommet, nGpesConnexes]\r\n return weak[0], weak[1]", "def test_petersen_graph(self):\n G = nx.petersen_graph()\n _, decomp = treewidth_min_degree(G)\n is_tree_decomp(G, decomp)", "def is_island(self):\n return bool(not self.children.exists() and not self.parents.exists())", "def __nonzero__(self):\n return True", "def module_is_avalaible(self):\n return bool(self._get_nodes())", "def out_of_bounds(self):\n return not 0 <= self.nodes[0].x < WIDTH * SCALE or not 0 <= self.nodes[0].y < HEIGHT * SCALE", "def x_overrun(self):\n return (self.status & 0x10) != 0", "def is_eht_off(self):\n raise NotImplementedError", "def test_efficiency_disconnected_nodes(self):\n assert_equal(nx.efficiency(self.G1, 1, 2), 0)", "def inside_itself(self):\n for i in range(2, len(self.nodes)):\n if self.nodes[0] == self.nodes[i]:\n return True\n return False", "def test_petersen_graph(self):\n G = nx.petersen_graph()\n _, decomp = treewidth_min_fill_in(G)\n is_tree_decomp(G, decomp)", "def is_finite(self) -> bool:\n normal = self.to_normal_form()\n di_graph = nx.DiGraph()\n for production in normal.productions:\n body = production.body\n if len(body) == 2:\n di_graph.add_edge(production.head, body[0])\n di_graph.add_edge(production.head, body[1])\n try:\n nx.find_cycle(di_graph, orientation=\"original\")\n except nx.exception.NetworkXNoCycle:\n return True\n return False", "def has_disable_nohz_full_enabled(labels):\n if not labels:\n return False\n\n for label in labels:\n if (label.label_key == helm_common.LABEL_DISABLE_NOHZ_FULL and\n label.label_value):\n return helm_common.LABEL_VALUE_ENABLED 
== label.label_value.lower()\n\n # We haven't found the nohz_full node key. Return False\n return False", "def countAdjacentFloorNodes(self, x, y):\n\t\treturn self.isFloor(x - 1, y) + self.isFloor(x + 1, y) + self.isFloor(x, y - 1) + self.isFloor(x, y + 1)", "def has_nodes(self) -> bool:\n\n return len(self.dirs) > 0", "def detect_deadlock(self):\n knots = []\n for subgraph in nx.strongly_connected_component_subgraphs(self.digraph):\n nodes = set(subgraph.nodes())\n if len(nodes) == 1:\n n = nodes.pop()\n nodes.add(n)\n if set(self.digraph.successors(n)) == nodes:\n knots.append(subgraph)\n else:\n for n in nodes:\n successors = nx.descendants(self.digraph, n)\n if not successors <= nodes:\n break\n else:\n knots.append(subgraph)\n if len(knots) > 0:\n return True\n return False", "def is_leaf(self, node: object) -> bool:\n if node.left == None and node.right == None:\n return True\n else:\n return False", "def has_neighbour(self, node):\n if node in self.neighbours:\n return True\n return False", "def is_leaf(self) -> bool:\n return not any(self.children)", "def is_internal(self, u):\n return not self.is_leaf(u)" ]
[ "0.62713253", "0.60492986", "0.5942298", "0.5942298", "0.5901078", "0.57642645", "0.5762194", "0.5736317", "0.57134473", "0.5682844", "0.5674507", "0.5665041", "0.56444204", "0.5642746", "0.56300306", "0.55741334", "0.55603236", "0.55570173", "0.55541235", "0.5552279", "0.55295277", "0.55292237", "0.5527793", "0.55263567", "0.55004317", "0.5482618", "0.5472563", "0.5466461", "0.5457714", "0.54350126", "0.5390536", "0.53760445", "0.53708833", "0.5370824", "0.5363699", "0.53621507", "0.5354949", "0.53520125", "0.5351626", "0.53510296", "0.5342477", "0.5341679", "0.53405476", "0.533936", "0.5335492", "0.5327853", "0.5327458", "0.53240484", "0.53213364", "0.53211474", "0.53211474", "0.53211474", "0.53211474", "0.53089577", "0.53027135", "0.5296269", "0.529615", "0.5295896", "0.5291143", "0.5290276", "0.52891356", "0.5286051", "0.52847487", "0.5284345", "0.5281868", "0.5278159", "0.5275963", "0.52724856", "0.5265412", "0.5264677", "0.52644753", "0.5263985", "0.5260284", "0.5256146", "0.52452457", "0.5244283", "0.52424824", "0.5229262", "0.52261716", "0.5219659", "0.52148277", "0.52145094", "0.5213089", "0.5207053", "0.5198505", "0.5195105", "0.518768", "0.5186097", "0.5182388", "0.5179638", "0.5179185", "0.51762205", "0.5171545", "0.5169608", "0.5169608", "0.51634526", "0.5162682", "0.5159458", "0.51586235", "0.51573974" ]
0.68602026
0
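The query above spells out the hardened-index rule: an index is hardened when it is at least 0x80000000 (2 ** 31), i.e. when the most significant bit of the 32-bit index is set. A small self-contained sketch of that check, plus a helper that maps a normal index to its hardened form (the helper names are invented for illustration):

    HARDENED_OFFSET = 0x80000000  # 2 ** 31

    def is_hardened(index: int) -> bool:
        # True when the most significant bit of the uint32 index is set.
        return bool(index & HARDENED_OFFSET)

    def harden(index: int) -> int:
        # Map a normal child index (e.g. 0) to its hardened counterpart (0').
        return index + HARDENED_OFFSET

    assert not is_hardened(0)
    assert is_hardened(harden(0))      # 0x80000000
    assert is_hardened(0xFFFFFFFF)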
Returns the identifier for the key.
def identifier(self): raise NotImplementedError
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_key_id(self):", "def key(key):\n return key", "def key_id(self):\n return self._key_id", "def key(self):\n return str(self._id)", "def key(self) -> str:\n return pulumi.get(self, \"key\")", "def key(self) -> str:\n return pulumi.get(self, \"key\")", "def key(self) -> str:\n return pulumi.get(self, \"key\")", "def key(self) -> str:\n return pulumi.get(self, \"key\")", "def key(self) -> str:\n return pulumi.get(self, \"key\")", "def key(self) -> str:\n return pulumi.get(self, \"key\")", "def key(self) -> str:\n return pulumi.get(self, \"key\")", "def key(self) -> str:\n return pulumi.get(self, \"key\")", "def key(self) -> str:\n return pulumi.get(self, \"key\")", "def key(self) -> str:\n return pulumi.get(self, \"key\")", "def key(self) -> str:\n return pulumi.get(self, \"key\")", "def key(self) -> str:\n return pulumi.get(self, \"key\")", "def key(self) -> str:\n return pulumi.get(self, \"key\")", "def key(self) -> str:\n return pulumi.get(self, \"key\")", "def key(self) -> str:\n return pulumi.get(self, \"key\")", "def key(self) -> str:\n return pulumi.get(self, \"key\")", "def key(self) -> str:\n return pulumi.get(self, \"key\")", "def key(self) -> str:\n return pulumi.get(self, \"key\")", "def key(self) -> str:\n return pulumi.get(self, \"key\")", "def key(self) -> str:\n return pulumi.get(self, \"key\")", "def key(self) -> str:\n return pulumi.get(self, \"key\")", "def key(self) -> str:\n return pulumi.get(self, \"key\")", "def key(self) -> str:\n return pulumi.get(self, \"key\")", "def key(self) -> str:\n return pulumi.get(self, \"key\")", "def key(self) -> str:\n return pulumi.get(self, \"key\")", "def key(self) -> str:\n return pulumi.get(self, \"key\")", "def key(self) -> str:\n return pulumi.get(self, \"key\")", "def key(self) -> str:\n return pulumi.get(self, \"key\")", "def key(self) -> str:\n return pulumi.get(self, \"key\")", "def key(self) -> str:\n return pulumi.get(self, \"key\")", "def key(self) -> str:\n return pulumi.get(self, \"key\")", "def key(self) -> str:\n return pulumi.get(self, \"key\")", "def key(self) -> str:\n return pulumi.get(self, \"key\")", "def key(self) -> str:\n return pulumi.get(self, \"key\")", "def key(self) -> str:\n return pulumi.get(self, \"key\")", "def key(self) -> str:\n return pulumi.get(self, \"key\")", "def key(self) -> str:\n return pulumi.get(self, \"key\")", "def key(self) -> str:\n return pulumi.get(self, \"key\")", "def key(self) -> str:\n return pulumi.get(self, \"key\")", "def key(self) -> str:\n return pulumi.get(self, \"key\")", "def key(self) -> str:\n return pulumi.get(self, \"key\")", "def key(self) -> str:\n return pulumi.get(self, \"key\")", "def key(self) -> str:\n return pulumi.get(self, \"key\")", "def key(self) -> str:\n return pulumi.get(self, \"key\")", "def key(self) -> str:\n return pulumi.get(self, \"key\")", "def key(self) -> str:\n return pulumi.get(self, \"key\")", "def key(self) -> str:\n return pulumi.get(self, \"key\")", "def key(self) -> str:\n return pulumi.get(self, \"key\")", "def key(self) -> str:\n return pulumi.get(self, \"key\")", "def key(self) -> str:\n return pulumi.get(self, \"key\")", "def key(self) -> str:\n return pulumi.get(self, \"key\")", "def key(self) -> str:\n return pulumi.get(self, \"key\")", "def key(self) -> str:\n return pulumi.get(self, \"key\")", "def key(self) -> str:\n return pulumi.get(self, \"key\")", "def key(self) -> str:\n return pulumi.get(self, \"key\")", "def key(self) -> str:\n return pulumi.get(self, \"key\")", "def key(self) -> str:\n return pulumi.get(self, 
\"key\")", "def key(self) -> str:\n return pulumi.get(self, \"key\")", "def key(self) -> str:\n return pulumi.get(self, \"key\")", "def key_id(self) -> pulumi.Output[int]:\n return pulumi.get(self, \"key_id\")", "def get_key(self):\n return self._determine_key()", "def key(self):\n return self._key if self._key else self.factory().key", "def key(self) -> str:\n return self._key", "def get_key(self):\n return self.key", "def get_key(self):\n return self.key", "def get_key(self) -> int:\n return self.key", "def key(self):\n return self.key_for(self.id)", "def key(self):\n return key_for_name(self.name)", "def key(self) -> str:\n return self.__key", "def key_id(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"key_id\")", "def key(self) -> Optional[str]:\n return pulumi.get(self, \"key\")", "def key(self) -> Optional[str]:\n return pulumi.get(self, \"key\")", "def key(self) -> Optional[str]:\n return pulumi.get(self, \"key\")", "def key(self) -> Optional[str]:\n return pulumi.get(self, \"key\")", "def key(self) -> Optional[str]:\n return pulumi.get(self, \"key\")", "def key(self) -> Optional[str]:\n return pulumi.get(self, \"key\")", "def key(self) -> Optional[str]:\n return pulumi.get(self, \"key\")", "def key(self) -> Optional[str]:\n return pulumi.get(self, \"key\")", "def key(self) -> Optional[str]:\n return pulumi.get(self, \"key\")", "def key(self) -> Optional[str]:\n return pulumi.get(self, \"key\")", "def key(self) -> Optional[str]:\n return pulumi.get(self, \"key\")", "def key(self) -> Optional[str]:\n return pulumi.get(self, \"key\")", "def key(self) -> Optional[str]:\n return pulumi.get(self, \"key\")", "def key(self) -> Optional[str]:\n return pulumi.get(self, \"key\")", "def key(self) -> Optional[str]:\n return pulumi.get(self, \"key\")", "def key(self) -> Optional[str]:\n return pulumi.get(self, \"key\")", "def key(self) -> Optional[str]:\n return pulumi.get(self, \"key\")", "def key(self) -> Optional[str]:\n return pulumi.get(self, \"key\")", "def key(self) -> Optional[str]:\n return pulumi.get(self, \"key\")", "def key(self) -> Optional[str]:\n return pulumi.get(self, \"key\")", "def key(self) -> Optional[str]:\n return pulumi.get(self, \"key\")", "def key(self) -> Optional[str]:\n return pulumi.get(self, \"key\")", "def key(self) -> Optional[str]:\n return pulumi.get(self, \"key\")", "def key(self) -> Optional[str]:\n return pulumi.get(self, \"key\")", "def key(self) -> Optional[str]:\n return pulumi.get(self, \"key\")", "def key(self) -> Optional[str]:\n return pulumi.get(self, \"key\")", "def key(self) -> Optional[str]:\n return pulumi.get(self, \"key\")" ]
[ "0.8199289", "0.8123676", "0.79820096", "0.79350626", "0.78921616", "0.78921616", "0.78921616", "0.78921616", "0.78921616", "0.78921616", "0.78921616", "0.78921616", "0.78921616", "0.78921616", "0.78921616", "0.78921616", "0.78921616", "0.78921616", "0.78921616", "0.78921616", "0.78921616", "0.78921616", "0.78921616", "0.78921616", "0.78921616", "0.78921616", "0.78921616", "0.78921616", "0.78921616", "0.78921616", "0.78921616", "0.78921616", "0.78921616", "0.78921616", "0.78921616", "0.78921616", "0.78921616", "0.78921616", "0.78921616", "0.78921616", "0.78921616", "0.78921616", "0.78921616", "0.78921616", "0.78921616", "0.78921616", "0.78921616", "0.78921616", "0.78921616", "0.78921616", "0.78921616", "0.78921616", "0.78921616", "0.78921616", "0.78921616", "0.78921616", "0.78921616", "0.78921616", "0.78921616", "0.78921616", "0.78921616", "0.78921616", "0.78921616", "0.78783184", "0.78268695", "0.76964945", "0.7683737", "0.7658154", "0.7658154", "0.7653101", "0.76459676", "0.7603101", "0.75886965", "0.7551909", "0.7545269", "0.7545269", "0.7545269", "0.7545269", "0.7545269", "0.7545269", "0.7545269", "0.7545269", "0.7545269", "0.7545269", "0.7545269", "0.7545269", "0.7545269", "0.7545269", "0.7545269", "0.7545269", "0.7545269", "0.7545269", "0.7545269", "0.7545269", "0.7545269", "0.7545269", "0.7545269", "0.7545269", "0.7545269", "0.7545269", "0.7545269" ]
0.0
-1
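The document for this row is abstract (it raises NotImplementedError), so the identifier itself is left to subclasses. In BIP32-style hierarchical keys the identifier is conventionally HASH160 of the serialized public key, that is RIPEMD-160 applied to the SHA-256 of the key bytes. A hedged sketch of that convention, assuming the local OpenSSL build exposes ripemd160 through hashlib (the function name is invented for illustration):

    import hashlib

    def key_identifier(serialized_public_key: bytes) -> bytes:
        # HASH160: RIPEMD-160 of the SHA-256 of the serialized public key.
        sha = hashlib.sha256(serialized_public_key).digest()
        return hashlib.new("ripemd160", sha).digest()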
Returns the key's fingerprint, which is the first 4 bytes of its identifier.
def fingerprint(self): return self.identifier[:4]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def fingerprint(public_key):\r\n\r\n return hashlib.new('ripemd160', hashlib.sha256(public_key).digest()).digest()[:4]", "def fingerprint(self, key):\n base64_pub = self.base64_pub_encode(key)\n return SHA256.new(base64_pub.encode('utf-8')).digest()", "def fingerprint(self):\n return self.gpg.list_keys()[0]['fingerprint']", "def fingerprint_key(key):\n try: key = key.public_key()\n except: pass\n\n serialized = key.public_bytes(\n encoding = serialization.Encoding .OpenSSH,\n format = serialization.PublicFormat.OpenSSH)\n\n blob = b64decode(serialized.split(None,2)[1])\n return fingerprint_public_key_blob(blob)", "def fingerprint(self):\n\n if self._fingerprint is None:\n self._fingerprint = _fingerprint(self.asn1, None)\n return self._fingerprint", "def host_key_fingerprint(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"host_key_fingerprint\")", "def fingerprint(self) -> str:\n fp = self.sha256.hex()\n return fp", "def fingerprint(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"fingerprint\")", "def get_fingerprint(self):\n return self.fp", "def getFingerprint(self):\r\n if self.getNumCerts() == 0:\r\n raise AssertionError()\r\n return self.x509List[0].getFingerprint()", "def host_key_fingerprint(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"host_key_fingerprint\")", "def getFingerprint(self):\r\n return b2a_hex(SHA1(self.bytes))", "def get_short_fingerprint(length=6):\n assert 6 <= length <= 32\n #\n return get_fingerprint(md5=True)[-length:]", "def fingerprint(self):\n return self.pod.hash_file(self.pod_path)", "def label_fingerprint(self) -> str:\n return pulumi.get(self, \"label_fingerprint\")", "def label_fingerprint(self) -> str:\n return pulumi.get(self, \"label_fingerprint\")", "def label_fingerprint(self) -> str:\n return pulumi.get(self, \"label_fingerprint\")", "def get_fingerprint(filepath):\n ssh_file = open(filepath, 'r')\n ssh_file_contents = ssh_file.readlines()\n ssh_fingerprint = ''.join(ssh_file_contents).strip()\n\n return ssh_fingerprint", "def key(self):\n return self._key.decode('utf-8')", "def _certificate_fingerprint(identity):\n fingerprint, stderr = _check_output([\n \"openssl\",\n \"x509\",\n \"-inform\",\n \"DER\",\n \"-noout\",\n \"-fingerprint\",\n ],\n inputstr=identity)\n fingerprint = fingerprint.strip()\n fingerprint = fingerprint.replace(\"SHA1 Fingerprint=\", \"\")\n fingerprint = fingerprint.replace(\":\", \"\")\n return fingerprint", "def fingerprint(keyed_data, digest_size=16):\n h = blake2b(digest_size=16)\n for key in sorted(keyed_data.keys()):\n val = keyed_data[key]\n s = json.dumps(val, sort_keys=True, cls=NpEncoder).encode()\n h.update(s)\n return h.hexdigest()", "def fingerprint_from_file(filename):\n cmd = flatten([gnupg_bin(), gnupg_home(), filename])\n outp = stderr_output(cmd).split('\\n')\n if not outp[0].startswith('pub'):\n raise CryptoritoError('probably an invalid gpg key')\n\n return outp[1].strip()", "def _gpg_fingerprints(self) -> List[str]:\n return self._gpg_keys.fingerprints", "def get_fingerprint(md5=False):\n sb = []\n sb.append(p.node())\n sb.append(p.architecture()[0])\n sb.append(p.architecture()[1])\n sb.append(p.machine())\n sb.append(p.processor())\n sb.append(p.system())\n sb.append(str(uuid.getnode())) # MAC address\n text = '#'.join(sb)\n if md5:\n return string_to_md5(text)\n else:\n return text", "def get_fingerprint(self, md='md5'):\n der = self.as_der()\n md = EVP.MessageDigest(md)\n md.update(der)\n digest = md.final()\n return 
hex(util.octx_to_num(digest))[2:-1].upper()", "def fingerprint(self) -> Text:\n return self.name", "def ssl_fingerprint(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"ssl_fingerprint\")", "def _fingerprint(key_object, load_private_key):\n\n if isinstance(key_object, PrivateKeyInfo):\n key = key_object['private_key'].parsed\n\n if key_object.algorithm == 'rsa':\n to_hash = '%d:%d' % (\n key['modulus'].native,\n key['public_exponent'].native,\n )\n\n elif key_object.algorithm == 'dsa':\n params = key_object['private_key_algorithm']['parameters']\n public_key = Integer(pow(\n params['g'].native,\n key_object['private_key'].parsed.native,\n params['p'].native\n ))\n\n to_hash = '%d:%d:%d:%d' % (\n params['p'].native,\n params['q'].native,\n params['g'].native,\n public_key.native,\n )\n\n elif key_object.algorithm == 'ec':\n public_key = key['public_key'].native\n if public_key is None:\n # This is gross, but since the EC public key is optional,\n # and we need to load the private key and use the crypto lib\n # to get the public key, we have to import the platform-specific\n # asymmetric implementation. This is the reason a bunch of the\n # imports are module imports, so we don't get an import cycle.\n public_key_object = load_private_key(key_object).public_key\n public_key = public_key_object.asn1['public_key'].parsed.native\n\n to_hash = '%s:' % key_object.curve[1]\n to_hash = to_hash.encode('utf-8')\n to_hash += public_key\n\n if isinstance(to_hash, str_cls):\n to_hash = to_hash.encode('utf-8')\n\n return hashlib.sha256(to_hash).digest()\n\n if isinstance(key_object, PublicKeyInfo):\n if key_object.algorithm == 'rsa':\n key = key_object['public_key'].parsed\n\n to_hash = '%d:%d' % (\n key['modulus'].native,\n key['public_exponent'].native,\n )\n\n elif key_object.algorithm == 'dsa':\n key = key_object['public_key'].parsed\n params = key_object['algorithm']['parameters']\n\n to_hash = '%d:%d:%d:%d' % (\n params['p'].native,\n params['q'].native,\n params['g'].native,\n key.native,\n )\n\n elif key_object.algorithm == 'ec':\n public_key = key_object['public_key'].native\n\n to_hash = '%s:' % key_object.curve[1]\n to_hash = to_hash.encode('utf-8')\n to_hash += public_key\n\n if isinstance(to_hash, str_cls):\n to_hash = to_hash.encode('utf-8')\n\n return hashlib.sha256(to_hash).digest()\n\n raise ValueError(pretty_message(\n '''\n key_object must be an instance of the\n asn1crypto.keys.PrivateKeyInfo or asn1crypto.keys.PublicKeyInfo\n classes, not %s\n ''',\n type_name(key_object)\n ))", "def key_pair_finger_print(self) -> str:\n return pulumi.get(self, \"key_pair_finger_print\")", "def _load_fingerprint(self):\n path = os.path.join(self._cache_path, '%s.fingerprint' % self._name)\n\n if not os.path.exists(path):\n return None\n\n with open(path) as f:\n fingerprint = f.read()\n\n return fingerprint", "def get_public_key_fingerprint(curve: object, temp_public_key: object) \\\n -> object:\n\n vk = VerifyingKey.from_string(bytes.fromhex(temp_public_key), curve=curve)\n\n uncompressed_pub_key = vk.to_string('uncompressed')\n\n pub_key_hash_fingerprint = hashlib.sha256(uncompressed_pub_key)\n\n return pub_key_hash_fingerprint.hexdigest()", "def calculate_key_signature(public_key: str) -> str:\n rsa_obj = RSA.import_key(public_key)\n rsa_der = rsa_obj.export_key(\"DER\")\n\n hasher = SHA1.new()\n hasher.update(rsa_der)\n fingerprint = base64url_encode(hasher.digest())\n\n return fingerprint.decode(\"utf8\")", "def fingerprint_public_key_blob(blob):\n hash = sha256(blob).digest()\n encoded 
= b64encode(hash).decode('UTF-8').rstrip('=')\n return 'SHA256:{}'.format(encoded)", "def fingerprint(self, fingerprint_hash=None):\n try:\n fd, name = tempfile.mkstemp(prefix='sshkey-')\n with open(name, 'w') as fd:\n fd.write('{}'.format(self.line))\n if fingerprint_hash:\n p = Popen(('ssh-keygen', '-E', fingerprint_hash, '-lf', name), stdin=PIPE, stdout=PIPE, stderr=PIPE)\n else:\n p = Popen(('ssh-keygen', '-lf', name), stdin=PIPE, stdout=PIPE, stderr=PIPE)\n stdout, stderr = [str(v, 'utf-8') for v in p.communicate()]\n if p.returncode != 0:\n raise SSHKeyError('Error running ssh-keygen: returns {}'.format(p.returncode))\n os.unlink(name)\n return stdout.rstrip().split()[1].split(':', 1)[1]\n except Exception as e:\n raise SSHKeyError('Error getting fingerprint for {}: {}'.format(self.line, e))", "def fingerprint_from_keybase(fingerprint, kb_obj):\n if 'public_keys' in kb_obj and \\\n 'pgp_public_keys' in kb_obj['public_keys']:\n for key in kb_obj['public_keys']['pgp_public_keys']:\n keyprint = fingerprint_from_var(key).lower()\n fingerprint = fingerprint.lower()\n if fingerprint == keyprint or \\\n keyprint.startswith(fingerprint) or \\\n keyprint.endswith(fingerprint):\n return {\n 'fingerprint': keyprint,\n 'bundle': key\n }\n\n return None", "def fingerprint(publicKeyN, publicKeyE=65537L):\n asn1Str = encoder.encode(univ.Sequence().setComponentByPosition(0, univ.Integer(publicKeyN)).setComponentByPosition(1, univ.Integer(publicKeyE)))\n hashString = hashlib.sha1(asn1Str).digest()\n hexlifiedHash = binascii.hexlify(hashString)\n return hexlifiedHash.upper()", "def convert_to_fingerprint(s):\n\n\ttry:\n\t\t# Convert SMILES to Molecule object\n\t\tmolecule = Chem.MolFromSmiles(s)\n\t\t# Get MACCS Key from Molecule object\n\t\tmaccs_key = MACCSkeys.GenMACCSKeys(molecule)\n\t\treturn maccs_key.ToBitString()\n\texcept:\n\t\treturn None", "def fingerprint_from_var(var):\n vsn = gpg_version()\n cmd = flatten([gnupg_bin(), gnupg_home()])\n if vsn[0] >= 2 and vsn[1] < 1:\n cmd.append(\"--with-fingerprint\")\n\n output = polite_string(stderr_with_input(cmd, var)).split('\\n')\n if not output[0].startswith('pub'):\n raise CryptoritoError('probably an invalid gpg key')\n\n if vsn[0] >= 2 and vsn[1] < 1:\n return output[1] \\\n .split('=')[1] \\\n .replace(' ', '')\n\n return output[1].strip()", "def raw_key(self) -> bytes:\n return bytes(self.data_bytes[ProofPath._Positions.KEY_POS : ProofPath._Positions.KEY_POS + KEY_SIZE])", "def get_key_id(self):\n jwk_data = {\n \"crv\": \"P-256\",\n \"kty\": \"EC\",\n \"x\": base64.urlsafe_b64encode(self.public_key_obj.public_numbers().x.to_bytes(32, \"big\")).decode().replace(\"=\", \"\"),\n \"y\": base64.urlsafe_b64encode(self.public_key_obj.public_numbers().y.to_bytes(32, \"big\")).decode().replace(\"=\", \"\")\n }\n jwk = json.dumps(jwk_data, separators=(',', ':'))\n return hashlib.sha256(jwk.encode()).digest()", "def hash_str(self):\n return '___'.join([self.key.kind(), self.key.string_id(),\n self._Hash()])", "def _fingerprint(self):\n hasher = hashlib.md5()\n source = inspect.getsource(self._func)\n hasher.update(source.encode('utf-8'))\n\n return hasher.hexdigest()", "def get_fingerprint():\n print('Requesting fingerprint...')\n config = {\n 'host': os.environ.get('ACR_HOST'),\n 'access_key': os.environ.get('ACR_ACCESS_KEY'), \n 'access_secret': os.environ.get('ACR_ACCESS_SECRET'),\n 'timeout': 10\n }\n recognizer = ACRCloudRecognizer(config)\n mp3_path = helpers.get_mp3_output_path()\n start_seconds = 0\n rec_length = 
helpers.get_recording_length_seconds()\n result = json.loads(recognizer.recognize_by_file(mp3_path, start_seconds, rec_length))\n save_fingerprint_result_to_file(result)\n if int(result['status']['code']) == 0:\n try:\n song = {\n 'name': get_song_name_from_result(result),\n 'id': get_spotify_id_from_result(result),\n 'artist': get_first_artist_from_result(result),\n 'seconds_remaining': get_song_seconds_remaining(result),\n 'percent_remaining': get_song_percent_remaining(result),\n }\n print('Song Found: {}'.format(song['name']))\n return song\n except KeyError:\n pass\n return None", "def _get_kid(message) -> str:\n if KID in message.phdr.keys():\n return base64.b64encode(message.phdr[KID]).decode(\"UTF-8\")\n return base64.b64encode(message.uhdr[KID]).decode(\"UTF-8\")", "def ssl_fingerprint(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"ssl_fingerprint\")", "def id(self):\n\n return sha256(self.pub.export()).digest()", "def get_fingerprint_info_ext(fingerprint):\n extension = _FINGERPRINT_NOT_FOUND % fingerprint\n return extension", "def _GetServerKey(self, peer_id):\n return hashlib.sha224(peer_id + self.network_id).hexdigest()", "def gen_Fernet_key():\n\tkey = Fernet.generate_key()\n\treturn key", "def public_key(self):\n keyfile = self._get_field('System', 'keyfile')\n return join(self.key_path, keyfile)", "def generate_key(self):\n return str(uuid4())", "def get_public_key(self, uid: str) -> str:\n return self.context.get(\n \"/dsum/public_key/%s\" % uid, None, \"DSum: failed retrieving the Curve 25519 private key with uid: %s\" % uid)['key']", "def get_signature(self):\n\n\t\treturn self.boot_sector_data[3 : 11]", "def key(self):\n return str(self._id)", "def fnv1(self, key):\n # hash = 0xff\n hash = 0xcbf29ce484222325\n for n in key.encode():\n # print(n)\n hash = hash ^ n\n hash = hash * 0x100000001b3\n\n # print(hash)\n return hash", "def get_key_id(self):", "def _Hash(self):\n fullhash = util.Hash(util.IntToBytes(len(self.key_bytes)), self.key_bytes)\n return util.Encode(fullhash[:keyczar.KEY_HASH_SIZE])", "def get_fingerprints(self, jid: JID) -> List[str]:\n return []", "def _Hash(self):\n fullhash = util.PrefixHash(self.key_bytes)\n return util.Base64WSEncode(fullhash[:constants.KEY_HASH_SIZE])", "def get_file_key(fname, hash_length=HASH_LENGTH, size_length=SIZE_LENGTH):\n fhash = get_file_hash(fname, hash_length)\n fsize = get_file_size(fname, size_length)\n return fhash + fsize", "def uuid(self):\n try:\n return self.keystore['id']\n except KeyError:\n return None", "def key(self):\n key = self.name\n if self.task:\n key += getsource(self.task.__class__)\n return hashlib.md5(key.encode()).hexdigest()", "def _get_raw_key(self, key_id):", "def key_id(key, origin=None):\n\n rdata = _to_rdata(key, origin)\n if key.algorithm == RSAMD5:\n return (rdata[-3] << 8) + rdata[-2]\n else:\n total = 0\n for i in range(len(rdata) // 2):\n total += (rdata[2 * i] << 8) + rdata[2 * i + 1]\n if len(rdata) % 2 != 0:\n total += rdata[len(rdata) - 1] << 8\n total += (total >> 16) & 0xFFFF\n return total & 0xFFFF", "def get_ssh_fingerprint(request, ip_address):\n try:\n ssh_fingerprint = usm_wrapper_utils.get_host_ssh_key(ip_address)\n except Exception, e:\n log.exception(e)\n return Response(\n {'message': 'Error while getting fingerprint'}, status=417)\n\n return Response({'ssh_key_fingerprint': ssh_fingerprint[0]}, status=200)", "def key_id(self):\n return self._key_id", "def public_key(self):\n return f'PublicKey = {self._peer.public_key}'", "def _get_key(self):\n if not 
self.session:\n key = self.key\n else:\n key = self.session.get(\"_signature_key\")\n if key is None:\n key = str(uuid.uuid1())\n self.session[\"_signature_key\"] = key\n return key", "def key(self):\n return self.account_name()", "def key(self):\n return self.account_name()", "def get_key(self):\n return self._determine_key()", "def key_for_signature(self, data, sig):\n verification = self.verify(data, sig)\n return PublicKey.objects.filter(\n fingerprint=verification.fingerprint,\n profile__verified=True,\n ).first()", "def get_public_key(self):\n return self.private_key.get_verifying_key()", "def get(self):\n return str(self.physical_key)", "def fingerprint(path, fs_options={}, fs=None):\n fs, path = parse(path, fs_options, fs=fs)\n path = stringyfy(path)\n if fs is None:\n mtime = os.path.getmtime(path)\n size = os.path.getsize(path)\n else:\n info = fs.get_file_info([path])[0]\n mtime = info.mtime_ns\n size = info.size\n import vaex.cache\n return vaex.cache.fingerprint(('file', (path, mtime, size)))", "def format_fingerprint(fpr):\n count = 0\n fingerprint = ''\n chunks = [i for i in re.split('([A-F0-9]{4})', fpr) if i]\n for chunk in chunks:\n count += 1\n fingerprint += ' %s' % chunk\n if count == len(chunks)/2:\n fingerprint += ' '\n\n return fingerprint.strip()", "def key(self) -> str:\n return pulumi.get(self, \"key\")", "def key(self) -> str:\n return pulumi.get(self, \"key\")", "def key(self) -> str:\n return pulumi.get(self, \"key\")", "def key(self) -> str:\n return pulumi.get(self, \"key\")", "def key(self) -> str:\n return pulumi.get(self, \"key\")", "def key(self) -> str:\n return pulumi.get(self, \"key\")", "def key(self) -> str:\n return pulumi.get(self, \"key\")", "def key(self) -> str:\n return pulumi.get(self, \"key\")", "def key(self) -> str:\n return pulumi.get(self, \"key\")", "def key(self) -> str:\n return pulumi.get(self, \"key\")", "def key(self) -> str:\n return pulumi.get(self, \"key\")", "def key(self) -> str:\n return pulumi.get(self, \"key\")", "def key(self) -> str:\n return pulumi.get(self, \"key\")", "def key(self) -> str:\n return pulumi.get(self, \"key\")", "def key(self) -> str:\n return pulumi.get(self, \"key\")", "def key(self) -> str:\n return pulumi.get(self, \"key\")", "def key(self) -> str:\n return pulumi.get(self, \"key\")", "def key(self) -> str:\n return pulumi.get(self, \"key\")", "def key(self) -> str:\n return pulumi.get(self, \"key\")", "def key(self) -> str:\n return pulumi.get(self, \"key\")", "def key(self) -> str:\n return pulumi.get(self, \"key\")", "def key(self) -> str:\n return pulumi.get(self, \"key\")", "def key(self) -> str:\n return pulumi.get(self, \"key\")", "def key(self) -> str:\n return pulumi.get(self, \"key\")" ]
[ "0.77391666", "0.76859707", "0.7634195", "0.7514036", "0.7471615", "0.7366445", "0.7218066", "0.71334374", "0.70777875", "0.7041138", "0.68841106", "0.68285894", "0.6695113", "0.6680278", "0.66001993", "0.66001993", "0.66001993", "0.65500057", "0.6528393", "0.6461605", "0.6425575", "0.6385517", "0.63824254", "0.63767177", "0.6375026", "0.6371168", "0.63486654", "0.6319337", "0.63149935", "0.6297245", "0.6273641", "0.623665", "0.6235725", "0.6220784", "0.62070686", "0.6125547", "0.61109805", "0.60615593", "0.6038407", "0.6010458", "0.5997788", "0.59686655", "0.5950095", "0.5931207", "0.59170157", "0.58953804", "0.5892806", "0.5892587", "0.58786905", "0.5876926", "0.58720034", "0.58619374", "0.5856066", "0.58331335", "0.5830695", "0.58234197", "0.5818578", "0.5814307", "0.5799953", "0.57967216", "0.57899266", "0.578457", "0.5768932", "0.57546794", "0.57518667", "0.57289225", "0.5721569", "0.5710053", "0.5706586", "0.5706586", "0.56940603", "0.5693364", "0.56792456", "0.5672814", "0.5657632", "0.5636847", "0.56367415", "0.56367415", "0.56367415", "0.56367415", "0.56367415", "0.56367415", "0.56367415", "0.56367415", "0.56367415", "0.56367415", "0.56367415", "0.56367415", "0.56367415", "0.56367415", "0.56367415", "0.56367415", "0.56367415", "0.56367415", "0.56367415", "0.56367415", "0.56367415", "0.56367415", "0.56367415", "0.56367415" ]
0.7981021
0
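The fingerprint row builds directly on the identifier: it keeps only the first 4 bytes. A one-line sketch (the identifier argument stands for any 20-byte key identifier, such as the HASH160 value sketched above):

    def fingerprint(identifier: bytes) -> bytes:
        # The fingerprint is the first 4 bytes of the key identifier.
        return identifier[:4]

    assert fingerprint(bytes(range(20))) == b"\x00\x01\x02\x03"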
Generates a Base58Check encoding of this key.
def to_b58check(self, testnet=False): b = self.testnet_bytes if testnet else bytes(self) return base58.b58encode_check(b)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def to_base58(self) -> str:\n return base58.b58encode(self.raw).decode('utf-8')", "def to_base58(self) -> str:\n return base58.b58encode(self.raw).decode('utf-8')", "def __str__(self):\n return gphBase58CheckEncode(self._hex)", "def forge_base58(value: str) -> bytes:\n return base58_decode(value.encode())", "def b2a_base58check(data):\n\n return encoding.b2a_hashed_base58(data)", "def encode_base58(v):\n prefix = ''\n while v.startswith(b'\\0'):\n prefix += B58[0]\n v = v[1:]\n if v:\n return prefix + \"\".join(map(B58.__getitem__,changebase(v,256,58)))\n else:\n return prefix", "def b2a_hashed_base58(data):\n return b2a_base58(data + double_sha256(data)[:4])", "def b58encode(v):\n _b58chars = '123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz'\n _b58base = len(_b58chars)\n\n #(c style int->base256)\n long_value = 0L\n for (i, c) in enumerate(v[::-1]):\n long_value += (256**i) * ord(c)\n result = ''\n while long_value >= _b58base:\n div, mod = divmod(long_value, _b58base)\n result = _b58chars[mod] + result\n long_value = div\n result = _b58chars[long_value] + result\n zeropad = 0\n for c in v:\n if c == '\\x00':\n zeropad += 1\n else:\n break\n return '1'*zeropad + result", "def base58_encode(v: bytes, prefix: bytes) -> bytes:\n try:\n encoding = next(\n encoding\n for encoding in base58_encodings\n if len(v) == encoding[3] and prefix == encoding[0]\n )\n except StopIteration:\n raise ValueError('Invalid encoding, prefix or length mismatch.')\n\n return base58.b58encode_check(encoding[2] + v)", "def encodeBase58(b):\n\n # Convert big-endian bytes to integer\n n = int('0x0' + hexlify(b).decode('utf8'), 16)\n\n # Divide that integer into bas58\n res = []\n while n > 0:\n n, r = divmod(n, 58)\n res.append(B58_DIGITS[r])\n res = ''.join(res[::-1])\n\n # Encode leading zeros as base58 zeros\n czero = b'\\x00'\n if sys.version > '3':\n # In Python3 indexing a bytes returns numbers, not characters.\n czero = 0\n pad = 0\n for c in b:\n if c == czero:\n pad += 1\n else:\n break\n return B58_DIGITS[0] * pad + res", "def o_b58(r160, magicbyte=0):\n from re import match as re_match\n inp_fmtd = chr(int(magicbyte)) + r160\n leadingzbytes = len(re_match('^\\x00*', inp_fmtd).group(0))\n checksum = hashlib.sha256(hashlib.sha256(inp_fmtd).digest()).digest()[:4]\n return '1' * leadingzbytes + encode(decode(inp_fmtd + checksum, 256), 58, 0)", "def b58encode(v):\n\n long_value = 0\n for (i, c) in enumerate(v[::-1]):\n long_value += (256 ** i) * int(c)\n\n result = ''\n while long_value >= b58_base:\n div, mod = divmod(long_value, b58_base)\n result = b58_chars[mod] + result\n long_value = div\n result = b58_chars[long_value] + result\n\n # Bitcoin does a little leading-zero-compression:\n # leading 0-bytes in the input become leading-1s\n nPad = 0\n for c in v:\n if c == 0:\n nPad += 1\n else:\n break\n\n return (b58_chars[0] * nPad) + result", "def b58encode(v):\r\n\r\n long_value = 0L\r\n for (i, c) in enumerate(v[::-1]):\r\n long_value += (256**i) * ord(c)\r\n\r\n result = ''\r\n while long_value >= __b58base:\r\n div, mod = divmod(long_value, __b58base)\r\n result = __b58chars[mod] + result\r\n long_value = div\r\n result = __b58chars[long_value] + result\r\n\r\n # Bitcoin does a little leading-zero-compression:\r\n # leading 0-bytes in the input become leading-1s\r\n nPad = 0\r\n for c in v:\r\n if c == '\\0': nPad += 1\r\n else: break\r\n\r\n return (__b58chars[0]*nPad) + result", "def b2a_base58(s):\n v, prefix = to_long(256, lambda x: x, iterbytes(s))\n s = from_long(v, prefix, 
BASE58_BASE, lambda v: BASE58_ALPHABET[v])\n return s.decode(\"utf8\")", "def generate_key(self)->bytes:\n return os.urandom(32)", "def base58_encode(num, alphabet=ALPHABET):\n if (num == 0):\n return alphabet[0]\n arr = []\n base = len(alphabet)\n while num:\n rem = num % base\n num = num // base\n arr.append(alphabet[rem])\n arr.reverse()\n return ''.join(arr)", "def encode_base58(bytestring):\n # Count zero's\n zeros = 0\n for i in range(len(bytestring)):\n if bytestring[i] == 0:\n zeros += 1\n else:\n break\n\n n = int.from_bytes(bytestring, 'big')\n\n result = ''\n (n, rest) = divmod(n, 58)\n while n or rest:\n result += digits58[rest]\n (n, rest) = divmod(n, 58)\n return zeros * '1' + result[::-1] # reverse string", "def a2b_base58check(string):\n\n try:\n return encoding.a2b_hashed_base58(string)\n except encoding.EncodingError:\n raise Error('Invalid base58check string')", "def encode_base58(bytestring):\n # Count zero's\n zeros = 0\n for i in range(len(bytestring)):\n if bytestring[i] == 0:\n zeros += 1\n else:\n break\n try:\n n = int.from_bytes(bytestring, 'big')\n except AttributeError:\n # Python version < 3.2\n n = _bytes_to_long(bytestring, 'big')\n result = ''\n (n, rest) = divmod(n, 58)\n while n or rest:\n result += digits58[rest]\n (n, rest) = divmod(n, 58)\n return zeros * '1' + result[::-1] # reverse string", "def bip38(priv, passphrase, quiet=False):\n prnt('\\nCalculating encrypted private key...\\n', quiet)\n addr = o_pub_to_addr(o_priv_to_pub(priv))\n#1 Compute the Bitcoin address (ASCII), and take the first four bytes of SHA256(SHA256()) of it.\n addrhash = hashlib.sha256(hashlib.sha256(addr).digest()).digest()[:4] # salt\n\n#2. Derive a key from the passphrase using scrypt\n# a. Parameters: passphrase is the passphrase itself encoded in UTF-8.\n# addresshash came from the earlier step, n=16384, r=8, p=8, length=64\n# (n, r, p are provisional and subject to consensus)\n# b. 
Let's split the resulting 64 bytes in half, and call them derivedhalf1 and derivedhalf2.\n # scrypt(password, salt, n, r, p, buflen):\n scryptedkey = scrypt(passphrase, addrhash, 16384, 8, 8, 64, quiet)\n half1 = scryptedkey[0:32]\n half2 = scryptedkey[32:64]\n\n#3 AES encryptedhalf1 = AES256Encrypt(bitcoinprivkey[0...15] xor derivedhalf1[0...15], derivedhalf2)\n priv256 = encode(priv, 256, 32)\n aes4b38 = Aes(half2) # set AES object key\n ehalf1 = aes4b38.enc(sxor(priv256[:16], half1[:16]))\n\n#4 AES encryptedhalf2 = AES256Encrypt(bitcoinprivkey[16...31] xor derivedhalf1[16...31], derivedhalf2)\n ehalf2 = aes4b38.enc(sxor(priv256[16:32], half1[16:32]))\n\n#5 Base58 ( 0x01 0x42 + flagbyte + salt + encryptedhalf1 + encryptedhalf2 )\n fbyte = chr(0b11100000) # 11 noec 1 compressedpub 00 future 0 ec only 00 future\n encrypted_privkey = ('\\x01\\x42' + fbyte + addrhash + ehalf1 + ehalf2)\n encrypted_check = hashlib.sha256(hashlib.sha256(encrypted_privkey).digest()).digest()[:4]\n return b58encode(encrypted_privkey + encrypted_check)", "def from_b58check(key):\n return HDKey.from_bytes(base58.b58decode_check(key))[0]", "def forge_public_key(value) -> bytes:\n prefix = value[:4]\n res = base58.b58decode_check(value)[4:]\n\n if prefix == 'edpk':\n return b'\\x00' + res\n elif prefix == 'sppk':\n return b'\\x01' + res\n elif prefix == 'p2pk':\n return b'\\x02' + res\n\n raise ValueError(f'Unrecognized key type: #{prefix}')", "def a2b_base58(s):\n v, prefix = to_long(BASE58_BASE, lambda c: BASE58_LOOKUP[c], s.encode(\"utf8\"))\n return from_long(v, prefix, 256, lambda x: x)", "def gen_key():\n key = os.urandom(32) # 256 bit\n return base64.b64encode(key).rstrip('=') # strip off padding", "def raw(self) -> bytes:\n return bytes(self._verify_key)", "def toHashable(self) -> str:\r\n\r\n return self.toHashBase().encode('utf-8')", "def generate_hash(self):\n if not self.public_key:\n raise ValueError('Requires a public publicKey')\n return self.public_key.encode(encoding='bytes')", "def generate_aes_key ( ) :\n import hashlib\n sr = Crypto.Random.random.StrongRandom( )\n key_bits = sr.getrandbits( 256 )\n sha_key = hashlib.sha256( str( key_bits ) ).digest( )\n return sha_key", "def hash(self) -> bytes:", "def a2b_hashed_base58(s):\n data = a2b_base58(s)\n data, the_hash = data[:-4], data[-4:]\n if double_sha256(data)[:4] == the_hash:\n return data\n raise EncodingError(\"hashed base58 has bad checksum %s\" % s)", "def generate_key():\n\tkey = [ randint(0,255) for i in range(16) ]\n\treturn bytes( key )", "def decode_base58(v):\n prefix = b''\n while v.startswith(B58[0]):\n prefix += b'\\0' \n v = v[1:]\n if v:\n return prefix + bytes(changebase(map(B58.index,v),58,256))\n else:\n return prefix", "def generate_key():\n return unicode(hashlib.sha224(str(random.getrandbits(128))).hexdigest())", "def info_hash_base32(self):\n if getattr(self, '_data', None):\n return b32encode(sha1(bencode(self._data['info'])).digest())\n else:\n raise exceptions.TorrentNotGeneratedException", "def encode_public_key(value: PublicKey) -> bytes:\n return bytes([value.algo.value]) + value.pbk", "def rawPubkey(self):\n # note the first byte determines what type of address\n # and the last four are checksums\n return a2b_base58(self.pubkey)[1:-4]", "def create_hash(self):\n return os.urandom(32).encode('hex')", "def _produce_key(self, passphrase):\n from hashlib import sha256\n pp = bytes(passphrase, 'utf-8')\n hash_alg = sha256(pp)\n for i in range(self._get_key_stretches()):\n d = hash_alg.digest()\n hash_alg.update(d + pp)\n 
return hash_alg.digest()", "def bcur_encode(data):\n cbor = cbor_encode(data)\n enc = bc32encode(cbor)\n h = hashlib.sha256(cbor).digest()\n enc_hash = bc32encode(h)\n return enc, enc_hash", "def _get_encode_random(self):\n return os.urandom(16).encode('hex')", "def __init__(self, key):\n self.bs = 16\n self.key = hashlib.sha256(key.encode()).digest()", "def encode(b):\n\n # Convert big-endian bytes to integer\n n = int(b, 16)\n\n # Divide that integer into bas58\n res = []\n while n > 0:\n n, r = divmod (n, 58)\n res.append(b58_digits[r])\n res = ''.join(res[::-1])\n\n # Encode leading zeros as base58 zeros\n import sys\n czero = b'\\x00'\n if sys.version > '3':\n # In Python3 indexing a bytes returns numbers, not characters.\n czero = 0\n pad = 0\n for c in b:\n if c == czero: pad += 1\n else: break\n return b58_digits[0] * pad + res", "def test_encoder(self):\n from sosbeacon.utils import number_encode\n\n number = 123\n encoded = number_encode(number)\n self.assertEqual(encoded, 'b6')", "def from_base58(cls, address: str) -> 'PublicKey':\n return cls(base58.b58decode(address))", "def b58xpub(parent_fingerprint, public_key, chain, depth, childnr):\r\n\r\n raw = (\r\n b'\\x04\\x88\\xb2\\x1e' +\r\n bytes(chr(depth), 'utf-8') +\r\n parent_fingerprint +\r\n childnr.to_bytes(4, byteorder='big') +\r\n chain +\r\n public_key)\r\n\r\n return b58encode_check(raw)", "def generate_signing_keys():\n return SigningKey.generate(curve=SECP256k1)", "def generate(self):\n if self.curvetype == KeyType.ECDSA_P256v1:\n self.private_key_obj = ec.generate_private_key(ec.SECP256R1(), default_backend())\n elif self.curvetype == KeyType.ECDSA_SECP256k1:\n self.private_key_obj = ec.generate_private_key(ec.SECP256K1(), default_backend())\n self.public_key_obj = self.private_key_obj.public_key()\n self._get_naive_private_key_bytes()\n self._get_naive_public_key_bytes()", "def generate_private_key():\n\treturn binascii.hexlify(os.urandom(32)).decode('utf-8').upper()", "def generate_key():\n return get_random_bytes(KEY_SIZE)", "def hashBase(self) -> str:\r\n\r\n return self.__hash_base", "def generate(self, force=False):\n raise NotImplementedError(\n 'Cannot generate Key of unknown algorithm type. 
Use a subclass.', self\n )", "def create_key ():", "def _Hash(self):\n fullhash = util.Hash(util.IntToBytes(len(self.key_bytes)), self.key_bytes)\n return util.Encode(fullhash[:keyczar.KEY_HASH_SIZE])", "def hasher(cls, data, base_64_encode=False):\n salt_key = cls.base_configuration().get_section_option('app', 'key')\n salt_key = base64.b64decode(salt_key)\n result = hashpw(data.encode('utf-8'), salt_key)\n return base64.b64encode(result) if base_64_encode else result", "def main():\n key, plain = get_key_plain()\n encode(key, plain)", "def _Hash(self):\n fullhash = util.PrefixHash(self.key_bytes)\n return util.Base64WSEncode(fullhash[:constants.KEY_HASH_SIZE])", "def hash(self) -> str:\r\n ...", "def base64_pub_encode(self, key):\n (y, g, p, q) = (str(key.y), str(key.g), str(key.p), str(key.q))\n return base64.b64encode((y + \",\" + g + \",\" + p + \",\" + q).encode('utf-8')).decode('utf-8')", "def generate_random_key():\n return '%030x' % (random.randrange(256**15),)", "def genkey(value, length = 8):\n if not isinstance(value, str):\n raise ValueError('Expected `value` to be `str`.')\n\n return blake2b(value.encode('utf-8'), digest_size=4).hexdigest()", "def bech32_encode(hrp, data):\n combined = data + bech32_create_checksum(hrp, data)\n return hrp + '1' + ''.join([CHARSET[d] for d in combined])", "def b58xprv(parent_fingerprint, private_key, chain, depth, childnr):\r\n\r\n raw = (\r\n b'\\x04\\x88\\xad\\xe4' +\r\n bytes(chr(depth), 'utf-8') +\r\n parent_fingerprint +\r\n childnr.to_bytes(4, byteorder='big') +\r\n chain +\r\n b'\\x00' +\r\n private_key)\r\n\r\n return b58encode_check(raw)", "def bin_encode(input, errors='strict'):\n output = \"\"\n for c in input:\n l = '{0:0>8b}'.format(ord(c))\n output += ''.join(l)\n return (output, len(input))", "def generate_key(random=random.SystemRandom()):\n poly = 0\n while not is_acceptable_multiplier(poly):\n poly = random.getrandbits(61)\n oh = []\n for _ in range(2 * BLOCK_SIZE + TWISTING_COUNT):\n u64 = None\n while u64 is None or u64 in oh:\n u64 = random.getrandbits(64)\n oh.append(u64)\n return UmashKey(poly, oh)", "def test_consistent_encoding_128(self):\n text = u\"abracadabra\" # pylint: disable=redundant-u-string-prefix\n self.assertEqual(\n CityHash128WithSeed(text), CityHash128WithSeed(text.encode(\"utf-8\"))\n )", "def encode(key, value, ber_length=0):\n return bytearray(key) + encode_ber(len(value), ber_length) + bytearray(value)", "def is_hashed_base58_valid(base58):\n try:\n a2b_hashed_base58(base58)\n except EncodingError:\n return False\n return True", "def hash_generator(self, value):\n hash_string = hashlib.sha256(bytes(value))\n return hash_string.hexdigest()", "def encode(self) -> bytes:\n\n return bytes()", "def encode(self) -> bytes:\n\n return bytes()", "def _generate_account_hash (self, account):\n return urlsafe_b64encode(account['email'])", "def as_bytes_compressed(self) -> bytes:\n bits_len = self.end()\n whole_bytes_len = div_ceil(bits_len, 8)\n\n key = self.raw_key()[0:whole_bytes_len]\n\n result = bytearray()\n result += leb128_encode_unsigned(bits_len)\n result += key\n\n # Trim insignificant bits in the last byte:\n bits_in_last_byte = bits_len % 8\n if whole_bytes_len > 0 and bits_in_last_byte != 0:\n tail = self.end() % 8\n result[-1] = reset_bits(result[-1], tail)\n\n return bytes(result)", "def newKeyGenerate():\n generate()\n return '', 204", "def rc4_encode(data, key, encode=base64.b64encode, salt_length=16):\n salt = ''\n for n in range(salt_length):\n salt += chr(random.randrange(256))\n data = salt 
+ crypt(data, sha1(key + salt).digest())\n if encode:\n data = encode(data)\n return data", "def generate_key(self, size):\n key = bytearray()\n for i in range(0,size):\n random_byte = ord(os.urandom(1))\n key.append(random_byte)\n return key", "def _hash_encoder(data: bytes) -> str:\n return base64.urlsafe_b64encode(data).rstrip(b\"=\").decode('ascii')", "def _basehash(self):\n if self.baseseed is not None:\n hashinput = (str(self.baseseed) + ',').encode()\n self.basehash = hashlib.sha256(hashinput)\n else:\n self.basehash = None", "def get_encoded(self):\n return self.key", "def encode_bit_str(self, input):\n byte_len = ceil(len(input) / 8)\n byte_ouput = int(input, 2).to_bytes(byte_len, \"big\")\n return byte_ouput", "def decode_base58(smartAddress, length):\n n = 0\n for char in smartAddress:\n try:\n n = n * 58 + digits58.index(char)\n except:\n msg = u\"Character not part of SmartCashs's base58: '%s'\"\n raise ValueError(msg % (char,))\n\n return n.to_bytes(length, 'big')", "def create_crypt_key():\n\n crypt_key = Fernet.generate_key() # key is type = bytes\n\n crypt_query = 'INSERT INTO Crypt (crypt_key) VALUES (%s)'\n my_cursor.execute(crypt_query, (crypt_key,))\n pw_db.commit()", "def make_hash(self):\n timestamp = str(int(round(time.time()*1000)))\n auth = b64encode(config.username) + ':' \\\n + b64encode(config.password) + ':' \\\n + b64encode(timestamp)\n rsa = RSA.load_pub_key(config.public_key)\n encrypted_auth = rsa.public_encrypt(auth, RSA.pkcs1_padding)\n key = b64encode(encrypted_auth)\n return key", "def generate_symmetric_key():\n return Fernet.generate_key()", "def generate_key(*args, **kwargs):\n return hashlib.md5(generate_str_key(**kwargs).encode()).hexdigest()", "def generate_key():\n key = crypto.Key.generate_key()\n click.echo('Private Key (len {}):: \\n{}'.format(\n len(key.get_privkey()),\n hexlify(key.get_privkey())))\n click.echo('Public Key (len {})::\\n{}'.format(\n len(key.get_pubkey()),\n hexlify(key.get_pubkey())))", "def from_base58(cls, seed: str) -> 'PrivateKey':\n return cls(base58.b58decode(seed))", "def Generate(size=keyinfo.HMAC_SHA1.default_size):\n key_bytes = util.RandBytes(size // 8)\n key_string = util.Base64WSEncode(key_bytes)\n return HmacKey(key_string, size)", "def encode(self, compressed, hash160=False):\n # calculate the bytes\n if compressed:\n prefix = b'\\x02' if self.y % 2 == 0 else b'\\x03'\n pkb = prefix + self.x.to_bytes(32, 'big')\n else:\n pkb = b'\\x04' + self.x.to_bytes(32, 'big') + self.y.to_bytes(32, 'big')\n # hash if desired\n return ripemd160(sha256(pkb)) if hash160 else pkb", "def keyGen(key):\n def leftShift(keyBitList):\n \"\"\"Perform a circular left shift on the first and second five bits\"\"\"\n shiftedKey = [None] * KeyLength\n shiftedKey[0:9] = keyBitList[1:10]\n shiftedKey[4] = keyBitList[0]\n shiftedKey[9] = keyBitList[5]\n return shiftedKey\n\n # Converts input key (integer) into a list of binary digits\n keyList = [(key & 1 << i) >> i for i in reversed(range(KeyLength))]\n permKeyList = [None] * KeyLength\n for index, elem in enumerate(P10table):\n permKeyList[index] = keyList[elem - 1]\n shiftedOnceKey = leftShift(permKeyList)\n shiftedTwiceKey = leftShift(leftShift(shiftedOnceKey))\n subKey1 = subKey2 = 0\n for index, elem in enumerate(P8table):\n subKey1 += (128 >> index) * shiftedOnceKey[elem - 1]\n subKey2 += (128 >> index) * shiftedTwiceKey[elem - 1]\n return (subKey1, subKey2)", "def __bytes__(self) -> bytes:\n from hathor.merged_mining.bitcoin import encode_bytearray, encode_list\n struct_bytes = 
self.header_head\n struct_bytes += encode_bytearray(self.coinbase_head)\n struct_bytes += encode_bytearray(self.coinbase_tail)\n struct_bytes += encode_list(self.merkle_path)\n struct_bytes += self.header_tail\n return struct_bytes", "def raw(self) -> bytes:\n return bytes(self._signing_key)", "def test_encode_pair():\n\tassert encode_pair(0, 0) == 0\n\tassert encode_pair(1, 0) == 1\n\tassert encode_pair(0, 1) == 2\n\tassert encode_pair(4, 6) == 207", "def base58_decode(s):\n if not s:\n return b''\n alphabet = '123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz'\n # Convert the string to an integer\n n = 0\n for c in s:\n n *= 58\n if c not in alphabet:\n raise Exception('Character %r is not a valid base58 character' % c)\n digit = alphabet.index(c)\n n += digit\n\n # Convert the integer to bytes\n h = '%x' % n\n if len(h) % 2:\n h = '0' + h\n # res = \"\"\n res = bytearray.fromhex(h)\n\n # Add padding back.\n pad = 0\n for c in s[:-1]:\n if c == alphabet[0]: pad += 1\n else: break\n return b'\\x00' * pad + res", "def rawPrivkey(self):\n # note the first byte determines what type of address\n # and the last four are checksums\n return a2b_base58(self.privkey)[1:-4]", "def toHashBase(self) -> str:\r\n\r\n if self.hashBase != '':\r\n self_repr = '{}'.format(self.hashBase)\r\n else:\r\n self_repr = ''\r\n self_repr += '{}{}{}'.format(str(self.data), self.version,\r\n self.compatibilityLimit)\r\n if len(self.script) > 0:\r\n self_repr += ' '.join(self.script)\r\n if self.seriesSignature != '':\r\n self_repr += self.seriesSignature\r\n if self.pha != '':\r\n self_repr += self.pha\r\n for key, value in self.identityInfo.items():\r\n self_repr += '{}{}'.format(key, value)\r\n if self.message != '':\r\n self_repr += self.message\r\n\r\n return self_repr", "def generate_input_key(\n self, output: OutputInfo, private_view_key: bytes, private_spend_key: bytes\n ) -> bytes:", "def HashAlgorithm(self) -> _n_7_t_0:", "def gen_sig(key, data):\n signature = hmac.new(key.encode('utf-8'), data.encode('utf-8'), hashlib.sha1)\n\n sig = signature.digest()\n # base64 encode\n b64 = base64.b64encode( sig)\n # url encode\n return b64", "def __generate_key(length):\n if length % 2 != 0:\n raise ValueError(\"'length' must be a multiple of 2\")\n length_bytes = int(length / 2) # length of key in bytes\n key_bytes = os.urandom(length_bytes)\n return binascii.hexlify(key_bytes).decode()", "def test_generate_key(self): \n k = Key().generate()\n self.assertRegex(k, \"[a-zA-Z0-9+\\/]+={0,2}\")" ]
[ "0.7132694", "0.7132694", "0.69438237", "0.6799692", "0.676327", "0.66251224", "0.63663113", "0.6311434", "0.630213", "0.620253", "0.61336774", "0.61109304", "0.6091969", "0.60715467", "0.60662615", "0.60591286", "0.6051299", "0.60092396", "0.59555876", "0.5883445", "0.58622026", "0.5840995", "0.5839764", "0.58336896", "0.58172846", "0.57936716", "0.57754767", "0.57452375", "0.5743015", "0.5690072", "0.5689513", "0.56568205", "0.56542766", "0.5635685", "0.5627469", "0.5600213", "0.5541801", "0.5522884", "0.5516048", "0.55149764", "0.5462944", "0.5449504", "0.5422715", "0.53974026", "0.53742146", "0.5373755", "0.5358951", "0.5319709", "0.5316327", "0.53149253", "0.5303481", "0.53019536", "0.5298943", "0.5296953", "0.5293128", "0.5289453", "0.52880627", "0.5283203", "0.52723765", "0.52618563", "0.52233046", "0.52180064", "0.5207441", "0.5205652", "0.52010596", "0.51987803", "0.5194764", "0.51937705", "0.5192877", "0.5192877", "0.5191926", "0.5184632", "0.51839703", "0.51798433", "0.5178351", "0.51748127", "0.5167637", "0.5165873", "0.5165746", "0.515997", "0.5158454", "0.51555777", "0.5151844", "0.5142516", "0.51322854", "0.51263696", "0.51255447", "0.512007", "0.51004875", "0.50979733", "0.50925505", "0.5090785", "0.5087802", "0.5084756", "0.50799036", "0.5075428", "0.5069384", "0.50617355", "0.5060105", "0.5058889" ]
0.65914536
6
Serialization of the key for testnet.
def testnet_bytes(self): return self._serialize(True)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def serialize_key(key: str) -> bytes:\n return key.encode(\"utf-8\")", "def _dumpKey(self, key):\n return self.serializer.dumpKey((self.path, self._internalNs, key))", "def raw(self) -> bytes:\n return bytes(self._signing_key)", "def save(self):\n if not self.fileKey:\n log.error(\"attempted to save a closed wallet\")\n return\n encrypted = self.fileKey.encrypt(tinyjson.dump(self).encode()).hex()\n w = tinyjson.dump({\n \"keyparams\": self.fileKey.params(),\n \"wallet\": encrypted,\n })\n helpers.saveFile(self.path, w)", "def _serializeKey(entityId, key):\n Identifier.checkIdentifier(key)\n return \"%s\\x1D%s\" % (entityId, key)", "def write_key(self):\n\t key = Fernet.generate_key()\n\t with open(\"key.key\", \"wb\") as key_file:\n\t key_file.write(key)", "def serialize(self):\n byte_array = bytearray()\n header = (\n self.sequence_number | (1 << 63)\n if self.type == KeyType.PUT\n else self.sequence_number\n )\n # append header first\n byte_array.extend(byte_utils.integer_to_n_bytes_array(header, 8))\n pickle_key = pickle.dumps(self.key)\n # key length\n byte_array.extend(byte_utils.integer_to_four_bytes_array(len(pickle_key)))\n # key byte array\n byte_array.extend(pickle_key)\n # it is a put operation, value is needed\n if self.type == KeyType.PUT:\n pickle_value = pickle.dumps(self.value)\n # value length\n byte_array.extend(byte_utils.integer_to_four_bytes_array(len(pickle_value)))\n # value byte array\n byte_array.extend(pickle_value)\n return bytes(byte_array)", "def raw_key(self) -> bytes:\n return bytes(self.data_bytes[ProofPath._Positions.KEY_POS : ProofPath._Positions.KEY_POS + KEY_SIZE])", "def encode_key(self, key):\n return key.private_bytes(\n encoding=serialization.Encoding.PEM,\n format=serialization.PrivateFormat.TraditionalOpenSSL,\n encryption_algorithm=serialization.NoEncryption(),\n ).decode(encoding='UTF-8')", "async def client_public_key(self) -> bytes:\n raise NotImplementedError", "def serialize_key(key) -> str:\n if not isinstance(key, str):\n key = repr(key)\n return key", "def get_encoded(self):\n return self.key", "def create_key ():", "async def server_public_key(self) -> bytes:\n raise NotImplementedError", "def write_key():\n key = fernet.Fernet.generate_key()\n keyfile = open(KEY_PATH,'wb')\n keyfile.write(key)\n keyfile.close()", "def get_key_id(self):\n jwk_data = {\n \"crv\": \"P-256\",\n \"kty\": \"EC\",\n \"x\": base64.urlsafe_b64encode(self.public_key_obj.public_numbers().x.to_bytes(32, \"big\")).decode().replace(\"=\", \"\"),\n \"y\": base64.urlsafe_b64encode(self.public_key_obj.public_numbers().y.to_bytes(32, \"big\")).decode().replace(\"=\", \"\")\n }\n jwk = json.dumps(jwk_data, separators=(',', ':'))\n return hashlib.sha256(jwk.encode()).digest()", "def serialize(self):\n\n if self.ser is not None:\n return self.ser\n\n ser = pack(\"HH\", len(self.inTx), len(self.outTx))\n\n ser += pack(\"32s\", self.R)\n\n # Serialize the In transations\n for intx in self.inTx:\n ser += pack(\"32sI\", intx.tx_id, intx.pos)\n\n # Serialize the Out transactions\n for outtx in self.outTx:\n ser += pack(\"32sQ\", outtx.key_id, outtx.value)\n\n self.ser = ser\n\n return ser", "def generate_keystream(self):", "def key(self):\n return self._key.decode('utf-8')", "def dump(self, include_address=True, include_id=True):\n d = {}\n d['crypto'] = self.keystore['crypto']\n d['version'] = self.keystore['version']\n if include_address and self.address is not None:\n d['address'] = encode_hex(self.address)\n if include_id and self.uuid is not None:\n d['id'] = 
str(self.uuid)\n return json.dumps(d)", "def serialized_private_key(self):\n if self._serialized_private_key is not None:\n return self._serialized_private_key\n\n location = self.settings.Location\n if location.AttachmentName:\n self._serialized_private_key = self.binaries[location.AttachmentName.text].content\n return self._serialized_private_key\n else:\n with open(location.FileName.text, 'rb') as file:\n self._serialized_private_key = file.read()\n return self._serialized_private_key", "def serialize(self, data):\n assert self._key is not None\n assert self._cert is not None\n try:\n data = self._serialize(data)\n signature = b64encode(self._key.sign(data, self._digest))\n signer = self._cert.get_id()\n return self._serialize(dict(data=data,\n signer=signer,\n signature=signature))\n except Exception, exc:\n raise SecurityError(\"Unable to serialize: %r\" % (exc, ))", "def forge_public_key(value) -> bytes:\n prefix = value[:4]\n res = base58.b58decode_check(value)[4:]\n\n if prefix == 'edpk':\n return b'\\x00' + res\n elif prefix == 'sppk':\n return b'\\x01' + res\n elif prefix == 'p2pk':\n return b'\\x02' + res\n\n raise ValueError(f'Unrecognized key type: #{prefix}')", "def serialize(self) -> bytes:\n pass", "def serialize(self) -> bytes:\n pass", "def encode_public_key(value: PublicKey) -> bytes:\n return bytes([value.algo.value]) + value.pbk", "def raw(self) -> bytes:\n return bytes(self._verify_key)", "def encrypt_data(self, params):\n from django.core.signing import dumps\n return dumps(params, salt=self.salt_namespace)", "def _encode_key(self, key: str) -> str:\n return key", "def public_key(self):", "def serialize(self) -> bytes:\n return json_dumps(self._to_dict()).encode()", "def serialize(self) -> bytes:\n return json_dumps(self._to_dict()).encode()", "def serialize(self) -> bytes:\n return json_dumps(self._to_dict()).encode()", "def generate_key(self):\n self.key = Fernet.generate_key()\n with open(\"secret.key\", \"wb\") as key_file:\n key_file.write(self.key)", "def to_json(self) -> Dict[str, Union[List[int], List[str]]]:\n fmt = serialization.PublicFormat.SubjectPublicKeyInfo\n obj: Dict[str, Union[List[int], List[str]]] = {}\n lst = []\n\n for pubkey in self.ringv2.values():\n try:\n pubbytes = pubkey.public_bytes(encoding=serialization.Encoding.DER, format=fmt)\n lst.append(pubbytes)\n except Exception as ex:\n logger.error(\"Could not serialize key: %s\", str(ex))\n\n obj[\"pubkeys\"] = [base64.b64encode(pubkey).decode(\"ascii\") for pubkey in lst]\n obj[\"keyids\"] = list(self.ringv2.keys())\n return obj", "def get_key(self):\r\n return self.__encryption_key", "def write_key(self, keyfile_name):\n\n print(self.key)\n with open(keyfile_name, 'wb') as f:\n f.write(self.key)", "def __repr__(self):\n return '''\n open key ({}, {})\n secret key {}\n '''.format(self.n, self.e, self.__d)", "def serialize(self) -> str:\n pass", "def serializePublicKey(public_key):\n\treturn public_key.public_bytes(\n\t\tencoding=serialization.Encoding.PEM,\n\t\tformat=serialization.PublicFormat.SubjectPublicKeyInfo\n\t)", "def make_hash(self):\n timestamp = str(int(round(time.time()*1000)))\n auth = b64encode(config.username) + ':' \\\n + b64encode(config.password) + ':' \\\n + b64encode(timestamp)\n rsa = RSA.load_pub_key(config.public_key)\n encrypted_auth = rsa.public_encrypt(auth, RSA.pkcs1_padding)\n key = b64encode(encrypted_auth)\n return key", "def public_key(self):\n keyfile = self._get_field('System', 'keyfile')\n return join(self.key_path, keyfile)", "def serialize(self):\n return 
{\n \"key\": self.key,\n \"value\": self.value\n }", "def rawPubkey(self):\n # note the first byte determines what type of address\n # and the last four are checksums\n return a2b_base58(self.pubkey)[1:-4]", "def convert_key_to_string(key):\n\n return key.encode(encoder=nacl.encoding.Base64Encoder).decode('utf-8')", "def generate_key(self)->bytes:\n return os.urandom(32)", "def serialize(self) -> bytes:\n return pickle.dumps(self)", "def serialize(self):\n return {\n 'sid' : self.sid,\n 'name' : self.name,\n 'passwd' : self.passwd,\n 'email' : self.email,\n 'phone' : self.phone,\n 'addr_1' : self.addr_1,\n 'addr_2' : self.addr_2,\n 'city' : self.city,\n 'state' : self.state,\n 'zip' : self.zip,\n }", "def encode(self):\r\n # Create dict from attributes. Maintain added order\r\n #jd = {'txpk': collections.OrderedDict()}\r\n jd = {'txpk':{}}\r\n\r\n for key in self.keys:\r\n val = getattr(self, key)\r\n\r\n if val is not None:\r\n if key == 'data':\r\n jd['txpk'][key] = val.decode('utf-8')\r\n else:\r\n jd['txpk'][key] = val\r\n #print('key',key)\r\n #print('valtype',type(val),val) \r\n #print(jd)\r\n \r\n return dumps(jd, separators=(',', ':'))", "def private_key(self):", "def get_public_key_in_der(self):\n serialized_public = self.public_key_obj.public_bytes(\n encoding=serialization.Encoding.DER,\n format=serialization.PublicFormat.SubjectPublicKeyInfo\n )\n return serialized_public", "def snapshot_encryption_key(self) -> 'outputs.CustomerEncryptionKeyResponse':\n return pulumi.get(self, \"snapshot_encryption_key\")", "def private_key():\n return \"Toholampi summer festival 2017 has the most harcore rock bands\"", "def bit_locker_key(self) -> str:\n return pulumi.get(self, \"bit_locker_key\")", "def main():\n key, plain = get_key_plain()\n encode(key, plain)", "def write_keys(path, keys):\n p_keys = pickle.dumps(keys)\n b_keys = base64.b64encode(p_keys)\n with open(path, \"wb+\") as walletfile:\n walletfile.write(b_keys)", "def __dict__(self):\r\n result = {}\r\n result['block_type'] = 'register'\r\n result['prev_hash'] = base64.b64encode(self.prev_hash).decode()\r\n result['timestamp'] = self.time\r\n result['user_id'] = self.user_id\r\n result['public_key'] = base64.b64encode(self.public_key.public_bytes(serialization.Encoding.X962,\r\n serialization.PublicFormat.CompressedPoint)).decode()\r\n return result", "def rawPrivkey(self):\n # note the first byte determines what type of address\n # and the last four are checksums\n return a2b_base58(self.privkey)[1:-4]", "def generate_key(self):\n return str(uuid4())", "def serializePrivateKey(private_key):\n\treturn private_key.private_bytes(\n\t\tencoding=serialization.Encoding.PEM,\n\t\tformat=serialization.PrivateFormat.PKCS8,\n\t\tencryption_algorithm=serialization.NoEncryption()\n\t)", "def _Hash(self):\n fullhash = util.Hash(util.IntToBytes(len(self.key_bytes)), self.key_bytes)\n return util.Encode(fullhash[:keyczar.KEY_HASH_SIZE])", "def public_key(self):\n return f'PublicKey = {self._peer.public_key}'", "def generate_key():\r\n # generating key\r\n key = Fernet.generate_key()\r\n\r\n key_dir = os.path.join(os.path.dirname(__file__), \"resources/key\")\r\n\r\n # writing key in file\r\n with open(key_dir, \"wb\") as keyFile:\r\n keyFile.write(key)", "def get_key(self) -> str:\n return f'{self.address}_{self.port}'", "def getJSONData(self):\n return {\"pubkey\": self.pubkey, \"privkey\": self.privkey}", "def serialize(self):\n return {\n \"id\": self.id,\n \"sid\": self.sid,\n \"sku\": self.sku,\n \"name\": self.name,\n \"price\": 
self.price,\n \"amount\": self.amount,\n \"create_time\": self.create_time,\n \"update_time\": self.update_time\n }", "def save(self, save_dir):\n path = os.path.join(save_dir, self.name + \".pem\")\n with open(path, \"wb\") as f:\n f.write(self.public_key)", "def from_key(self, public_id, key):\n otp = self.get_otp(key)\n from_key = modhex_encode(public_id.encode('hex')) + modhex_encode(otp.encode('hex'))\n return from_key", "def serialize(self):\n return {\n 'time_stamp' : self.time_stamp,\n 'email' : self.email,\n 'amount_deposit' : self.amount_deposit,\n 'amount_withdraw' : self.amount_withdraw,\n }", "def generate_key():\n key = Fernet.generate_key()\n with open(\"Secret.key\",\"wb\")as key_file:\n key_file.write(key)", "def encryption_key(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"encryption_key\")", "def key(self):\n return constants.DEB_KEY % self.to_dict()", "def serialize(self, value) -> bytes:\n pass", "def serialize_encrypted_data_key(encrypted_data_key):\n encrypted_data_key_format = (\n \">\" # big endian\n \"H\" # key provider ID length\n \"{provider_id_len}s\" # key provider ID\n \"H\" # key info length\n \"{provider_info_len}s\" # key info\n \"H\" # encrypted data key length\n \"{enc_data_key_len}s\" # encrypted data key\n )\n return struct.pack(\n encrypted_data_key_format.format(\n provider_id_len=len(encrypted_data_key.key_provider.provider_id),\n provider_info_len=len(encrypted_data_key.key_provider.key_info),\n enc_data_key_len=len(encrypted_data_key.encrypted_data_key),\n ),\n len(encrypted_data_key.key_provider.provider_id),\n to_bytes(encrypted_data_key.key_provider.provider_id),\n len(encrypted_data_key.key_provider.key_info),\n to_bytes(encrypted_data_key.key_provider.key_info),\n len(encrypted_data_key.encrypted_data_key),\n encrypted_data_key.encrypted_data_key,\n )", "def _GetKeyString(self):", "def _GetKeyString(self):", "def crypt_key(self):\n return self._crypt_key", "def _get_encryption_key(self, **options):\n\n return self._public_key", "def key_to_struct(key: RsaKey) -> bytes:\n mod = int_to_bytes(key.n)\n exponent = int_to_bytes(key.e)\n\n return b\"\\x00\\x00\\x00\\x80\" + mod + b\"\\x00\\x00\\x00\\x03\" + exponent", "def to_dictionary(self):\n return {'pubkey': self.pubkey.to_dictionary(), 'T': self.T, 'y': self.y, 'pi': self.pi}", "def generate_key():\n key = Fernet.generate_key()\n with open(\"pass.key\", \"wb\") as key_file:\n key_file.write(key)", "def __bytes__(self):\n return (\n pack_u32(self.version) +\n bytes(self.prev_block_hash) +\n bytes(self.merkle_root_hash) +\n pack_u32(self.time) +\n pack_u32(self.bits) +\n pack_u32(self.nonce)\n )", "def serialize(self):\n return {\n 'oid' : self.oid,\n 'sid' : self.sid,\n 'stopaddress' : self.stopaddress,\n 'cargosize' : self.cargosize,\n 'totalfee' : self.totalfee,\n # 'status' : self.status,\n 'grade' : self.grade,\n 'comment' : self.comment,\n 'stopaddr_lat' : self.stopaddr_lat,\n 'stopaddr_lng' : self.stopaddr_lng\n }", "def hash(self) -> bytes:\n block_string = json.dumps(self.serialize(), sort_keys=True).encode()\n return bytes.fromhex(hashlib.sha256(block_string).hexdigest())", "def _encode(klass, pwm_str_key):\n\t\t#print(\"Encoding pwm key %s\" % (pwm_str_key,))\n\t\tdec_pwm_key = int(pwm_str_key, 2)\n\t\t#print \"Decimal (PWN) key:\",dec_pwm_key\n\t\tkey_packed = ''\n\t\tfor byte in Lirc._chunk(pwm_str_key, 8, '0'):\n\t\t\tdec_pwm_key = int(byte, 2)\n\t\t\tkey_packed = key_packed + struct.pack(\">B\", dec_pwm_key)\n\t\treturn key_packed", "def dumpprivkey(self, 
address):\n return self.proxy.dumpprivkey(address)", "def get_pub_key(self):\n return \"RSA {0}\".format(self._cert.get_pubkey().bits)", "def serialize(self):\n pass", "def _encode_key(self, key):\n return key.encode() if isinstance(key, str) else key", "def dumps(self, obj, salt=None):\n payload = want_bytes(self.dump_payload(obj))\n rv = self.make_signer(salt).sign(payload)\n if self.is_text_serializer:\n rv = rv.decode(\"utf-8\")\n return rv", "def serialize(self):\n # Curve order and halforder, used to tame ECDSA malleability (see BIP-0062)\n order = Curve.N\n halforder = order >> 1\n # low 'S' malleability breaker\n sigS = self.s\n if sigS > halforder:\n sigS = order - sigS\n # Ensure the encoded bytes for the r and s values are canonical and\n # thus suitable for DER encoding.\n rb = canonicalizeInt(self.r)\n sb = canonicalizeInt(sigS)\n\n # total length of returned signature is 1 byte for each magic and\n # length (6 total), plus lengths of r and s\n length = 6 + len(rb) + len(sb)\n b = ByteArray(0, length=length)\n\n b[0] = 0x30\n b[1] = ByteArray(length - 2, length=1)\n b[2] = 0x02\n b[3] = ByteArray(len(rb), length=1)\n offset = 4\n b[offset] = rb\n offset += len(rb)\n b[offset] = 0x02\n offset += 1\n b[offset] = ByteArray(len(sb), length=1)\n offset += 1\n b[offset] = sb\n return b", "def serialize(self) -> str:\n return json.dumps(self.__dict__)", "def get_private_key_in_der(self):\n serialized_private = self.private_key_obj.private_bytes(\n encoding=serialization.Encoding.DER,\n format=serialization.PrivateFormat.TraditionalOpenSSL,\n encryption_algorithm=serialization.NoEncryption()\n )\n return serialized_private", "def public_key(self):\n return PublicKey(self._sk.public_key().public_bytes(serialization.Encoding.Raw, serialization.PublicFormat.Raw))", "def write_key(key_name):\n key = Fernet.generate_key()\n with open(key_name, \"wb\") as key_file:\n key_file.write(key)", "def serialize(self):\n keys = [\n 'uid',\n 'commit_sha',\n 'timestamp',\n 'filename',\n 'comment',\n 'train_data',\n 'val_data',\n 'test_data',\n 'model_files',\n 'custom_data',\n ]\n data = {key: self.__dict__[key] for key in keys}\n with open(os.path.join(self.root_path, self._data_file), 'w') as file:\n json.dump(data, file)", "def stellar_seed(self) -> str:\n return kin_utils.encode_check('seed', bytes(self._signing_key)).decode()", "def __GetKeyString(self):\n return self._GetKeyString()", "def deserialize_key(key: bytes) -> str:\n return key.decode()", "def get_private_key(self) -> str:\n raise NotImplementedError(\"Please implement your own get_public_key() method\")" ]
[ "0.6948801", "0.6768897", "0.66655487", "0.65602565", "0.6537538", "0.64810574", "0.64295965", "0.6374775", "0.6282263", "0.6258551", "0.6218504", "0.61828756", "0.6135277", "0.6117363", "0.6116815", "0.610087", "0.608277", "0.607366", "0.6041598", "0.6024077", "0.5981999", "0.59671116", "0.5951455", "0.59040624", "0.59040624", "0.58514583", "0.5821516", "0.58196", "0.58105546", "0.57990897", "0.579287", "0.579287", "0.579287", "0.57846755", "0.5753425", "0.57522774", "0.574928", "0.572447", "0.5714402", "0.56646526", "0.56461376", "0.5631177", "0.56263965", "0.5623905", "0.56167555", "0.5613049", "0.561091", "0.56094784", "0.5607013", "0.5602595", "0.5602106", "0.55972064", "0.55919045", "0.5582182", "0.5578408", "0.5565028", "0.55571157", "0.5544517", "0.5537317", "0.5534622", "0.5531335", "0.5521586", "0.551902", "0.5513367", "0.550668", "0.5501857", "0.5498233", "0.5497579", "0.5496075", "0.5495299", "0.5488442", "0.54823726", "0.54781127", "0.54769874", "0.5475188", "0.5475188", "0.5461631", "0.5460348", "0.5460024", "0.545973", "0.54498565", "0.5443823", "0.5441604", "0.54400736", "0.5438582", "0.5437654", "0.5434291", "0.5433277", "0.5432712", "0.54325086", "0.5431553", "0.54306805", "0.5429073", "0.54264987", "0.5425482", "0.5425131", "0.5408264", "0.54069865", "0.54054946", "0.53823805" ]
0.6667217
2
Get inventory list from config files builds a NetworkRunner inventory object and a mac_map dictionary according to ansible inventory file yaml definition
def __init__(self): self.inventory = {} self.mac_map = {} for conffile in CONF.config_file: # parse each config file sections = {} parser = cfg.ConfigParser(conffile, sections) try: parser.parse() except IOError as e: LOG.error(str(e)) # filter out sections that begin with the driver's tag hosts = {k: v for k, v in sections.items() if k.startswith(c.DRIVER_TAG)} # munge the oslo_config data removing the device tag and # turning lists with single item strings into strings for host in hosts: dev_id = host.partition(c.DRIVER_TAG)[2] dev_cfg = {k: v[0] for k, v in hosts[host].items()} for b in c.BOOLEANS: if b in dev_cfg: dev_cfg[b] = types.Boolean()(dev_cfg[b]) self.inventory[dev_id] = dev_cfg # If mac is defined add it to the mac_map if 'mac' in dev_cfg: self.mac_map[dev_cfg['mac'].upper()] = dev_id LOG.info('Ansible Host List: %s', ', '.join(self.inventory))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def main():\n\n # endpdoint = \"restconf/data/ietf-interfaces:interfaces\"\n # endpoint = f\"restconf/data/ietf-interfaces:interfaces/interface={name}\"\n\n if len(argv) > 1:\n try:\n inventory = load_inventory(argv[1])\n except FileExistsError as err:\n print(\"FileExistsError: \", err)\n else:\n print(\"You must provide a path to your inventory file.\")\n sys.exit()\n\n r1 = inventory['dev-r1']\n loop = [interface for interface in r1[\"interface\"] if interface[\"name\"] == \"Loopback0\"][0]\n\n payload = render_payload(\n loop,\n \"interface.j2\"\n )\n\n session = create_session(r1[\"username\"], r1[\"password\"])\n endpoint = f\"restconf/data/ietf-interfaces:interfaces/interface=Loopback0\"\n results = put_request(r1[\"host\"],session, endpoint, payload)\n print(results)\n\n save_endpoint = \"restconf/operations/cisco-ia:save-config/\"\n saved = save_config(r1[\"host\"], session, save_endpoint)\n\n # target_routers = [\"dev-r1\"]\n\n # for host_key, attribs in inventory.items():\n\n # if host_key in target_routers:\n # print(f\"configuring interfaces on {host_key}\")\n\n # # create a session imported from restconf_api\n # session = create_session(attribs)\n\n # # get all interfaces\n # results = get_interface(attribs, session, \"Loopback0\")\n\n # interface = results[\"ietf-interfaces:interface\"]\n\n # print(json.dumps(interface))\n # # convert to yaml\n # # yaml_output = yaml.safe_dump(results)\n # # with open(\"vars/interfaces.yml\", \"w\") as file:\n # # file.write(yaml_output)\n\n # # results = update_interfaces(attribs, session)\n # # print(results.text, results.status_code)\n\n # # print(get_interfaces(attribs, session))", "def build_inventory(self):\n self.inventory = {\n 'all': {\n 'hosts': [],\n 'vars': self.group_variables\n },\n '_meta': {'hostvars': {}}\n }\n\n # add all droplets by id and name\n for droplet in self.data['droplets']:\n for net in droplet['networks']['v4']:\n if net['type'] == 'public':\n dest = net['ip_address']\n else:\n continue\n\n self.inventory['all']['hosts'].append(dest)\n\n self.add_host(droplet['id'], dest)\n\n self.add_host(droplet['name'], dest)\n\n # groups that are always present\n for group in ('digital_ocean',\n 'region_' + droplet['region']['slug'],\n 'image_' + str(droplet['image']['id']),\n 'size_' + droplet['size']['slug'],\n 'distro_' + DigitalOceanInventory.to_safe(droplet['image']['distribution']),\n 'status_' + droplet['status']):\n self.add_host(group, dest)\n\n # groups that are not always present\n for group in (droplet['image']['slug'],\n droplet['image']['name']):\n if group:\n image = 'image_' + DigitalOceanInventory.to_safe(group)\n self.add_host(image, dest)\n\n if droplet['tags']:\n for tag in droplet['tags']:\n self.add_host(tag, dest)\n\n # hostvars\n info = self.do_namespace(droplet)\n self.inventory['_meta']['hostvars'][dest] = info", "def readConfig(file=\"config.ini\"):\n ip_pool = []\n cmd_pool = []\n Config=ConfigParser.ConfigParser()\n Config.read(file)\n machines = Config.items(\"MACHINES\")\n commands = Config.items(\"COMMANDS\")\n for ip in machines:\n ip_pool.append(ip[1])\n for cmd in commands:\n cmd_pool.append(cmd[1])\n print cmd[1]\n return ip_pool,cmd_pool", "def load_config(self, config_src, report_metadata):\n for card_type in config_src.keys(): #card_type is project|assignment|epic\n for board_t in config_src[card_type].keys():\n board_id = config_src[card_type][board_t][':board_id']\n if not board_id in report_metadata: # initialize if the board wasn't present during the iterations over other 
card_type's\n if not board_id in report_metadata[':boards']:\n report_metadata[':boards'][board_id] = {};\n report_metadata[':boards'][board_id][':board_id'] = config_src[card_type][board_t][':board_id'] #copy board id\n report_metadata[':boards'][board_id][':board_name'] = board_t\n if not ':lists' in report_metadata[':boards'][board_id]:\n report_metadata[':boards'][board_id][':lists'] = []\n\n #iterate through all the lists and populate them\n for list_t in config_src[card_type][board_t][':lists'].keys():\n self.logger.debug(\"Adding board %s, list %s to the report\" % (config_src[card_type][board_t][':board_id'], config_src[card_type][board_t][':lists'][list_t]))\n list_id = config_src[card_type][board_t][':lists'][list_t]\n report_metadata[':lists'][list_id] = {};\n report_metadata[':lists'][list_id][':list_id'] = list_id\n report_metadata[':lists'][list_id][':completed'] = False;\n report_metadata[':lists'][list_id][':card_type'] = card_type;\n report_metadata[':lists'][list_id][':board_id'] = board_id\n report_metadata[':boards'][board_id][':lists'].append(list_id)\n if ':done_lists' in config_src[card_type][board_t]:\n for list_t in config_src[card_type][board_t][':done_lists'].keys():\n self.logger.debug(\"Adding board %s, Done list %s to the report\" % (config_src[card_type][board_t][':board_id'], config_src[card_type][board_t][':done_lists'][list_t]))\n list_id = config_src[card_type][board_t][':done_lists'][list_t]\n report_metadata[':lists'][list_id] = {};\n report_metadata[':lists'][list_id][':list_id'] = list_id\n report_metadata[':lists'][list_id][':completed'] = True;\n report_metadata[':lists'][list_id][':card_type'] = card_type;\n report_metadata[':lists'][list_id][':board_id'] = board_id\n report_metadata[':boards'][board_id][':lists'].append(list_id)", "def load(identifier, network):\n file = f\"{network}.{DEPLOYMENTS_FILENAME}\"\n\n if not os.path.exists(file):\n return\n\n with open(file) as fp:\n for line in fp:\n [address, abi, *alias] = line.split(\":\")\n identifiers = [x.strip() for x in [address] + alias]\n if identifier in identifiers:\n yield address, abi", "def generate_config(self):\n self.log.debug(\"generate-config\")\n self.qemu.args = [\n \"-nodefaults\",\n \"-only-migratable\",\n \"-cpu {cpu_model},enforce\",\n # Watch out: kvm.name is used for sanity checking critical actions.\n \"-name {name},process=kvm.{name}\",\n \"-chroot {{chroot}}\",\n \"-runas nobody\",\n \"-serial file:/var/log/vm/{name}.log\",\n \"-display vnc={{vnc}}\",\n \"-pidfile {{pidfile}}\",\n \"-vga std\",\n # We use this '-m' flag to find what a running VM is actually\n # using at the moment. If this flag is changed then that code must\n # be adapted as well. 
This is used in incoming.py and qemu.py.\n \"-m {memory}\",\n \"-readconfig {{configfile}}\",\n ]\n self.qemu.args = [a.format(**self.cfg) for a in self.qemu.args]\n\n vhost = ' vhost = \"on\"' if self.vhost else \"\"\n\n netconfig = []\n for net, net_config in sorted(self.cfg[\"interfaces\"].items()):\n ifname = \"t{}{}\".format(net, self.cfg[\"id\"])\n netconfig.append(\n \"\"\"\n[device]\n driver = \"virtio-net-pci\"\n netdev = \"{ifname}\"\n mac = \"{mac}\"\n\n[netdev \"{ifname}\"]\n type = \"tap\"\n ifname = \"{ifname}\"\n script = \"/etc/kvm/kvm-ifup\"\n downscript = \"/etc/kvm/kvm-ifdown\"\n{vhost}\n\"\"\".format(\n ifname=ifname, mac=net_config[\"mac\"], vhost=vhost\n )\n )\n\n with open(self.vm_config_template) as f:\n tpl = f.read()\n accelerator = (\n ' accel = \"{}\"'.format(self.accelerator)\n if self.accelerator\n else \"\"\n )\n machine_type = detect_current_machine_type(self.machine_type)\n self.qemu.config = tpl.format(\n accelerator=accelerator,\n machine_type=machine_type,\n disk_cache_mode=self.qemu.disk_cache_mode,\n network=\"\".join(netconfig),\n **self.cfg,\n )", "def parse_inventory(filename):\n data = {}\n group = None\n state = None\n\n try:\n inventory = open(filename)\n except Exception as e:\n msg('E', 'Cannot open inventory file %s. %s' % (filename, str(e)))\n\n # Walk through the file and build the data structure\n for line in inventory:\n line = line.strip()\n\n # Skip comments and blank lines\n if line.startswith('#') or line.startswith(';') or len(line) == 0:\n continue\n\n if line.startswith('['):\n # Get group name\n section = line[1:-1]\n\n # Parse subsection\n if ':' in line:\n group, state = line[1:-1].split(':')\n else:\n group = section\n state = 'hosts'\n\n if group not in data:\n data[group] = {}\n\n if state not in data[group]:\n if 'children' not in state:\n data[group][state] = {}\n else:\n data[group][state] = []\n else:\n # Parse hosts or group members/vars\n try:\n tokens = shlex.split(line, comments=True)\n except ValueError as e:\n msg('E', \"Error parsing host definition '%s': %s\" % (line, e))\n\n # Create 'all' group if no group was defined yet\n if group is None:\n group = 'all'\n state = 'hosts'\n data['all'] = {\n 'hosts': []\n }\n\n # Get parsed hostname\n hostname = tokens[0]\n\n # Parse variables\n variables = []\n if state == 'hosts':\n variables = tokens[1:]\n elif state == 'vars':\n variables = tokens\n\n if 'hosts' in state:\n data[group][state].update({hostname: {}})\n\n if 'children' in state:\n data[group][state].append(hostname)\n\n for var in variables:\n if '=' not in var:\n msg(\n 'E',\n \"Expected key=value host variable assignment, \"\n \"got: %s\" % var)\n\n (key, val) = var.split('=', 1)\n\n if 'hosts' in state:\n data[group][state][hostname].update({key: val})\n if 'vars' in state:\n data[group][state].update({key: val})\n # Close file\n try:\n inventory.close()\n except IOError as e:\n msg('E', 'Cannot close inventory file %s. 
%s' % (filename, str(e)))\n\n return data", "def _generate_inventory(self, datapath):\n \n files = [file for file in listdir(datapath) if '.nc' in file and not 'xyz' in file]\n # file_prefixes = list(set([ file.split('_')[0] for file in files ]))\n # file_prefixes = list(set([ \"_\".join(file.split('_')[0:2]) for file in files ]))\n if self.extra_pref:\n file_prefixes = list(set([ \"_\".join(file.split('_')[0:2] + [self.extra_pref]) for file in files ]))\n else:\n file_prefixes = list(set([ \"_\".join(file.split('_')[0:2]) for file in files ]))\n \n inventory = {}\n for file_prefix in file_prefixes:\n fname = path.join(datapath,f'{file_prefix}{self.first_suffix}')\n if not self.metafile:\n self.metafile = fname\n vars = [ var for var in list(Dataset(fname).variables) if var not in self.skip_vars ]\n for var in vars:\n inventory[var] = {'files': sorted([path.join(datapath,file) \n for file in listdir(datapath) if file_prefix in file])}\n return inventory", "def ibns_intf(task):\n # init lists of interfaces\n access_interfaces = []\n uplink_interfaces = []\n # iterate over all interfaces\n for intf in task.host[\"intfs\"]:\n\n # uplink interfaces\n if intf[\"interface\"] in task.host[\"uplinks\"]:\n uplink_interfaces.append(intf)\n\n # other non-excluded access ports\n elif intf[\"interface\"] not in task.host[\"excluded_intf\"]:\n if intf[\"access_vlan\"] in task.host[\"vlans\"]:\n access_interfaces.append(intf)\n\n # assign uplink interface list to task.host\n task.host[\"uplink_interfaces\"] = uplink_interfaces\n # render uplink interface configs\n uplink_intf_cfg = task.run(\n task=text.template_file,\n template=\"IBNS_uplink_intf.j2\",\n path=\"templates/\",\n **task.host,\n )\n # assign access interface list to task.host\n task.host[\"access_interfaces\"] = access_interfaces\n # render access interface configs\n access_intf_cfg = task.run(\n task=text.template_file,\n template=f\"IBNS{task.host['ibns_ver']}_access_intf.j2\",\n path=\"templates/\",\n **task.host,\n )\n\n # init list of L3 vlan interfaces\n l3_vlan_int = [\"Vlan777\"]\n # list of vlan interfaces that will not relay\n no_relay_ints = [\"1\", \"666\", \"667\"]\n # iterate over active L3 interfaces\n for intf in task.host[\"ip_int_br\"]:\n # accept only those that are active vlan interfaces\n if intf[\"intf\"].startswith(\"Vlan\") == True and intf[\"status\"] == \"up\":\n # strip vlan id from interface name\n vlan_id = intf[\"intf\"].strip(\"Vlan\")\n # compare with list of no relay ints\n if vlan_id not in no_relay_ints:\n # add to list of interfaces for ISE DHPC relay\n l3_vlan_int.append(intf[\"intf\"])\n\n # save L3 vlan interfaces to task.host\n task.host[\"l3_vlan_int\"] = l3_vlan_int\n\n if \"emea\" in task.host['region']:\n L3VLAN_template = \"IBNS_EMEA_L3VLAN_intf.j2\"\n else:\n L3VLAN_template = \"IBNS_L3VLAN_intf.j2\"\n\n # render L3 vlan interface configs\n l3_vlan_int_cfg = task.run(\n task=text.template_file,\n template=L3VLAN_template,\n path=\"templates/\",\n **task.host,\n )\n\n # return configuration\n return uplink_intf_cfg.result + access_intf_cfg.result + l3_vlan_int_cfg.result", "def main():\n\n PASS = raw_input('password> ')\n\n with manager.connect(host=HOST, port=PORT, username=USER, password=PASS,\n hostkey_verify=False, device_params={'name': 'default'},\n look_for_keys=False, allow_agent=False) as m:\n\n # print all NETCONF capabilities\n with open('output/netconf_101_capability.txt', 'w') as file:\n for capability in m.server_capabilities:\n file.write(str(capability))\n file.write('\\n')\n\n 
result_xmllist = []\n # run commands on the remote device\n for key in xmlns_dic.keys():\n data = m.get(('subtree', xmlns_dic[key]))\n result_xmllist.append(data)\n\n with open('output/netconf_101_rpc.xml', 'w') as file:\n file.write(str(result_xmllist))\n\n result_jsonlist = []\n for data in result_xmllist:\n # print all in xml\n print(data)\n\n # print all in json\n result_xml_str = repr(data)\n result_json_parsed_str = json.dumps(xmltodict.parse(result_xml_str))\n result_json_parsed_dict = json.loads(result_json_parsed_str)\n\n print(json.dumps(result_json_parsed_dict, indent=4, sort_keys=True))\n result_jsonlist.append(result_json_parsed_dict)\n\n with open('output/netconf_101_rpc.json', 'w') as file:\n json.dump(result_jsonlist, file, indent=4, sort_keys=True)\n\n\n # xml_doc = xml.dom.minidom.parseString(result.xml)\n # mac_address = xml_doc.getElementsByTagName(\"mod:mac_address\")\n # print(mac_address)", "def get_configured_interfaces():\n with manager.connect(host=HOST, port=PORT, username=USER, password=PASS,\n hostkey_verify=False, device_params={'name': 'default'},\n allow_agent=False, look_for_keys=False) as m:\n\n with open(FILE) as f:\n return(m.get_config('running', f.read()))", "def get_hosts(self):\n self.logger.debug(colorama.Fore.BLUE +\n \"jsnapy.cfg file location used : %s\" %\n get_config_location(), extra=self.log_detail)\n self.logger.debug(colorama.Fore.BLUE +\n \"Configuration file location used : %s\" %\n get_path('DEFAULT', 'config_file_path'), extra=self.log_detail)\n \n if self.args.pre_snapfile is not None:\n output_file = self.args.pre_snapfile\n elif self.args.snapcheck is True and self.args.pre_snapfile is None:\n output_file = \"snap_temp\"\n self.snap_del = True\n else:\n output_file = \"\"\n conf_file = self.args.file\n check = self.args.check\n snap = self.args.snap\n if conf_file is not None:\n if os.path.isfile(conf_file):\n config_file = open(conf_file, 'r')\n self.main_file = yaml.load(config_file)\n elif os.path.isfile(os.path.join(get_path('DEFAULT', 'config_file_path'), conf_file)):\n fpath = get_path('DEFAULT', 'config_file_path')\n config_file = open(os.path.join(fpath, conf_file), 'r')\n self.main_file = yaml.load(config_file)\n else:\n self.logger.error(\n colorama.Fore.RED +\n \"ERROR!! 
Config file '%s' is not present \" %\n conf_file, extra=self.log_detail)\n sys.exit(1)\n else:\n if self.args.hostname and self.args.testfiles:\n temp_dict = {'hosts':[{'device':'', 'username':'', 'passwd':''}], 'tests':[]}\n temp_dict['hosts'][0]['device'] = self.args.hostname\n temp_dict['hosts'][0]['username'] = self.args.login\n temp_dict['hosts'][0]['passwd'] = self.args.passwd\n for tfile in self.args.testfiles:\n temp_dict['tests'].append(tfile)\n self.main_file = temp_dict\n\n\n #### if --check option is given for sqlite, then snap file name is not compulsory ####\n #### else exit the function saying arguments not correct ####\n if self.main_file.__contains__(\n 'sqlite') and self.main_file['sqlite'] and self.main_file['sqlite'][0]:\n self.chk_database(\n self.main_file,\n self.args.pre_snapfile,\n self.args.post_snapfile,\n check,\n snap)\n else:\n if (self.args.check is True and (\n self.args.file is None or self.args.pre_snapfile is None or self.args.post_snapfile is None)):\n self.logger.error(colorama.Fore.RED +\n \"Arguments not given correctly, Please refer help message\",\n extra=self.log_detail)\n self.parser.print_help()\n sys.exit(1)\n self.login(output_file)", "def get_hostsdata_from_hostsfile(hosts_file) -> dict:\n\n if not os.path.isfile(hosts_file):\n logger.error(f\"Suzieq inventory {hosts_file} must be a file\")\n print(f\"ERROR: Suzieq inventory {hosts_file} must be a file\")\n sys.exit(1)\n\n if not os.access(hosts_file, os.R_OK):\n logger.error(\"Suzieq inventory file is not readable: {}\", hosts_file)\n print(\"ERROR: hosts Suzieq inventory file is not readable: {}\",\n hosts_file)\n sys.exit(1)\n\n with open(hosts_file, \"r\") as f:\n try:\n data = f.read()\n hostsconf = yaml.safe_load(data)\n except Exception as e:\n logger.error(\"Invalid Suzieq inventory file:{}\", e)\n print(\"Invalid Suzieq inventory file:{}\", e)\n sys.exit(1)\n\n if not hostsconf or isinstance(hostsconf, str):\n logger.error(f\"Invalid Suzieq inventory file:{hosts_file}\")\n print(f\"ERROR: Invalid hosts Suzieq inventory file:{hosts_file}\")\n sys.exit(1)\n\n if not isinstance(hostsconf, list):\n if '_meta' in hostsconf.keys():\n logger.error(\"Invalid Suzieq inventory format, Ansible format??\"\n \" Use -a instead of -D with inventory\")\n print(\"ERROR: Invalid Suzieq inventory format, Ansible format??\"\n \" Use -a instead of -D with inventory\")\n else:\n logger.error(f\"Invalid Suzieq inventory file:{hosts_file}\")\n print(f\"ERROR: Invalid hosts Suzieq inventory file:{hosts_file}\")\n sys.exit(1)\n\n for conf in hostsconf:\n if any(x not in conf.keys() for x in ['namespace', 'hosts']):\n logger.error(\"Invalid inventory:{}, no namespace/hosts sections\")\n print(\"ERROR: Invalid inventory:{}, no namespace/hosts sections\")\n sys.exit(1)\n\n return hostsconf", "def main():\n # Take path argument and list all text files\n\n ip = '10.1.10.100'\n a_user = 'cisco'\n auth_key = 'cisco123'\n encr_key = 'cisco123'\n snmp_user = (a_user, auth_key, encr_key)\n sw1 = (ip, 161)\n\n sysDescr = '1.3.6.1.2.1.1.1.0'\n sysObjectID = '1.3.6.1.2.1.1.2.0'\n sysUpTime = '1.3.6.1.2.1.1.3.0'\n sysContact = '1.3.6.1.2.1.1.4.0'\n sysNmae = '1.3.6.1.2.1.1.5.0'\n ifNumber = '1.3.6.1.2.1.2.1.0'\n\n\n # Uptime when running config last changed\n RunLastChanged = '1.3.6.1.4.1.9.9.43.1.1.1.0'\n\n # Uptime when running config last saved (note any 'write' constitutes a save)\n RunLastSaved = '1.3.6.1.4.1.9.9.43.1.1.2.0'\n\n # Uptime when startup config last saved\n StartLastChanged = 
'1.3.6.1.4.1.9.9.43.1.1.3.0'\n\n ifAlias = '1.3.6.1.2.1.31.1.1.1.18.1'\n ifName = '1.3.6.1.2.1.31.1.1.1.1.1'\n\n snmp_data = snmp_helper.snmp_get_oid_v3(sw1, snmp_user, oid=ifName, auth_proto='sha', encrypt_proto='des')\n #print(snmp_data)\n\n # snmp_get_oid_v3(snmp_device, snmp_user, oid='.1.3.6.1.2.1.1.1.0', auth_proto='sha',\n # encrypt_proto='aes128', display_errors=True):\n\n #snmp_extract(snmp_data):\n\n output = snmp_helper.snmp_extract(snmp_data)\n print output", "def list_inventory(self):\n inventory = {}\n host_vars = {}\n\n for droplet in self.do.droplets:\n for rule in self.group_rules:\n rule.apply(droplet, inventory)\n\n host_vars[droplet[\"ip_address\"]] = {\n \"do_{}\".format(k): v for k, v in droplet.iteritems()\n }\n\n inventory[\"_meta\"] = {\n \"hostvars\": host_vars\n }\n\n return inventory", "def network_config(self):\n\n if self._network_config:\n return self._network_config\n\n interfaces = self.metadata.get('interfaces')\n\n if not interfaces:\n raise Exception(\"Unable to get meta-data from server....\")\n\n # Convert Vultr network configuration to cloudinit.net format\n\n # Example JSON:\n # [\n # {\n # \"ipv4\": {\n # \"additional\": [\n # {\n # \"address\": \"192.0.2.3\",\n # \"netmask\": \"255.255.255.0\"\n # }\n # ],\n # \"address\": \"192.0.2.2\",\n # \"gateway\": \"192.0.2.1\",\n # \"netmask\": \"255.255.255.0\"\n # },\n # \"ipv6\": {\n # \"additional\": [\n # {\n # \"network\": \"2001:0db8:0:2::\",\n # \"prefix\": \"64\"\n # }\n # ],\n # \"address\": \"2001:0db8:0:1:5428:d5ff:fe28:1910\",\n # \"network\": \"2001:0db8:0:1::\",\n # \"prefix\": \"64\"\n # },\n # \"mac\": \"00:00:00:00:00:00\",\n # \"network-type\": \"public\"\n # },\n # ......\n # ]\n\n nic_configs = []\n macs_to_nics = cloudnet.get_interfaces_by_mac()\n LOG.debug(\"nic mapping: %s\", macs_to_nics)\n\n config = []\n for vultr_ip_dict in interfaces:\n mac = vultr_ip_dict[\"mac\"]\n\n if mac not in macs_to_nics:\n raise ValueError(\"Did not find network interface on system \"\n \"with mac '%s'. 
Cannot apply configuration: %s\"\n % (mac_address, nic))\n if_name = macs_to_nics[mac] # if_name = string 'eth0', ...\n if_config= {\n 'type': 'physical',\n 'mac_address': mac,\n 'name': if_name,\n 'subnets': [{\n 'type': 'dhcp',\n 'control': 'auto',\n }\n ]\n }\n config.append(if_config)\n\n LOG.debug(\"nic '%s' configuration: %s\", if_name, if_config)\n\n LOG.debug(\"added dns servers: %s\", self.dns_servers)\n config.append({'type': 'nameserver', 'address': self.dns_servers})\n\n return {'version': 1, 'config': config}", "def _get_config_map():\n path = os.path.join(os.path.dirname(__file__), \"nadamw_configs.json\")\n configs = json.loads(open(path).read())\n return configs", "def readConfig(file=\"dispatcher.conf\"):\n\n parser = configparser.ConfigParser()\n parser.read(file)\n machines = parser.items(\"MACHINES\")\n commands = parser.items(\"COMMANDS\")\n\n return machines, commands", "def main():\n\n\n fab_list = get_fabric_list(SANNAV_IP_ADDRESS, SANNAV_FOS_USERNAME, SANNAV_FOS_PASSWORD)\n\n # Print all known facts about the fabrics and the switches\n # Comment out this print statement if this code will be used to generate\n # an Ansible Tower inventory.\n print(json.dumps(fab_list))\n\n # This section of code formats the results to be in a format acceptable to Ansible Tower (awx).\n # To use it, unblock the following block of code and comment out the preceeding print statement.\n\n _ = \"\"\"\n toAwx = {'_meta': {'hostvars': {}}}\n\n for fabric in fab_list[\"Fabrics\"]:\n toAwx[fabric[\"name\"]] = { 'hosts': []}\n for switch in fabric[\"Switches\"]:\n toAwx[fabric[\"name\"]]['hosts'].append(switch['ipAddress'])\n print(json.dumps(toAwx));\n \"\"\"", "def get_network_config2():\n interfaces = get_interfaces()\n ips = [get_ip_address2(ip) for ip in interfaces]\n return dict(zip(interfaces,ips))", "def network_config(args): # pylint: disable-msg=W0613\n if not NETLOCK.acquire_read(NET_LOCK_TIMEOUT):\n raise HttpReqError(503, \"unable to take NETLOCK for reading after %s seconds\" % NET_LOCK_TIMEOUT)\n try:\n netconf = xivo_config.load_current_configuration()\n return yaml_json.stringify_keys(netconf)\n finally:\n NETLOCK.release()", "def load_networks(self, start=False):\n logging.debug(\"%s load_networks entered\" % self)\n # networks = self.infra['networks']\n all_containers = cf.list_containers()\n if self.container_name in all_containers:\n logging.info(\"found existing container, checking for network configuration\")\n mycontainer = cf.get_container(self.container_name)\n try:\n index = mycontainer.get_object(\"index.json\")\n mconf = json.loads(index.fetch())\n for network in mconf['networks'].keys():\n logging.info(\"loading %s from file\" % network)\n new_network = Network(self, network)\n if mconf['networks'][network].has_key(\"uuid\"):\n uuid = mconf['networks'][network][\"uuid\"]\n # print \"YYY: \", uuid\n new_network.load(uuid, start=start)\n self.networks[network] = new_network\n except Exception, e:\n # print \"ALJKALDFDKSJFLSKJDf\"\n logging.warn(e.message)\n import traceback\n logging.debug(traceback.print_exc())\n \n # check if they exist...\n # for net in networks.keys():\n # # create the network object\n # new_net = Network(self, net) \n # ", "def _get_conf():\n configs = [\"mds_cache_memory_limit\",\n \"mds_cache_reservation\",\n \"mds_health_cache_threshold\"]\n holder = {}\n for config in configs:\n cmd = \"sudo ceph daemon mds.\" \\\n \"$HOSTNAME config show | grep {}\".format(config)\n conf = model.run_on_unit(self.TESTED_UNIT, cmd)\n for i in 
(conf['Stdout'].replace('\"', '')\n .replace(',', '')\n .strip()\n .split(\"\\n\")):\n key, val = i.split(\":\")\n holder[key] = val.strip()\n return holder", "def get_inventory(cls, gen, folder, inv_type='both', return_type='segment', cons_only=True):\n\n inventory = list()\n filename = 'temp_output{}.txt'.format(gen)\n if not os.path.exists(os.path.join(folder,filename)):\n return None\n with open(os.path.join(folder, filename), encoding='utf_8_sig') as f:\n f.readline()\n f.readline()#dunno why i have to do this twice...\n feature_names = f.readline()\n feature_names = feature_names.strip()\n feature_names = feature_names.split('\\t')\n\n for line in f:\n line = line.strip()\n if (not line) or line.startswith('VAR'):\n continue\n elif line.startswith('Phonemes'):\n phonemes = line.split(':')[-1]\n phonemes = phonemes.split(',')\n elif line.startswith('Allophones'):\n allophones = line.split(':')[-1]\n allophones = allophones.split(',')\n allophones = [a.split('~')[-1] for a in allophones]\n phonemes = [p for p in phonemes if not p in allophones]\n break\n else:\n inventory.append(line) #this creates a list of segments with phonological features values\n\n\n if return_type == 'segment':\n new_inventory = dict()\n for line in inventory:\n line = line.split('\\t')\n symbol = line[0]\n features = [sign+name for sign,name in zip(line[1:],feature_names)]\n if inv_type in ['underlying', 'core', 'ur', 'UR'] and symbol in phonemes:\n new_inventory[symbol] = features\n elif inv_type in ['surface', 'sr', 'SR', 'phonetic'] and symbol not in phonemes:\n new_inventory[symbol] = features\n elif inv_type == 'both':\n new_inventory[symbol] = features\n\n elif return_type == 'pyilm':\n new_inventory = list()\n for line in inventory:\n line = line.split('\\t')\n symbol = line[0]\n features = [sign+name for sign,name in zip(line[1:], feature_names)]\n new_inventory.append(phonology.Segment(symbol, features))\n\n elif return_type == 'string':\n new_inventory = [line.split('\\t')[0] for line in inventory]\n if inv_type in ['underlying', 'core', 'ur', 'UR']:\n new_inventory = [seg for seg in new_inventory if seg in phonemes]\n elif inv_type in ['surface', 'sr', 'SR', 'phonetic']:\n new_inventory = [seg for seg in new_inventory if not seg in phonemes]\n #else inv_type=='both', just return the new_inventory variable\n\n return new_inventory", "def ibns_snmp(task):\n snmp_cfg = task.run(\n task=text.template_file,\n template=f\"IBNS_snmp.j2\",\n path=\"templates/\",\n **task.host,\n )\n # return configuration\n return snmp_cfg.result", "def process_inventory(inv_name, output_file):\n try:\n gen_dict = _load_yml(inv_name)\n env_vars_dict = gen_dict.get('deployment-environment')\n out = open(output_file, 'w')\n if env_vars_dict is None or env_vars_dict == {}:\n out.write('---\\n')\n out.write('deployment_environment_variables: {}\\n')\n else:\n out.write('---\\n')\n out.write('deployment_environment_variables:\\n')\n for k in env_vars_dict:\n out.write(' ' + k + ': ' + env_vars_dict[k] + '\\n')\n out.close()\n except Exception:\n sys.stderr.write(\"Unable to write the file: \" + output_file + \"\\n\")\n sys.exit(1)", "def modif_network(self):\n print \"preparation du fichier network interfaces\"\n if version_os[\"OS\"] == \"CentOS\":\n self.exec_cmd(\"cp %s/etc/sysconfig/network_scripts/ifcfg-eth0 %s/etc/sysconfig/network_scripts/ifcfg-eth0.pre.p2v\" % (self.rep_vhosts_vm,self.rep_vhosts_vm))\n else:\n self.exec_cmd(\"cp %s/etc/network/interfaces %s/etc/network/interfaces.post.p2v\" % 
(self.rep_vhosts_vm,self.rep_vhosts_vm))\n self.exec_cmd(\"cp %s/etc/network/interfaces.pre.p2v %s/etc/network/interfaces\" % (self.rep_vhosts_vm,self.rep_vhosts_vm))", "def InitFromCfg(self, cfgfile):\n\n self.cfg = ConfigParser.RawConfigParser()\n self.cfg.read(cfgfile)\n\n # how many universes? read any config items starting with \"universe\"\n\n universes = [item[1] for item in self.cfg.items('DMX')\n if item[0].startswith('universe')]\n\n if len(universes) < 1:\n print 'no universes detected in config file! Bye.'\n exit()\n\n self.universes = universes\n print repr(universes)\n\n board_count = 0\n\n # get a list of pods\n\n podnames = self.cfg.get('pods', 'pods')\n podnames = podnames.split(',')\n\n self.pods = []\n\n for p in podnames:\n\n pname = 'pod' + p\n uni = self.cfg.getint(pname, 'universe')\n new_pod = Pod(pname, uni)\n\n # first, get start addresses of all boards\n nboards = len([item[1] for item in self.cfg.items(pname)\n if item[0].startswith('board')])\n starts = [0] * nboards\n bnames = [(n, 'board' + str(n)) for n in range(nboards)]\n for (n, b) in bnames:\n starts[n] = self.cfg.getint(pname, b)\n\n #print 'pod ' + new_pod.name\n\n # get ordered list of limbs\n lnames = ['branch-1', 'branch-2', 'branch-3', 'branch-4',\n 'branch-5']\n\n for lname in lnames: # for each limb\n\n # get list of branch names for this limb (ending with A, eg)\n lbrnames = [item[0] for item in self.cfg.items(pname)\n if item[0].startswith(lname)]\n\n nbranches = len(lbrnames)\n if nbranches > 0:\n\n # now we have list of branch names for this limb.\n # make a new limb with this many branches\n limb = Limb(p + lname, nbranches)\n\n # now for every branch in this limb, add it to the Limb\n for brname in lbrnames:\n\n data = self.cfg.get(pname, brname)\n data = [int(k) for k in data.split(',')]\n\n # data is a list of [board, rchan,bchan,gchan]\n board = data[0]\n start = starts[board] # start address for this branch\n\n new_branch = Branch(p + brname, start, uni,\n board, (data[1], data[2], data[3]))\n\n data = brname.split('-')\n index = int(data[2])\n\n # print \"adding branch %d\" % index + new_branch.name\n limb.addBranch(index, new_branch)\n\n sys.stdout.flush()\n new_pod.limbs.append(limb)\n self.pods.append(new_pod)\n\n # all boards read in. 
Now create list of limbs and branches[]\n brcount = 0\n self.branches = []\n self.limbs = []\n self.limblist = []\n for pod in self.pods:\n self.limbs.append(pod.limbs)\n self.limblist.extend(pod.limbs)\n for lb in pod.limbs:\n for br in lb.branches:\n br.brindex = brcount\n self.branches.append(br)\n brcount += 1\n\n self.make_branch_matrix()", "def main():\n dump(inventory(), fp=stdout, indent=4)", "def readConfig():\n hosts = []\n domains = []\n with open(\"./host.conf\", \"r\") as fd:\n for line in fd.readlines():\n line = line.strip().split()\n if line != []:\n # Parse config for zone files and hosts\n if line[0] == \"ZONE_FILE:\":\n zoneFile = line[1]\n if line[0] == \"REVERSE_ZONE_FILE:\":\n reverseZoneFile = line[1]\n if line[0] == \"HOST:\":\n hosts.append((line[1], line[2], line[3]))\n if line[0] == \"DOMAIN:\":\n domains.append((line[1], line[2], line[3]))\n\n return zoneFile, reverseZoneFile, hosts, domains", "async def init_hosts(**kwargs):\n\n nodes = {}\n\n inventory = kwargs.pop('inventory', None)\n if not inventory:\n ans_inventory = kwargs.pop('ans_inventory', None)\n else:\n _ = kwargs.pop('ans_inventory', None)\n ans_inventory = None\n\n namespace = kwargs.pop('namespace', 'default')\n passphrase = kwargs.pop('passphrase', None)\n ssh_config_file = kwargs.pop('ssh_config_file', None)\n jump_host = kwargs.pop('jump_host', None)\n jump_host_key_file = kwargs.pop('jump_host_key_file', None)\n ignore_known_hosts = kwargs.pop('ignore_known_hosts', False)\n user_password = kwargs.pop('password', None)\n connect_timeout = kwargs.pop('connect_timeout', 15)\n\n if kwargs:\n logger.error(f'Received unrecognized keywords {kwargs}, aborting')\n sys.exit(1)\n\n if inventory:\n hostsconf = get_hostsdata_from_hostsfile(inventory)\n else:\n hostsconf = yaml.safe_load('\\n'.join(\n convert_ansible_inventory(ans_inventory, namespace)))\n\n if not hostsconf:\n logger.error(\"No hosts specified in inventory file\")\n print(\"ERROR: No hosts specified in inventory file\")\n sys.exit(1)\n\n if jump_host_key_file:\n if not jump_host:\n logger.error(\"Jump host key file specified without jump host\")\n print(\"ERROR: Jump host key file specified without jump host\")\n sys.exit(1)\n else:\n if not os.access(jump_host_key_file, os.F_OK):\n logger.error(\n f\"Jump host key file {jump_host_key_file} does not exist\")\n print(f\"ERROR: Jump host key file {jump_host_key_file} \"\n f\"does not exist\")\n sys.exit(1)\n if not os.access(jump_host_key_file, os.R_OK):\n logger.error(\n f\"Jump host key file {jump_host_key_file} not readable\")\n print(f\"ERROR: Jump host key file {jump_host_key_file} \"\n f\"not readable\")\n sys.exit(1)\n\n for namespace in hostsconf:\n nsname = namespace[\"namespace\"]\n\n tasks = []\n hostlist = namespace.get(\"hosts\", [])\n if not hostlist:\n logger.error(f'No hosts in namespace {nsname}')\n continue\n\n for host in hostlist:\n if not isinstance(host, dict):\n logger.error(f'Ignoring invalid host specification: {host}')\n continue\n entry = host.get(\"url\", None)\n if entry:\n words = entry.split()\n result = urlparse(words[0])\n\n username = result.username\n password = result.password or user_password or \"vagrant\"\n port = result.port\n host = result.hostname\n devtype = None\n keyfile = None\n\n try:\n for i in range(1, len(words[1:])+1):\n if words[i].startswith('keyfile'):\n keyfile = words[i].split(\"=\")[1]\n elif words[i].startswith('devtype'):\n devtype = words[i].split(\"=\")[1]\n elif words[i].startswith('username'):\n username = 
words[i].split(\"=\")[1]\n elif words[i].startswith('password'):\n password = words[i].split(\"=\")[1]\n except IndexError:\n if 'password' not in words[i]:\n logger.error(f'Invalid key {words[i]}, missing \"=\"')\n else:\n logger.error(\n 'Invalid password specification, missing=')\n logger.error(f'Ignoring node {host}')\n continue\n\n newnode = Node()\n tasks += [newnode._init(\n address=host,\n username=username,\n port=port,\n password=password,\n passphrase=passphrase,\n transport=result.scheme,\n devtype=devtype,\n ssh_keyfile=keyfile,\n ssh_config_file=ssh_config_file,\n jump_host=jump_host,\n jump_host_key_file=jump_host_key_file,\n namespace=nsname,\n connect_timeout=connect_timeout,\n ignore_known_hosts=ignore_known_hosts,\n )]\n else:\n logger.error(f'Ignoring invalid host specification: {entry}')\n\n if not tasks:\n logger.error(\"No hosts detected in provided inventory file\")\n return []\n\n for f in asyncio.as_completed(tasks):\n newnode = await f\n if newnode.devtype is None:\n logger.error(\n \"Unable to determine device type for {}:{}\"\n .format(newnode.address, newnode.port))\n else:\n logger.info(f\"Added node {newnode.hostname}:{newnode.port}\")\n\n nodes.update(\n {\"{}.{}\".format(nsname, newnode.hostname): newnode})\n\n return nodes", "def loadCfg(self):\n objFile = open('/usr/local/bin/defaults.bind', 'r')\n fileContents = objFile.read()\n objFile.close()\n cfg = fileContents.split('\\n')\n cfgData = []\n for i in cfg:\n if i.startswith('bind '):\n bind, gKey, kbKey = i.split(' ')\n cfgData.append([gKey, kbKey.rstrip('\\n')])\n return cfgData", "def yaml_inventory(self):\n inventory_file = 'inventory_file'\n with open(inventory_file, 'w') as invfile:\n yaml.dump(self.inventory_dict, invfile, default_flow_style=False, sort_keys=False)", "def ansible_inventory(self):\n path_inventory = u'%s/inventories/%s' % (self.ansible_path, self.environment)\n path_lib = u'%s/library/beehive/' % (self.ansible_path)\n runner = Runner(inventory=path_inventory, verbosity=self.verbosity, \n module=path_lib)\n res = runner.get_inventory()\n resp = []\n for k,v in res.items():\n resp.append({u'group':k, u'hosts':u', '.join(v)})\n self.logger.debug(u'Ansible inventory nodes: %s' % res)\n self.result(resp, headers=[u'group', u'hosts'])", "def _parse_and_validate(raw_config_list):\n items = []\n for raw in raw_config_list:\n\n # Validation.\n for key in CONFIGS_REQUIRED:\n if key not in raw or raw[key] is None:\n raise ConfigError(\"must specify '%s' in item config: %s\" % (key, raw))\n\n if \"version_string\" in raw and not _CONFIG_VERSION_RE.match(str(raw[\"version_string\"])):\n raise ConfigError(\"invalid version string: '%s'\" % raw[\"version_string\"])\n if \"version_string\" not in raw and \"version_hashable\" not in raw and \"version_command\" not in raw:\n raise ConfigError(\"must specify 'version_string', 'version_hashable', or 'version_command' in item config: %s\" % raw)\n\n # Validate shell templates.\n # For these, we don't expand environment variables here, but instead do it at once at call time.\n for key in \"upload_command\", \"download_command\":\n try:\n strif.shell_expand_to_popen(raw[key], {\"REMOTE\": \"dummy\", \"LOCAL\": \"dummy\"})\n except ValueError as e:\n raise ConfigError(\"invalid command in config value for %s: %s\" % (key, e))\n\n # Normalize and expand environment variables.\n for key in \"local_path\", \"remote_prefix\", \"remote_path\":\n if key.startswith(\"/\"):\n raise ConfigError(\"currently only support relative paths for local_path and 
remote_path: %s\" % key)\n raw[key] = raw[key].rstrip(\"/\")\n\n try:\n raw[key] = strif.expand_variables(raw[key], os.environ)\n except ValueError as e:\n raise ConfigError(\"invalid command in config value for %s: %s\" % (key, e))\n\n # Parse enums.\n try:\n raw[\"install_method\"] = InstallMethod[raw[\"install_method\"]]\n except KeyError:\n raise ConfigError(\"invalid install_method: %s\" % raw[\"install_method\"])\n\n # Parse booleans. Values True and False may already be converted.\n try:\n if (type(raw[\"make_backup\"]) is str):\n raw[\"make_backup\"] = raw[\"make_backup\"].lower() in (\"on\", \"t\", \"true\", \"y\", \"yes\")\n except KeyError:\n raise ConfigError(\"invalid make_backup: %s\" % raw[\"make_backup\"])\n\n items.append(Config(**raw))\n\n log.debug(\"final configs: %s\", items)\n return items", "def build_configs():", "def parse_inventory(inventory_fn):\n try:\n if not os.path.exists(inventory_fn):\n log(\"INFO\", \"No inventory file found at {}. Creating an empty one.\".format(inventory_fn))\n return {}\n with open(inventory_fn, 'r') as inventory_file:\n # TODO: verify the contents??\n return json.load(inventory_file)\n except Exception as ex:\n log(\"WARN\", \"Error parsing the inventory file. Assuming an empty inventory: {}\".format(ex))\n return {}", "def _config_interfaces(self):\n self.interfaces['loopback'] = \"127.0.0.1\"\n self.interfaces['internal'] = \"127.0.0.1\"\n self.interfaces['external'] = \"0.0.0.0\"\n self.interfaces[\"any\"] = \"0.0.0.0\"\n self.interfaces[\"localhost\"] = \"127.0.0.1\"", "def loadsavedconfig(configFile=None, chassisIp=None, portList=None, includeCrc=False):\n if '.json' in configFile:\n runjsonconfig(configFile, chassisIp, portList, includeCrc)\n\n if '.ixncfg' in configFile:\n runixncfgconfig(configFile, chassisIp, portList, includeCrc)", "def read_cli_args(self):\n parser = argparse.ArgumentParser(description='Produce an Ansible Inventory file based on DigitalOcean credentials')\n\n parser.add_argument('--list', action='store_true', help='List all active Droplets as Ansible inventory (default: True)')\n parser.add_argument('--host', action='store', help='Get all Ansible inventory variables about a specific Droplet')\n\n parser.add_argument('--all', action='store_true', help='List all DigitalOcean information as JSON')\n parser.add_argument('--droplets', '-d', action='store_true', help='List Droplets as JSON')\n parser.add_argument('--regions', action='store_true', help='List Regions as JSON')\n parser.add_argument('--images', action='store_true', help='List Images as JSON')\n parser.add_argument('--sizes', action='store_true', help='List Sizes as JSON')\n parser.add_argument('--ssh-keys', action='store_true', help='List SSH keys as JSON')\n parser.add_argument('--domains', action='store_true', help='List Domains as JSON')\n parser.add_argument('--tags', action='store_true', help='List Tags as JSON')\n\n parser.add_argument('--pretty', '-p', action='store_true', help='Pretty-print results')\n\n parser.add_argument('--cache-path', action='store', help='Path to the cache files (default: .)')\n parser.add_argument('--cache-max_age', action='store', help='Maximum age of the cached items (default: 0)')\n parser.add_argument('--force-cache', action='store_true', default=False, help='Only use data from the cache')\n parser.add_argument('--refresh-cache', '-r', action='store_true', default=False,\n help='Force refresh of cache by making API requests to DigitalOcean (default: False - use cache files)')\n\n parser.add_argument('--env', '-e', 
action='store_true', help='Display DO_API_TOKEN')\n parser.add_argument('--api-token', '-a', action='store', help='DigitalOcean API Token')\n\n self.args = parser.parse_args()\n\n if self.args.api_token:\n self.api_token = self.args.api_token\n\n # Make --list default if none of the other commands are specified\n if (not self.args.droplets and not self.args.regions and\n not self.args.images and not self.args.sizes and\n not self.args.ssh_keys and not self.args.domains and\n not self.args.tags and\n not self.args.all and not self.args.host):\n self.args.list = True", "def load_config():\n model_type, run_name, run_comment, epoch, verbose = get_args()\n name = run_name + '-' + run_comment\n if model_type == \"s2s\": \n run_title = \"seq2seq\"\n else:\n run_title = \"def2vec\"\n path = \"outputs/{}/logs/{}/config.json\".format(run_title, name)\n config = None\n with open(path) as f:\n config = dict(json.load(f))\n config = load_config(eval=True)\n return (config, name, model_type)", "def _setupFiles(self):\r\n with open(self._conf, 'w') as f:\r\n # Write base config\r\n f.write('lxc.utsname = {0}\\n'.format(self._hostname))\r\n f.write('\\n')\r\n f.write('lxc.rootfs = {0}\\n'.format(self._rootfs))\r\n f.write('lxc.mount = {0}\\n'.format(self._fstab))\r\n\r\n # Write interface config\r\n for name, link, ip, up, down in self._ifs:\r\n f.write('\\n')\r\n f.write('lxc.network.type = veth\\n')\r\n f.write('lxc.network.flags = up\\n')\r\n f.write('lxc.network.name = {0}\\n'.format(name))\r\n\r\n if link:\r\n f.write('lxc.network.link = {0}\\n'.format(link))\r\n\r\n if ip:\r\n f.write('lxc.network.ipv4 = {0}/24\\n'.format(ip))\r\n\r\n if up:\r\n f.write('lxc.network.script.up = {0}\\n'.format(up))\r\n\r\n if down:\r\n f.write('lxc.network.script.down = {0}\\n'.format(down))\r\n\r\n\r\n # Write cgroup config\r\n f.write(_CONFIG_CGROUP)\r\n\r\n # Write capabilities config\r\n # TODO: Add at some point?\r\n # f.write(_CONFIG_CAP)\r\n\r\n with open(self._fstab, 'w') as f:\r\n f.write(_FSTAB_BASE.format(proc=pjoin(self._rootfs, 'proc'),\r\n devpts=pjoin(self._rootfs, 'dev/pts'),\r\n sysfs=pjoin(self._rootfs, 'sys')))\r\n\r\n for src, dst, ro in self._fstabExt:\r\n f.write(_FSTAB_BIND.format(srcDir=src, dstDir=dst,\r\n ro=',ro' if ro else ''))", "def convert_network_configuration(config, dns_servers):\n\n def _get_subnet_part(pcfg, nameservers=None):\n subpart = {'type': 'static',\n 'control': 'auto',\n 'address': pcfg.get('ip_address'),\n 'gateway': pcfg.get('gateway')}\n\n if nameservers:\n subpart['dns_nameservers'] = nameservers\n\n if \":\" in pcfg.get('ip_address'):\n subpart['address'] = \"{0}/{1}\".format(pcfg.get('ip_address'),\n pcfg.get('cidr'))\n else:\n subpart['netmask'] = pcfg.get('netmask')\n\n return subpart\n\n all_nics = []\n for k in ('public', 'private'):\n if k in config:\n all_nics.extend(config[k])\n\n macs_to_nics = cloudnet.get_interfaces_by_mac()\n nic_configs = []\n\n for nic in all_nics:\n\n mac_address = nic.get('mac')\n sysfs_name = macs_to_nics.get(mac_address)\n nic_type = nic.get('type', 'unknown')\n # Note: the entry 'public' above contains a list, but\n # the list will only ever have one nic inside it per digital ocean.\n # If it ever had more than one nic, then this code would\n # assign all 'public' the same name.\n if_name = NIC_MAP.get(nic_type, sysfs_name)\n\n LOG.debug(\"mapped %s interface to %s, assigning name of %s\",\n mac_address, sysfs_name, if_name)\n\n ncfg = {'type': 'physical',\n 'mac_address': mac_address,\n 'name': if_name}\n\n subnets = []\n for 
netdef in ('ipv4', 'ipv6', 'anchor_ipv4', 'anchor_ipv6'):\n raw_subnet = nic.get(netdef, None)\n if not raw_subnet:\n continue\n\n sub_part = _get_subnet_part(raw_subnet)\n if nic_type == 'public' and 'anchor' not in netdef:\n # add DNS resolvers to the public interfaces only\n sub_part = _get_subnet_part(raw_subnet, dns_servers)\n else:\n # remove the gateway any non-public interfaces\n if 'gateway' in sub_part:\n del sub_part['gateway']\n\n subnets.append(sub_part)\n\n ncfg['subnets'] = subnets\n nic_configs.append(ncfg)\n LOG.debug(\"nic '%s' configuration: %s\", if_name, ncfg)\n\n return {'version': 1, 'config': nic_configs}", "def process_config(self, filename):\n \n self.log_message(\"processing config file: \"+filename)\n parser = SafeConfigParser()\n parser.optionxform = str\n parser.read(filename)\n self.source_files[filename] = parser\n \n sections = parser.sections()\n for section in sections:\n \n options = parser.options(section)\n params = {}\n non_std = {}\n for option in options:\n ## any option that ends with the word \"password\" will be encrypted and will automatically be decrypted upon\n ## processing \n if option in self.standard_options:\n params[option] = self.get_value(option, parser.get(section, option))\n else:\n non_std[option] = self.get_value(option, parser.get(section, option))\n\n params['non_std'] = non_std\n params['source_file'] = filename\n params['name']=section\n params['run_date']=self.run_date\n c_entry = ConfigEntry(params)\n if c_entry.ready: \n entry_num = c_entry.get_entry_type()\n self.entries[self.entry_types[entry_num]].append(c_entry)\n self.entry_dict[section] = {'source':filename,'entry':c_entry}\n self.log_message(\"Loaded Config Entry: \"+section)\n else:\n self.log_message(\"Failed to load config entry: \"+section)\n\n return self.entries", "def main():\n\n # the AnsibleModule object will be our abstraction for working with Ansible.\n # This includes instantiation, a couple of common attr that will be the\n # args/params passed to the execution, as well as if the module\n # supports check mode\n module = AnsibleModule(\n argument_spec=dict(\n hostvars=dict(type='raw', required=True),\n report_timestamp=dict(type=str, required=False, default=''),\n registered_dict_name=dict(type=str, required=False, default=\"get_sas_host_details_results\"),\n include_hotfix_report=dict(type=bool, required=False, default=True),\n hotfix_url = dict(type=str, required=True),\n hotfix_master_file = dict(type=str, required=True)\n ),\n supports_check_mode=True\n )\n\n # get module parameters\n hostvars = module.params['hostvars']\n report_timestamp = module.params['report_timestamp']\n registered_dict_name = module.params['registered_dict_name']\n include_hotfix_report = module.params['include_hotfix_report']\n hotfix_url = module.params['hotfix_url']\n hotfix_master_file = module.params['hotfix_master_file']\n\n # Starting in Ansible 2.8.1, there is the potential for hostvars\n # to be passed as a byte string, if the dict is too large\n # This will convert the str back to a dict before proceeding\n if isinstance(hostvars, str):\n hostvars = ast.literal_eval(hostvars.decode())\n\n results = dict()\n results['sas_hosts'] = dict()\n results['created'] = report_timestamp\n\n for inventory_hostname, host_vars in hostvars.items():\n\n # set up returnable values\n unreachable = True\n failed = True\n failure_details = dict(\n msg=\"\",\n rc=0,\n stderr=\"\",\n stdout=\"\",\n )\n\n # get the host details dict\n host_details = host_vars.get(registered_dict_name)\n\n # 
check if the host has the registered dict\n if host_details is not None:\n\n # host details exist, so host was reachable\n unreachable = False\n\n # check if the host failed\n failed = host_details['failed']\n\n # if the module reported a failure, collect details\n if failed:\n failure_details['msg'] = host_details['msg']\n failure_details['rc'] = host_details['rc']\n failure_details['stderr'] = host_details['module_stderr']\n failure_details['stdout'] = host_details['module_stdout']\n else:\n # get module results\n host_results = host_details.get('sas_host_details')\n\n if host_results is not None:\n results['sas_hosts'].update(host_results)\n else:\n failed = True\n\n # if the results dict could not be found, mark the host as unreachable\n if failed or unreachable:\n host_groups = host_vars.get('group_names')\n\n if host_groups is not None and 'sas_all' in host_groups:\n hostname = host_vars.get('ansible_fqdn')\n if hostname is None or hostname == \"\":\n hostname = host_vars.get('ansible_hostname')\n if hostname is None or hostname == \"\":\n hostname = host_vars.get('ansible_host')\n if hostname is None or hostname == \"\":\n hostname = host_vars.get('inventory_hostname')\n if hostname is None or hostname == \"\":\n hostname = inventory_hostname\n\n try:\n host_groups.remove('sas_all')\n host_groups.remove('sas-all')\n except ValueError:\n pass # do nothing\n\n results['sas_hosts'][hostname] = dict(\n _id=hostname.replace('.', '-'),\n _unreachable=unreachable,\n _failed=failed,\n _failure_details=failure_details,\n ansible_host_groups=host_groups\n )\n else:\n pass # this host isn't in sas_all so there's no need to try and report on it\n\n ##################################################################################\n # This section will find all of the hotfixes available and add them to the report.\n ##################################################################################\n\n # There are a few data structures that are complicated enough to warrant a description:\n # fullReport\n # This will hold all of the data in a format condusive to printing it out in the final report. 
This is how\n # It is structured:\n # fullReport (dict):\n # key=Hot Fix Name, point to another dict:\n # key=\"released\", points to a string containing the release date of the hotfix.\n # key= \"installed\", points to a boolean that will reflect whether any of the packages used by this hotfix are installed on any of the machines in the deployment.\n # key=\"upToDate\", point to a boolean that will reflest whether ALL of the packages used by this hotfix are up to date on ALL of the machines in the deployment.\n # key=\"sasnote\", points to another dict:\n # key=SASNote number, points to the description of the SASNote.\n # key=\"package\", points to another dict:\n # key=\"platform\" , points to another dict:\n # key=OS, points to another dict:\n # key=\"version\", points to the string of the version of the package.\n # key=\"installed\", points to a boolean which reflects whether this package is installed on any machine in the deployment.\n # key=\"upToDate\", points to a boolean which reflects whether this package is up to data on ALL of the machines in the deployment.\n # key=\"os\", points to the fully qualified name of the operating system.\n # key=\"arch\", points to the architecture of the OS (NOTE: This does not exist on Windows systems.)\n # key=\"alreadyUpdated\", points to a boolean, which is used to keep track of whether the upToDate has already been set.\n # key=\"installedVersions\", points to another dict:\n # key=machineName, points to a 2 element list:\n # [0]=string containing package version that is currently installed.\n # [1]=boolean reflecting whether this version is at or above the package delevered in this hotfix.\n #\n ###########################################################################\n #\n # packageToHotFix\n # This will hold a dict of lists:\n # key: package name, pointing to a 2 element list:\n # [0] OS\n # [1] The Hotfix that this package is associated with.\n #\n ###########################################################################\n #\n # environmentReportDict\n # This is inherited from the environment report, but it's probably worth documenting what it looks like.\n # There is a lot of data inerherited, and I'm only describing what is used in this script.\n # environmentReportDict\n # key=hostname (for each machine in the deployment), pointing to another dict:\n # key=\"OS\", pointing to string for the OS family.\n # key=\"arch\", pointing to the string for the architecture of the host.\n # key=\"sas_packages\", pointing to another dict:\n # key=package number, pointing to another dict:\n # key=\"attributes\", pointing to another dict:\n # key=\"version\", pointing to a string of the package versions currently installed on the host.\n ############################################################################\n\n results[\"include_hotfix_report\"] = include_hotfix_report\n if include_hotfix_report:\n # This is the URL from which to pull the hotfix files.\n if hotfix_url[-1:] == '/':\n baseURL = hotfix_url\n else:\n baseURL = hotfix_url + '/'\n # This is the master file that lists which other files should be examined for the actual hotfixes themselves.\n masterFile = hotfix_master_file\n # This is the top level object to store the hotfix report information (see above).\n fullReport = {}\n # This is a dict of package to hotfixes (see above).\n packageToHotfix = {}\n # This boolean will help with debugging.\n debug = False\n\n try:\n # Parse the master file to obtain where the hotfix files are.\n masterFileXML = urllib2.urlopen(baseURL + 
masterFile)\n\n # Parse the master file and build a list of all files.\n allFilesRoot = ET.fromstring(masterFileXML.read())\n results[\"contact_hotfix_website\"] = True\n except urllib2.URLError :\n results[\"contact_hotfix_website\"] = False\n results[\"master_website\"] = baseURL + masterFile\n if debug:\n print(\"***** Error parsing \" + baseURL + masterFile)\n print(traceback.format_exc())\n print(\"***** No hot fix information obtained. Skipping hot fix report.\\n\\n\")\n\n if results[\"contact_hotfix_website\"]:\n # Loop through the files discoverd in the master file\n if debug:\n print(\"Building hot fix report, based on master file input.\")\n for file_tag in allFilesRoot.findall('File'):\n currentFile = file_tag.get('fileName')\n fileToParse = baseURL + currentFile\n # Retrieve each file.\n # Inside of each file, the lines are keyed by the hot fix id. There are three types of lines, in order:\n # 1) id and release date\n # 2) id, sasnote, sasnotetitle\n # 3) id, OS, package.\n # This script loops through to build a dictionary of dictonaries with the basic structure:\n # ID\n # Release Date\n # SASNotes\n # SASNote and Title\n # ...\n # Packages\n # Package Name, Version, and OS\n try:\n currentFileXML = urllib2.urlopen(fileToParse)\n currentFileRoot = ET.fromstring(currentFileXML.read())\n updateID = \"\"\n for update_tag in currentFileRoot.findall('update'):\n currentUpdate = update_tag.get('id')\n releaseDate = update_tag.get('released')\n # To get the top level Dictionary seeded with the hot fix Name and release date.\n if releaseDate is not None:\n if currentUpdate in fullReport:\n if debug:\n print(\"WARNING! Hot Fix \" + currentUpdate + \" already discovered. Skipping\")\n updateID = \"DUPLICATE-SKIP\"\n else:\n # The SCXXXX hot fixes are special. The package files are only included in\n # Viya_<version>_<platform>_home.xml files. So, the entries in the\n # scheduled_update_<platform>_<shipevent>.xml files can be skipped.\n if currentUpdate.startswith(\"SC\") and currentFile.find(\"scheduled_update_\") < 0:\n continue\n updateID = currentUpdate\n fullReport[updateID] = {}\n fullReport[updateID][\"release_date\"] = releaseDate\n fullReport[updateID][\"installed\"] = False\n fullReport[updateID][\"upToDate\"] = False\n # To get the SASNote information under the hot fix\n else:\n if updateID == \"DUPLICATE-SKIP\":\n continue\n sasNote = update_tag.get('sasnote')\n sasNoteTitle = update_tag.get('sasnoteTitle')\n if sasNote is not None:\n if \"sasnote\" not in fullReport[updateID]:\n fullReport[updateID][\"sasnote\"] = {}\n # This string needs to be encoded because some non-ASCII characters are\n # in some of the titles.\n fullReport[updateID][\"sasnote\"][sasNote] = sasNoteTitle.encode('utf-8')\n # To get the Package information under the hot fix.\n else:\n os = update_tag.get(\"os\")\n fullPackage = update_tag.get(\"package\")\n if fullPackage is not None:\n if \"package\" not in fullReport[updateID]:\n fullReport[updateID][\"package\"] = {}\n\n lastPeriodIndex = fullPackage.rfind(\".\")\n # Format the package information.\n # Windows does not have a dash in the version; Linux does. So, we need to break differently,\n # depending on the OS.\n if os.lower().find(\"windows\") > -1:\n versionStartIndex = fullPackage.rfind(\"-\")\n achitectureStartIndex = -1\n versionEndIndex = lastPeriodIndex\n osFamily = \"Windows\"\n else:\n versionStartIndex = fullPackage.rfind(\"-\", 0, fullPackage.rfind(\"-\"))\n # Linux has architecture in the package. 
This will be stored in its own key.\n achitectureStartIndex = fullPackage.rfind(\".\", 0, lastPeriodIndex)\n # SLES has the string 'suse' in its package. This will strip it out (as well as an extra .).\n if os.lower().find(\"suse\") > -1:\n versionEndIndex = achitectureStartIndex - 5\n osFamily = \"Suse\"\n else:\n if os.lower().find(\"yocto\") > -1:\n versionEndIndex = achitectureStartIndex - 6\n osFamily = \"Yocto\"\n else:\n if os.lower().find(\"ubuntu\") > -1:\n versionStartIndex = fullPackage.rfind(\"_\", 0, fullPackage.rfind(\"_\"))\n versionEndIndex = fullPackage.rfind(\"_\")\n achitectureStartIndex = versionEndIndex\n osFamily = \"Ubuntu\"\n else:\n if os.lower().find(\"red hat enterprise linux 7\") > -1:\n versionStartIndex = versionStartIndex = fullPackage.rfind(\":\")\n versionEndIndex = len(fullPackage)\n achitectureStartIndex = -1\n osFamily = \"RedHat\"\n else:\n versionEndIndex = achitectureStartIndex\n osFamily = \"RedHat\"\n package = fullPackage[:versionStartIndex]\n packageVersion = fullPackage[versionStartIndex + 1:versionEndIndex]\n architecture = fullPackage[achitectureStartIndex + 1:lastPeriodIndex]\n\n if package not in fullReport[updateID][\"package\"]:\n fullReport[updateID][\"package\"][package] = {}\n if \"platform\" not in fullReport[updateID][\"package\"][package]:\n fullReport[updateID][\"package\"][package][\"platform\"] = {}\n if osFamily not in fullReport[updateID][\"package\"][package][\"platform\"]:\n fullReport[updateID][\"package\"][package][\"platform\"][osFamily] = {}\n fullReport[updateID][\"package\"][package][\"platform\"][osFamily][\"version\"] = packageVersion\n fullReport[updateID][\"package\"][package][\"platform\"][osFamily][\"installed\"] = False\n fullReport[updateID][\"package\"][package][\"platform\"][osFamily][\"upToDate\"] = False\n fullReport[updateID][\"package\"][package][\"platform\"][osFamily][\"os\"] = os\n fullReport[updateID][\"package\"][package][\"platform\"][osFamily][\"installedVersions\"] = {}\n if achitectureStartIndex != -1:\n fullReport[updateID][\"package\"][package][\"platform\"][osFamily][\"arch\"] = architecture\n # This property is used to make sure that when evaluating the installed packages,\n # the upToDate=false does not get overridden by a True at the end.\n fullReport[updateID][\"package\"][package][\"platform\"][osFamily][\"alreadyUpdated\"] = False\n\n # Add to the package to hot fix dict.\n if package not in packageToHotfix:\n packageToHotfix[package] = []\n packageToHotfix[package].append([osFamily, updateID])\n\n except ET.ParseError:\n if debug:\n print(\"***** Error parsing \" + fileToParse)\n print(traceback.format_exc())\n print(\"***** Skipping file.\\n\\n\")\n except urllib2.HTTPError:\n if debug:\n print(\"***** Cannot access \" + fileToParse)\n print(traceback.format_exc())\n print(\"***** Skipping the file.\\n\\n\")\n except:\n if debug:\n print(\"***** Error encountered with \" + fileToParse)\n print(traceback.format_exc())\n print(\"***** Skipping the file.\\n\\n\")\n\n if debug:\n print(\"**** Build complete. 
Here are the hot fixes:\")\n print_Full_Report(fullReport)\n print(\"***********************************************************************************\")\n print(\"**** Here is the package to hot fix dict:\")\n print(\"***********************************************************************************\")\n for current_package in packageToHotfix:\n print(\" \" + current_package)\n for machine_list in packageToHotfix[current_package]:\n print(\" \" + machine_list[0] + \" @ \" + machine_list[1] + \".\")\n print(\"***********************************************************************************\")\n print(\"Report built.\")\n print(\"Accessing environment Data.\")\n\n for currentMachine in results['sas_hosts']:\n if not results['sas_hosts'][currentMachine][\"_unreachable\"] and not results['sas_hosts'][currentMachine][\"_failed\"]:\n currentOS = results['sas_hosts'][currentMachine]['os']['family']\n for currentPackage in results['sas_hosts'][currentMachine]['sas_packages']:\n if currentPackage in packageToHotfix:\n for osHotfix in packageToHotfix[currentPackage]:\n if osHotfix[0] == currentOS:\n currentHotfix = osHotfix[1]\n installedVersion = \\\n results['sas_hosts'][currentMachine]['sas_packages'][currentPackage]['attributes']['version']\n if installedVersion.endswith('.suse'):\n installedVersion = installedVersion[:-5]\n else:\n if installedVersion.endswith('.yocto'):\n installedVersion = installedVersion[:-6]\n else:\n if '_' in installedVersion:\n installedVersion = installedVersion[0:installedVersion.rfind(\"_\")]\n hotfixVersion = fullReport[currentHotfix][\"package\"][currentPackage][\"platform\"][currentOS][\"version\"]\n upToDate = compare_versions(installedVersion, hotfixVersion) >= 0\n fullReport[currentHotfix][\"installed\"] = True\n fullReport[currentHotfix][\"package\"][currentPackage][\"platform\"][currentOS][\"installed\"] = True\n # If a previous pacakage marked updateToDate=True, it can still be pulled back to false if another package isn't\n # up to date. 
If the previous package was marked upToDate=false, the hotfix cannot be marked true.\n if not fullReport[currentHotfix][\"package\"][currentPackage][\"platform\"][currentOS][\"alreadyUpdated\"] or \\\n (fullReport[currentHotfix][\"package\"][currentPackage][\"platform\"][currentOS][\"alreadyUpdated\"] and\n fullReport[currentHotfix][\"package\"][currentPackage][\"platform\"][currentOS][\"upToDate\"]):\n fullReport[currentHotfix][\"package\"][currentPackage][\"platform\"][currentOS][\"upToDate\"] = upToDate\n fullReport[currentHotfix][\"package\"][currentPackage][\"platform\"][currentOS][\"alreadyUpdated\"] = True\n fullReport[currentHotfix][\"package\"][currentPackage][\"platform\"][currentOS][\"installedVersions\"][currentMachine] = [installedVersion, upToDate]\n\n if debug:\n print(\"Comparing evironment data to hotfix data.\")\n for currentHotFix in fullReport:\n cumulativeOverallUpToDate = True\n # This will only allow the top level \"upToDate\" property to be set, if there is a package installed on this OS.\n allowTopLevelUpdate = False\n for currentPackage in fullReport[currentHotFix][\"package\"]:\n cumulativeOSUpToDate = True\n for currentOS in fullReport[currentHotFix][\"package\"][currentPackage][\"platform\"]:\n if len(fullReport[currentHotFix][\"package\"][currentPackage][\"platform\"][currentOS][\"installedVersions\"]) > 0:\n cumulativeOSUpToDate = cumulativeOSUpToDate and \\\n fullReport[currentHotFix][\"package\"][currentPackage][\"platform\"][currentOS][\n \"upToDate\"]\n allowTopLevelUpdate = True\n\n cumulativeOverallUpToDate = cumulativeOverallUpToDate and cumulativeOSUpToDate\n if allowTopLevelUpdate:\n fullReport[currentHotFix][\"upToDate\"] = cumulativeOverallUpToDate\n\n # Now that the fullReport has been updated, go back and add to results, for the final report.\n results[\"available_hotfixes\"] = {}\n results[\"installed_hotfixes\"] = {}\n\n for currentHotfix in fullReport:\n if not fullReport[currentHotfix][\"installed\"]:\n continue\n if fullReport[currentHotfix][\"upToDate\"]:\n hotfix_dict_to_use = \"installed_hotfixes\"\n else:\n hotfix_dict_to_use = \"available_hotfixes\"\n results[hotfix_dict_to_use][currentHotfix] = {}\n results[hotfix_dict_to_use][currentHotfix][\"release_date\"] = fullReport[currentHotfix][\"release_date\"]\n results[hotfix_dict_to_use][currentHotfix][\"packages\"] = []\n for currentPackage in fullReport[currentHotfix][\"package\"]:\n for currentOS in fullReport[currentHotfix][\"package\"][currentPackage][\"platform\"]:\n if not fullReport[currentHotfix][\"package\"][currentPackage][\"platform\"][currentOS][\"installed\"]:\n continue\n for currentHost in fullReport[currentHotfix][\"package\"][currentPackage][\"platform\"][currentOS][\"installedVersions\"]:\n temp_dict = {}\n temp_dict[\"hostname\"] = currentHost\n temp_dict[\"package\"] = currentPackage\n temp_dict[\"installed_version\"] = fullReport[currentHotfix][\"package\"][currentPackage][\"platform\"][currentOS][\"installedVersions\"][currentHost][0]\n temp_dict[\"hotfix_version\"] = fullReport[currentHotfix][\"package\"][currentPackage][\"platform\"][currentOS][\"version\"]\n temp_dict[\"up_to_date\"] = fullReport[currentHotfix][\"package\"][currentPackage][\"platform\"][currentOS][\"installedVersions\"][currentHost][1]\n results[hotfix_dict_to_use][currentHotfix][\"packages\"].append(temp_dict)\n # Format the SAS Note description so that we can respect any HTML tags that are included in the text.\n results[hotfix_dict_to_use][currentHotfix][\"sas_notes\"] = {}\n for current_number 
in fullReport[currentHotfix][\"sasnote\"]:\n # Honor any html that is coming through.\n temp_sasnote_description = fullReport[currentHotfix][\"sasnote\"][current_number]\n temp_sasnote_description = temp_sasnote_description.replace(\"&lt;\", \"<\")\n temp_sasnote_description = temp_sasnote_description.replace(\"&gt;\", \">\")\n # Build a link to the URL for the SAS Note.\n hot_fix_prefix = current_number[:2]\n hot_fix_postfix = current_number[2:]\n sas_note_url = \"http://support.sas.com/kb/\" + hot_fix_prefix + \"/\" + hot_fix_postfix + \".html\"\n sas_note_html_link = \"<a href=\\\"\" + sas_note_url + \"\\\"\\>\" + current_number + \"</a>\"\n results[hotfix_dict_to_use][currentHotfix][\"sas_notes\"][current_number] = {\"sas_note_link\":sas_note_html_link, \"description\":temp_sasnote_description}\n\n # in the event of a successful module execution, you will want to\n # simple AnsibleModule.exit_json(), passing the key/value results\n #\n # changed will always be 'False' since we'll never alter state on a host\n module.exit_json(changed=False, processed_host_details=results)", "def _create_dict_from_file(self, **kwargs):\r\n\r\n if not self.linux_handle.download(local_file='ipsec.conf', remote_file=self.conf_path, protocol='scp'):\r\n self.linux_handle.log(\"Downloading ipsec.conf file failed\")\r\n raise Exception(\"Downloading ipsec.conf file failed \")\r\n self.linux_handle.log(\"Reading ipsec.conf file\")\r\n try:\r\n with open('ipsec.conf', 'r') as f:\r\n lines = f.readlines()\r\n except Exception as err:\r\n self.linux_handle.log(level='ERROR', messsage=\"Unable to open file ipsec.conf\")\r\n raise err\r\n ipsec_conf_dict = dict()\r\n line_key = ''\r\n for line in lines:\r\n line = line.strip()\r\n if re.match('#', line) or not line:\r\n next\r\n elif re.match('conn ', line) or re.match('config setup', line):\r\n # (conn_string, conn_name) = line.split()\r\n ipsec_conf_dict[line] = dict()\r\n line_key = line\r\n elif re.search('=', line):\r\n (key, value) = line.split('=', 1)\r\n ipsec_conf_dict[line_key][key] = value\r\n else:\r\n print(\"\\n None matched line: %s\" % line)\r\n print(ipsec_conf_dict)\r\n return ipsec_conf_dict", "def get_inventory(self, context):\n # See below some example code demonstrating how to return the resource structure\n # and attributes. 
In real life, of course, if the actual values are not static,\n # this code would be preceded by some SNMP/other calls to get the actual resource information\n '''\n # Add sub resources details\n sub_resources = [ AutoLoadResource(model ='Generic Chassis',name= 'Chassis 1', relative_address='1'),\n AutoLoadResource(model='Generic Module',name= 'Module 1',relative_address= '1/1'),\n AutoLoadResource(model='Generic Port',name= 'Port 1', relative_address='1/1/1'),\n AutoLoadResource(model='Generic Port', name='Port 2', relative_address='1/1/2'),\n AutoLoadResource(model='Generic Power Port', name='Power Port', relative_address='1/PP1')]\n\n\n attributes = [ AutoLoadAttribute(relative_address='', attribute_name='Location', attribute_value='Santa Clara Lab'),\n AutoLoadAttribute('', 'Model', 'Catalyst 3850'),\n AutoLoadAttribute('', 'Vendor', 'Cisco'),\n AutoLoadAttribute('1', 'Serial Number', 'JAE053002JD'),\n AutoLoadAttribute('1', 'Model', 'WS-X4232-GB-RJ'),\n AutoLoadAttribute('1/1', 'Model', 'WS-X4233-GB-EJ'),\n AutoLoadAttribute('1/1', 'Serial Number', 'RVE056702UD'),\n AutoLoadAttribute('1/1/1', 'MAC Address', 'fe80::e10c:f055:f7f1:bb7t16'),\n AutoLoadAttribute('1/1/1', 'IPv4 Address', '192.168.10.7'),\n AutoLoadAttribute('1/1/2', 'MAC Address', 'te67::e40c:g755:f55y:gh7w36'),\n AutoLoadAttribute('1/1/2', 'IPv4 Address', '192.168.10.9'),\n AutoLoadAttribute('1/PP1', 'Model', 'WS-X4232-GB-RJ'),\n AutoLoadAttribute('1/PP1', 'Port Description', 'Power'),\n AutoLoadAttribute('1/PP1', 'Serial Number', 'RVE056702UD')]\n\n return AutoLoadDetails(sub_resources,attributes)\n '''\n\n self._log(context, 'Begin autoload')\n resources = []\n attributes = []\n\n\n attributes.append(AutoLoadAttribute('', 'replication_address', self.get_replication_address(context)))\n attributes.append(AutoLoadAttribute('', 'connection_key', self.get_connection_key(context)))\n\n networks = self._get_newtork_interfaces(context)\n self._log(context, 'got networks')\n\n controllers = self._get_controllers(context)\n self._log(context, 'got controllers')\n ports = self._get_ports(context)\n\n model = None\n for controller in controllers:\n self._log(context, 'Processing ctrlt: ' + controller['name'] + ':' + controller['model'])\n resources.append(AutoLoadResource(model='Generic Storage Controller', name=controller['name'],\n relative_address=controller['name']))\n if model is None:\n model = controller['model']\n\n attributes.append(AutoLoadAttribute('', 'Model', model))\n\n for network in networks:\n self._log(context, 'Processing netwk: ' + network['name'] + ':' + str(network['address']))\n net_name = network['name']\n controller = net_name.split('.')[0]\n if 'vir0' in controller or 'vir1' in controller:\n attributes.append(AutoLoadAttribute('',str(controller + '_address'), str(network['address'])))\n continue\n if 'vir' in controller:\n continue\n if 'management' not in network['services']:\n continue\n resources.append(AutoLoadResource(model='Storage Network Port', name=net_name,\n relative_address=controller.upper() + '/' + str(network['address'])))\n\n for port in ports:\n if port['iqn'] is not None:\n port_name = port['name']\n controller = port_name.split('.')[0]\n resources.append(AutoLoadResource(model='iSCSI Storage Port', name=port['name'],\n relative_address=controller + '/' + port['portal']))\n attributes.append(AutoLoadAttribute(controller + '/' + port['portal'], 'iqn', port['iqn']))\n elif port['wwn'] is not None:\n port_name = port['name']\n controller = port_name.split('.')[0]\n 
resources.append(AutoLoadResource(model='FC Storage Port', name=port['name'],\n relative_address=controller + '/' + port['name'].split('.')[1]))\n attributes.append(AutoLoadAttribute(controller + '/' + port['name'].split('.')[1], 'wwn', port['wwn']))\n\n return AutoLoadDetails(resources, attributes)", "def get_config(client):\n func = client.get_config()\n config = run_in_loop_now('get_config', func)\n\n a = {}\n b = {}\n for i in config['activity']:\n a[i['label']] = i['id']\n b[i['id']] = i['label']\n activities_by_name = a\n activities_by_id = b\n d = {}\n for device in config['device']:\n device_cmds = []\n for grp in device['controlGroup']:\n for fnc in grp['function']:\n device_cmds.append(json.loads(fnc['action'])['command'])\n d[device['label']] = {\"id\": device['id'],\n \"cmds\": device_cmds}\n devices = d\n return config", "def readTestFile(self, filename):\n size = 0\n agentNum = 0\n block = {}\n agentList = []\n f = open(filename, 'r')\n for line in f:\n if line[0] != '#':\n c = line.split(' ')\n if c[0] == 'grid':\n size = int(line[5:7])\n elif c[0] =='block':\n block[(int(c[2]), int(c[1]))] = (int(c[3]) - int(c[1]) + 1, int(c[4]) - int(c[2]) + 1)\n elif c[0] == 'nets':\n agentNum = int(c[1])\n elif c[0] == 'net' or c[0] == 'xet':\n print(c)\n agentList.append([int(c[1]), (int(c[3]), int(c[2])), (int(c[6]), int(c[5]))])\n f.close()\n print(size)\n print(block)\n print(agentNum)\n print(agentList)\n return size, block, agentNum, agentList", "def config():\n return {\n \"CLEAN_OUTBOX\": \"TRUE\",\n \"COMPONENT_NAME\": \"testing-unpacker\",\n \"DEST_SITE\": \"WIPAC\",\n \"FILE_CATALOG_REST_TOKEN\": \"fake-file-catalog-token\",\n \"FILE_CATALOG_REST_URL\": \"http://kVj74wBA1AMTDV8zccn67pGuWJqHZzD7iJQHrUJKA.com/\",\n \"HEARTBEAT_PATCH_RETRIES\": \"3\",\n \"HEARTBEAT_PATCH_TIMEOUT_SECONDS\": \"30\",\n \"HEARTBEAT_SLEEP_DURATION_SECONDS\": \"60\",\n \"INPUT_STATUS\": \"unpacking\",\n \"LTA_REST_TOKEN\": \"fake-lta-rest-token\",\n \"LTA_REST_URL\": \"http://RmMNHdPhHpH2ZxfaFAC9d2jiIbf5pZiHDqy43rFLQiM.com/\",\n \"OUTPUT_STATUS\": \"completed\",\n \"PATH_MAP_JSON\": \"/tmp/lta/testing/path_map.json\",\n \"RUN_ONCE_AND_DIE\": \"False\",\n \"SOURCE_SITE\": \"NERSC\",\n \"UNPACKER_OUTBOX_PATH\": \"/tmp/lta/testing/unpacker/outbox\",\n \"UNPACKER_WORKBOX_PATH\": \"/tmp/lta/testing/unpacker/workbox\",\n \"WORK_RETRIES\": \"3\",\n \"WORK_SLEEP_DURATION_SECONDS\": \"60\",\n \"WORK_TIMEOUT_SECONDS\": \"30\",\n }", "def build_configs(subnets, host_count, dev_div, domain=None):\n global VERBOSE\n jsons = [] # subnet breakdown\n unlabeled_hosts = [] # number of hosts in the network w/o roles\n ip_addr = [] # keeping track of the 2nd and 3rd octets in IP\n roles = dict.fromkeys(dev_div.keys(), 0)\n\n if len(subnets)/254 > 254:\n print(\"WARNING: You're about to see some really sick IPs. 
Have fun.\")\n\n for n in subnets:\n addy = (randint(0,253), randint(0,253))\n while addy in ip_addr:\n addy = (randint(0,253), randint(0,253))\n ip_addr.append(addy)\n jsons.append({\n \"start_ip\" : '10.{}.{}.2'.format(addy[0],addy[1]),\n \"subnet\" : '10.{}.{}.0/24'.format(addy[0], addy[1]),\n \"hosts\" : n,\n \"roles\" : roles.copy()\n })\n unlabeled_hosts.append(n)\n if VERBOSE:\n print(\"start_ip: {}\\t number of hosts: {}\\t\".format(jsons[-1]['start_ip'], jsons[-1]['hosts']))\n\n # divvy up the roles, now that the subnets are defined\n labeled_hosts = 0\n for dev in dev_div:\n dev_total = dev_div[dev]\n labeled_hosts += dev_total\n while dev_total > 0:\n while True:\n n = randrange(0, len(subnets))\n if (unlabeled_hosts[n] > 0):\n jsons[n]['roles'][dev] += 1\n unlabeled_hosts[n] -= 1\n break\n dev_total -= 1\n if labeled_hosts != host_count:\n print(\"WARNING: Labeled hosts ({}) didn't equal host count ({})\".format(labeled_hosts, host_count))\n\n return jsons", "def init_ip(list_of_ips):\n pms = db['phy_mach']\n\n data = []\n id_counter = 1\n\n for ip in list_of_ips:\n info = {}\n\n info['pmid'] = id_counter\n info['vm_count'] = 0\n info['vm_id'] = []\n info['ip'] = ip\n info['uri'] = 'qemu+ssh://'+str(ip)+'/system'\n if ip == '127.0.0.1':\n info['uri'] = 'qemu:///system'\n available = info['available'] = {}\n free = info['free'] = {}\n # TODO: Sun Aug 30 03:32:25 IST 2015 Error Handling.\n conn = libvirt.open(info['uri'])\n available['vcpu'] = free['vcpu'] = conn.getMaxVcpus(None)\n \n mem = conn.getMemoryStats(0) # Returns memory in KiB\n available['memory'] = mem['total']/1024\n \n data.append(info)\n conn.close()\n id_counter += 1\n \n pms.insert(data)", "def _get_pinned_docker_images() -> Mapping[str, Mapping[str, str]]:\n\n pinned_docker_images_file = resources_dir / \"pinned_docker_images.cfg\"\n all_pinned_docker_images = ConfigParser()\n all_pinned_docker_images.read(pinned_docker_images_file)\n return all_pinned_docker_images", "def get_expected_config(vm_name):\n expected_config = copy.copy(\n config.EXTERNAL_VM_CONFIGURATIONS[vm_name]\n )\n testflow.step(\"Get mac range for the environment\")\n default_mac_range = ll_mac_pool.get_mac_range_values(\n ll_mac_pool.get_default_mac_pool()\n )[0]\n expected_config['nic_mac_address']['start'] = default_mac_range[0]\n expected_config['nic_mac_address']['end'] = default_mac_range[1]\n return expected_config", "def __init__(self) -> None:\n self.path_config = '/home/equipment/EQ-scripts/equipment.conf'\n self.configParse()\n self.request_devices = \"\"\"With arm_address as (SELECT av.obj_id device_id,\n av.value_raw house_id\n FROM os_usr.dev_attr_values av\n WHERE av.attr_id = 3),\n swithes as (SELECT device_type_id\n FROM os_eqm.device_types\n WHERE device_class IN\n (\n SELECT device_class_id\n FROM os_eqm.device_classes\n WHERE guid IN\n (\n SELECT obj_guid\n FROM os_lib.objects_in_nav_categories\n WHERE nav_cat_id in\n (\n SELECT nav_cat_id\n FROM nav_categories\n WHERE guid = '75C0F3733B084DBDAC604167D298B2F5'\n )\n )\n ))\n SELECT d.device_id,\n na.net_address,\n dt.name,\n trim(os_usr.ertel_utils.get_prop_str(d.device_id,'MAC_ADRES_USTROJSTVA')) \n mac_sw\n FROM os_usr.geo_addresses ga,\n os_eqm.net_addresses na,\n arm_address arm ,\n device_types dt,\n devices d,\n swithes sw\n WHERE arm.house_id = ga.house_id\n and arm.device_id = d.device_id\n and na.device_id = d.device_id and na.is_management = '1'\n AND dt.device_type_id = d.device_type\n and dt.device_type_id in sw.device_type_id\n and ga.unified_house_id = 
'<house_id>'\n \"\"\"\n self.request_adresses = \"\"\"SELECT av.obj_id device_id, av.value_raw house_id\n FROM os_usr.dev_attr_values av \n WHERE av.attr_id = 2 AND av.VALUE_RAW LIKE '%<house>%'\"\"\"", "def ipv4_interface_setup(self):\n\n for i in self._nodes.items():\n node = i[1]\n\n # Show the current interfaces with IP addresses\n current_ints = VPPUtil.get_int_ip(node)\n if current_ints != {}:\n print(\"\\nThese are the current interfaces with IP addresses:\")\n for items in sorted(current_ints.items()):\n name = items[0]\n value = items[1]\n if \"address\" not in value:\n address = \"Not Set\"\n else:\n address = value[\"address\"]\n print(\"{:30} {:20} {:10}\".format(name, address, value[\"state\"]))\n question = \"\\nWould you like to keep this configuration \" \"[Y/n]? \"\n answer = self._ask_user_yn(question, \"y\")\n if answer == \"y\":\n continue\n else:\n print(\"\\nThere are currently no interfaces with IP \" \"addresses.\")\n\n # Create a script that add the ip addresses to the interfaces\n # and brings the interfaces up\n ints_with_addrs = self._ipv4_interface_setup_questions(node)\n content = \"\"\n for ints in ints_with_addrs:\n name = ints[\"name\"]\n addr = ints[\"addr\"]\n setipstr = \"set int ip address {} {}\\n\".format(name, addr)\n setintupstr = \"set int state {} up\\n\".format(name)\n content += setipstr + setintupstr\n\n # Write the content to the script\n rootdir = node[\"rootdir\"]\n filename = rootdir + \"/vpp/vpp-config/scripts/set_int_ipv4_and_up\"\n with open(filename, \"w+\") as sfile:\n sfile.write(content)\n\n # Execute the script\n cmd = \"vppctl exec {}\".format(filename)\n (ret, stdout, stderr) = VPPUtil.exec_command(cmd)\n if ret != 0:\n logging.debug(stderr)\n\n print(\"\\nA script as been created at {}\".format(filename))\n print(\"This script can be run using the following:\")\n print(\"vppctl exec {}\\n\".format(filename))", "def GetSlotConfiguration(slotnum):\n slotDirectory = GetSlotDirectory(slotnum)\n regionDir = os.path.join(slotDirectory, \"bin\", \"Regions\")\n configFiles = glob.glob(os.path.join(regionDir,\"*.xml\"))\n result = {}\n for config in configFiles:\n if (os.access(config, os.O_RDONLY) == False):\n continue\n tree = ET.parse(config)\n root = tree.getroot()\n record = {}\n for child in root.iter('Config'):\n for entry in child.attrib:\n record[str.lower(entry)] = child.attrib[entry]\n sim_uuid = record['sim_uuid']\n result[sim_uuid] = record\n return result", "def configure(task):\n r = task.run(\n name=\"Base Configuration\",\n task=template_file,\n template=\"base.j2\",\n path=f\"templates/{task.host.nos}\",\n severity_level=0,\n )\n # r.result holds the result of rendering the template\n config = r.result\n\n r = task.run(\n name=\"Loading extra underlay data\",\n task=load_yaml,\n file=f\"extra_data/{task.host}/underlay.yaml\",\n severity_level=0,\n )\n # r.result holds the data contained in the yaml files\n # we load the data inside the host itself for further use\n task.host[\"underlay\"] = r.result\n\n r = task.run(\n name=\"Loading extra evpn data\",\n task=load_yaml,\n file=f\"extra_data/{task.host}/evpn.yaml\",\n severity_level=0,\n )\n # r.result holds the data contained in the yaml files\n # we load the data inside the host itself for further use\n task.host[\"evpn\"] = r.result\n\n r = task.run(\n name=\"Loading extra vxlan data\",\n task=load_yaml,\n file=f\"extra_data/{task.host}/vxlan.yaml\",\n severity_level=0,\n )\n # r.result holds the data contained in the yaml files\n # we load the data inside the host 
itself for further use\n task.host[\"vxlan\"] = r.result\n\n r = task.run(\n name=\"Interfaces Configuration\",\n task=template_file,\n template=\"interfaces.j2\",\n path=f\"templates/{task.host.nos}\",\n severity_level=0,\n )\n # we append the generated configuration\n config += r.result\n\n r = task.run(\n name=\"Routing Configuration\",\n task=template_file,\n template=\"routing.j2\",\n path=f\"templates/{task.host.nos}\",\n severity_level=0,\n )\n config += r.result\n\n r = task.run(\n name=\"EVPN Configuration\",\n task=template_file,\n template=\"evpn.j2\",\n path=f\"templates/{task.host.nos}\",\n severity_level=0,\n )\n config += r.result\n\n r = task.run(\n name=\"Role-specific Configuration\",\n task=template_file,\n template=f\"{task.host['role']}.j2\",\n path=f\"templates/{task.host.nos}\",\n severity_level=0,\n )\n # we update our hosts' config\n config += r.result\n\n task.run(\n name=\"Loading Configuration on the device\",\n task=napalm_configure,\n replace=True,\n configuration=config,\n )", "def action_listall():\n\n def parse_file(filename):\n config = {}\n\n # get all content\n with open(filename, 'r') as f:\n lines = f.read().split('\\n')\n\n # parse the content\n for l_nb in range(len(lines)):\n items = [_.strip() for _ in lines[l_nb].split('#')[0].split('=')]\n if len(items) > 1:\n v = '='.join(items[1:]).strip()\n # handle [...] for param value\n if '[' in v and ']' not in v:\n l_nb += 1\n # get the next line until the array in not closed by ']'\n while ']' not in v:\n v += lines[l_nb].split('#')[0].strip()\n l_nb += 1\n # remove '' around param value\n if v[0] == \"'\" and v[-1:] == \"'\":\n v = v[1:len(v)]\n config[items[0]] = v\n return config\n\n out = []\n for root, dirs, files in os.walk('/etc/xen'):\n for cfgfile in files:\n if cfgfile.endswith('.cfg') and not cfgfile.startswith('.'):\n out.append(parse_file(os.path.join(root, cfgfile)))\n return out", "def system_configuration_extract(parsed_sshow_maps_lst, project_constants_lst, software_path_sr):\n\n # chassis parameters parsing\n chassis_params_df, slot_status_df, licenseport_df = chassis_params_extract(parsed_sshow_maps_lst, project_constants_lst)\n # maps parameters parsing\n maps_params_df = maps_params_extract(parsed_sshow_maps_lst, project_constants_lst)\n # switch parameters parsing\n switch_params_df, switchshow_ports_df = switch_params_extract(chassis_params_df, project_constants_lst)\n # fabric membership pasing (AG swithe information extracted from Principal switches)\n fabricshow_df, ag_principal_df = fabric_membership_extract(switch_params_df, project_constants_lst)\n # portshow statistics parsing\n portshow_df = portcmd_extract(chassis_params_df, project_constants_lst)\n # port sfp and cfg parsing\n sfpshow_df, portcfgshow_df = portcfg_sfp_extract(switch_params_df, project_constants_lst)\n # nameserver parsing\n fdmi_df, nsshow_df, nscamshow_df, nsshow_dedicated_df, nsportshow_df = connected_devices_extract(switch_params_df, project_constants_lst)\n # inter switch connection parsing\n isl_df, trunk_df, porttrunkarea_df, lsdb_df = interswitch_connection_extract(switch_params_df, project_constants_lst)\n # fabric routing parsing\n fcrfabric_df, fcrproxydev_df, fcrphydev_df, lsan_df, fcredge_df, fcrresource_df, fcrxlateconfig_df = \\\n fcr_membership_extract(switch_params_df, project_constants_lst)\n # zoning configuration parsing\n cfg_df, zone_df, alias_df, cfg_effective_df, zone_effective_df, peerzone_df, peerzone_effective_df = \\\n zoning_extract(switch_params_df, project_constants_lst)\n # 
switch sensors parsing\n sensor_df = sensor_extract(chassis_params_df, project_constants_lst)\n # error log parsing\n errdump_df = log_extract(chassis_params_df, project_constants_lst)\n # blade system configuration parsing\n blade_module_df, blade_servers_df, blade_vc_df = blade_system_extract(project_constants_lst)\n # synergy system configuration parsing\n synergy_module_df, synergy_servers_df = synergy_system_extract(project_constants_lst)\n # 3PAR storage system configuration download and parsing\n system_3par_df, port_3par_df, host_3par_df = \\\n storage_3par_extract(nsshow_df, nscamshow_df, project_constants_lst, software_path_sr)\n\n # Huawei OceanStor storage system configuration download and parsing\n system_oceanstor_df, port_oceanstor_df, host_oceanstor_df, \\\n host_id_name_oceanstor_df, host_id_fcinitiator_oceanstor_df, hostid_ctrlportid_oceanstor_df \\\n = storage_oceanstor_extract(project_constants_lst)\n \n extracted_configuration_lst = [chassis_params_df, slot_status_df, licenseport_df, maps_params_df, \n switch_params_df, switchshow_ports_df,\n fabricshow_df, ag_principal_df, \n portshow_df, sfpshow_df, portcfgshow_df,\n fdmi_df, nsshow_df, nscamshow_df, nsshow_dedicated_df, nsportshow_df,\n isl_df, trunk_df, porttrunkarea_df, lsdb_df,\n fcrfabric_df, fcrproxydev_df, fcrphydev_df, lsan_df, fcredge_df, fcrresource_df, fcrxlateconfig_df,\n cfg_df, zone_df, alias_df, cfg_effective_df, zone_effective_df, peerzone_df, peerzone_effective_df,\n sensor_df, errdump_df,\n blade_module_df, blade_servers_df, blade_vc_df,\n synergy_module_df, synergy_servers_df,\n system_3par_df, port_3par_df, host_3par_df,\n system_oceanstor_df, port_oceanstor_df, host_oceanstor_df,\n host_id_name_oceanstor_df, host_id_fcinitiator_oceanstor_df, hostid_ctrlportid_oceanstor_df]\n return extracted_configuration_lst", "def generate_inventory(baremetal_info, server_info):\n\n hosts = defaultdict(list)\n hosts_meta = {}\n\n for node in baremetal_info:\n if node['Provisioning State'].lower() == 'active':\n role = re.findall('.*profile:(compute|control)', node['Properties']['capabilities'])[0]\n for server in server_info:\n if server['ID'] == node['Instance UUID']:\n node_ip = re.findall('.+=(\\d+.\\d+.\\d+.\\d+)$', server['Networks'])[0]\n hosts[role].append(node_ip)\n # To match ssh.cfg.j2 template\n hosts_meta[node_ip] = {'ansible_ssh_host': node_ip,\n 'ansible_user': 'heat-admin'}\n\n for host in hosts:\n hosts[host].sort()\n\n return {'hosts': hosts, 'hosts_meta': hosts_meta}", "def get_dns_config():\n\n # Config \n # TODO : to put in a external config file\n zones_config_file = '/etc/named/zones.conf'\n\n # load bind zone config file\n zones_config = iscpy.ParseISCString(open(zones_config_file, 'r').read())\n\n # Build zone tab to store zone name / config file\n zone_dict = dict()\n\n for z in zones_config:\n zone = z.split(' ')\n # check if the dns is master for this zone\n if zones_config[z]['type'] == 'master':\n zone_name = zone[1].replace(\"\\\"\", \"\")\n zone_file = zones_config[z]['file'].replace(\"\\\"\", \"\")\n zone_dict.update({zone_name : {'name': zone_name, 'file': zone_file}})\n\n return zone_dict", "def get_heat_json_from_topology_config(config, project_name='admin'):\n\n template = dict()\n template[\"heat_template_version\"] = \"2013-05-23\"\n template[\"resources\"] = dict()\n\n for network in config[\"networks\"]:\n nr = dict()\n nr[\"type\"] = \"OS::Neutron::Net\"\n\n nrp = dict()\n nrp[\"shared\"] = False\n nrp[\"name\"] = network[\"name\"]\n nrp[\"admin_state_up\"] = True\n\n 
nr[\"properties\"] = nrp\n\n nrs = dict()\n nrs[\"type\"] = \"OS::Neutron::Subnet\"\n #\n p = dict()\n p[\"cidr\"] = \"1.1.1.0/24\"\n p[\"enable_dhcp\"] = False\n p[\"gateway_ip\"] = \"\"\n p[\"name\"] = network[\"name\"] + \"_subnet\"\n if network[\"name\"] == \"virbr0\":\n p[\"network_id\"] = configuration.openstack_mgmt_network\n elif network[\"name\"] == configuration.openstack_external_network:\n p[\"network_id\"] = configuration.openstack_external_network\n else:\n p[\"network_id\"] = {\"get_resource\": network[\"name\"]}\n\n nrs[\"properties\"] = p\n\n template[\"resources\"][network[\"name\"]] = nr\n template[\"resources\"][network[\"name\"] + \"_subnet\"] = nrs\n\n # cache the image_details here to avoid multiple REST calls for details about an image type\n # as many topologies have lots of the same types of images around\n image_details_dict = dict()\n\n for device in config[\"devices\"]:\n\n if device[\"imageId\"] in image_details_dict:\n image_details = image_details_dict[device[\"imageId\"]]\n else:\n image_details = imageUtils.get_image_detail(device[\"imageId\"])\n image_details_dict[device[\"imageId\"]] = image_details\n\n image_name = image_details[\"name\"]\n\n image_disk_size = 20\n\n # set the size in GB, rounding up to the nearest int\n if 'size' in image_details:\n current_size = float(image_details['size'])\n image_disk_size = int(math.ceil(current_size / 1000000000))\n\n # if the glance image asks for a minimum disk size, let's see if it's larger that what we have\n if \"min_disk\" in image_details and image_details['min_disk'] > image_disk_size:\n image_disk_size = image_details[\"min_disk\"]\n\n # if the user has specified a desired disk size, grab it here so we get the correct flavor\n if type(image_disk_size) is int and device[\"resizeImage\"] > image_disk_size:\n image_disk_size = device[\"resizeImage\"]\n\n # determine openstack flavor here\n device_ram = int(device[\"ram\"])\n device_cpu = int(device[\"cpu\"])\n\n flavor_detail = openstackUtils.get_minimum_flavor_for_specs(configuration.openstack_project,\n device_cpu,\n device_ram,\n image_disk_size\n )\n\n flavor = flavor_detail[\"name\"]\n\n dr = dict()\n dr[\"type\"] = \"OS::Nova::Server\"\n dr[\"properties\"] = dict()\n dr[\"properties\"][\"flavor\"] = flavor\n dr[\"properties\"][\"networks\"] = []\n index = 0\n for p in device[\"interfaces\"]:\n port = dict()\n port[\"port\"] = dict()\n port[\"port\"][\"get_resource\"] = device[\"name\"] + \"_port\" + str(index)\n index += 1\n dr[\"properties\"][\"networks\"].append(port)\n\n dr[\"properties\"][\"image\"] = image_name\n dr[\"properties\"][\"name\"] = device[\"name\"]\n\n if device[\"configDriveSupport\"]:\n dr[\"properties\"][\"config_drive\"] = True\n dr[\"properties\"][\"user_data_format\"] = \"RAW\"\n metadata = dict()\n metadata[\"hostname\"] = device[\"name\"]\n metadata[\"console\"] = \"vidconsole\"\n dr[\"properties\"][\"metadata\"] = metadata\n\n # let's check all the configDriveParams and look for a junos config\n # FIXME - this may need tweaked if we need to include config drive cloud-init support for other platforms\n # right now we just need to ignore /boot/loader.conf\n for cfp in device[\"configDriveParams\"]:\n\n if \"destination\" in cfp and cfp[\"destination\"] == \"/boot/loader.conf\":\n logger.debug(\"Creating loader.conf config-drive entry\")\n template_name = cfp[\"template\"]\n loader_string = osUtils.compile_config_drive_params_template(template_name,\n device[\"name\"],\n device[\"label\"],\n device[\"password\"],\n 
device[\"ip\"],\n device[\"managementInterface\"])\n\n logger.debug('----------')\n logger.debug(loader_string)\n logger.debug('----------')\n for l in loader_string.split('\\n'):\n if '=' in l:\n left, right = l.split('=')\n if left not in metadata and left != '':\n metadata[left] = right.replace('\"', '')\n\n if \"destination\" in cfp and cfp[\"destination\"] == \"/juniper.conf\":\n logger.debug(\"Creating juniper.conf config-drive entry\")\n template_name = cfp[\"template\"]\n personality_string = osUtils.compile_config_drive_params_template(template_name,\n device[\"name\"],\n device[\"label\"],\n device[\"password\"],\n device[\"ip\"],\n device[\"managementInterface\"])\n\n dr[\"properties\"][\"personality\"] = dict()\n dr[\"properties\"][\"personality\"] = {\"/config/juniper.conf\": personality_string}\n else:\n logger.debug('No juniper.conf found here ')\n\n if device['cloudInitSupport']:\n logger.debug('creating cloud-init script')\n dr[\"properties\"][\"config_drive\"] = True\n dr[\"properties\"][\"user_data_format\"] = \"RAW\"\n metadata = dict()\n metadata[\"hostname\"] = device[\"name\"]\n dr[\"properties\"][\"metadata\"] = metadata\n # grab the prefix len from the management subnet which is in the form 192.168.122.0/24\n if '/' in configuration.management_subnet:\n management_prefix_len = configuration.management_subnet.split('/')[1]\n else:\n management_prefix_len = '24'\n\n management_ip = device['ip'] + '/' + management_prefix_len\n\n device_config = osUtils.get_cloud_init_config(device['name'],\n device['label'],\n management_ip,\n device['managementInterface'],\n device['password'])\n\n script_string = \"\"\n if \"configScriptId\" in device and device[\"configScriptId\"] != 0:\n logger.debug(\"Passing script data!\")\n try:\n script = Script.objects.get(pk=int(device[\"configScriptId\"]))\n script_string = script.script\n device_config[\"script_param\"] = device.get(\"configScriptParam\", '')\n logger.debug(script_string)\n except ObjectDoesNotExist:\n logger.info('config script was specified but was not found!')\n\n user_data_string = osUtils.render_cloud_init_user_data(device_config, script_string)\n dr[\"properties\"][\"user_data\"] = user_data_string\n\n template[\"resources\"][device[\"name\"]] = dr\n\n for device in config[\"devices\"]:\n index = 0\n for port in device[\"interfaces\"]:\n pr = dict()\n pr[\"type\"] = \"OS::Neutron::Port\"\n p = dict()\n\n if port[\"bridge\"] == \"virbr0\":\n p[\"network_id\"] = configuration.openstack_mgmt_network\n\n # specify our desired IP address on the management interface\n p['fixed_ips'] = list()\n fip = dict()\n fip['ip_address'] = device['ip']\n p['fixed_ips'].append(fip)\n\n elif port[\"bridge\"] == configuration.openstack_external_network:\n p[\"network_id\"] = configuration.openstack_external_network\n else:\n p[\"network_id\"] = {\"get_resource\": port[\"bridge\"]}\n # disable port security on all other ports (in case this isn't set globally)\n p['port_security_enabled'] = False\n\n pr[\"properties\"] = p\n template[\"resources\"][device[\"name\"] + \"_port\" + str(index)] = pr\n index += 1\n\n return json.dumps(template)", "def get_roles_config():\n return load_yaml_contents(\"./configuration/roles/main.yml\")", "def main(args):\n # Getting the missing parameters, if any.\n if not args.username:\n args.username = raw_input(\"Please enter username: \")\n if not args.password:\n args.password = getpass(\"Please enter password: \")\n if not args.filename:\n args.filename = raw_input(\"Please enter filename: \")\n # Reading 
file.\n nodes = read_file(args.filename)\n # Open the vars file.\n vars_file = open(\"./roles/router/vars/main.yml\", 'a')\n # Obtaining loopback information.\n neighbors = {\"cisco\" : \"\", \"juniper\" : \"\", \"vyatta\" : \"\" }\n cisco, cisco_as = CiscoLoopback()\n juniper, juniper_as = JuniperLoopback()\n vyatta, vyatta_as = VyattaLoopback()\n # Obtaining neighbors information.\n cisco_ne = Neighbors(\"cisco\")\n juniper_ne = Neighbors(\"juniper\")\n vyatta_ne = Neighbors(\"vyatta\")\n # Starting to buil the file's schema.\n cisco_vars = CISCOTEMPLATE % (cisco_as)\n vars_file.write(cisco_vars)\n for interface in cisco:\n a = CISCOLOOPBACK % (interface, cisco[interface][\"address\"], cisco[interface][\"network\"], cisco[interface][\"mask\"])\n vars_file.write(a)\n\n vars_file.write(CISCONEIGHBORS)\n for neighbor in cisco_ne:\n if neighbor == \"cisco\":\n for element in cisco_ne[neighbor]:\n a = NEIGHBORS % (element, cisco_ne[neighbor][element])\n vars_file.write(a)\n\n juniper_vars = JUNIPERTEMPLATE % (juniper_as)\n vars_file.write(juniper_vars)\n for interface in juniper:\n a = LOOPBACK % (interface, juniper[interface][\"address\"], juniper[interface][\"network\"])\n vars_file.write(a)\n vars_file.write(JUNIPERNEIGHBORS)\n for neighbor in juniper_ne:\n if neighbor == \"juniper\":\n for element in juniper_ne[neighbor]:\n a = NEIGHBORS % (element, juniper_ne[neighbor][element])\n vars_file.write(a)\n\n vyatta_vars = VYATTATEMPLATE % (vyatta_as)\n vars_file.write(vyatta_vars)\n for interface in vyatta:\n a = LOOPBACK % (interface, vyatta[interface][\"address\"], vyatta[interface][\"network\"])\n vars_file.write(a)\n vyatta_neighbors_var = VYATTANEIGHBORS\n vars_file.write(vyatta_neighbors_var)\n for neighbor in vyatta_ne:\n if neighbor == \"vyatta\":\n for element in vyatta_ne[neighbor]:\n a = NEIGHBORS % (element, vyatta_ne[neighbor][element])\n vars_file.write(a)\n # Closing and saving the file.\n vars_file.close()\n\n time.sleep(2)\n # Generating the templates.\n os.system(\"ansible-playbook site.yml\")\n time.sleep(2)\n # Loading cisco configuration.\n try:\n with open(\"cisco_template.txt\", 'r') as f:\n cisco_template = f.read()\n except IOError:\n print \"File cisco_template does not exist!\"\n # Loading Juniper configuration.\n try:\n with open(\"juniper_template.txt\", 'r') as f:\n juniper_template = f.read()\n except IOError:\n print \"File juniper_template does not exist!\"\n # Loading Vyatta configuration.\n try:\n with open(\"vyatta_template.txt\", 'r') as f:\n vyatta_template = f.read()\n except IOError:\n print \"File vyatta_template does not exist!\"\n # Configuring the devices.\n for device in nodes:\n if nodes[device][\"platform\"] == \"CiscoIOS\":\n run_commands(nodes[device][\"ipv4_address\"], args.username, args.password, cisco_template, platform=\"cisco\")\n print \"***CISCO CONFIGURATION COMPLETED***\"\n elif nodes[device][\"platform\"] == \"Juniper\":\n run_commands(nodes[device][\"ipv4_address\"], args.username, args.password, juniper_template, platform=\"juniper\")\n print \"***JUNIPER CONFIGURATION COMPLETED***\"\n else:\n run_commands(nodes[device][\"ipv4_address\"], args.username, args.password, vyatta_template, platform=\"vyatta\")\n print \"***VYATTA CONFIGURATION COMPLETED***\"", "def add_vars(inventory):\n etc_hosts = format_hosts_dict(inventory)\n inventory.setdefault('all', dict()).\\\n setdefault('vars', dict()).\\\n setdefault('prometheus', dict())['etc_hosts'] = etc_hosts\n inventory['all']['vars'].\\\n setdefault('grafana', dict())['admin_password'] 
= 'admin'\n return inventory", "def _parse_zpool_config(lines):\n # Don't bother parsing if the first line isn't the header we expect\n if not lines[0].split() == ['NAME', 'STATE', 'READ', 'WRITE', 'CKSUM']:\n return None\n\n # Match two or more spaces\n spaces = re.compile('\\s{2,}')\n\n lines = lines[1:]\n config = {}\n i_level = 0\n\n for line in lines:\n # Ignore leading tab character\n indent = indentation(line.lstrip('\\t'))\n fields = spaces.split(line.strip())\n name = fields[0]\n status = {}\n\n # Matches healthy active vdevs\n # NOTE the fields cannot be directly cast to ints because they may\n # contain suffixes such as K or M that indicate magnitude.\n if len(fields) == 5:\n status['state'] = fields[1]\n status['read'] = fields[2]\n status['write'] = fields[3]\n status['cksum'] = fields[4]\n\n # Matches active vdevs with additional status\n if len(fields) == 6:\n status['state'] = fields[1]\n status['read'] = fields[2]\n status['write'] = fields[3]\n status['cksum'] = fields[4]\n status['info'] = fields[5]\n\n # Matches spare drives\n if len(fields) == 2:\n status['state'] = fields[1]\n\n # Matches spare drives that are in use\n if len(fields) == 3:\n status['state'] = fields[1]\n status['info'] = fields[2]\n\n # If the indent is 0, it's a root node\n if indent == 0:\n config[name] = status\n stack = []\n stack.append(config[name])\n i_level = 0\n\n # This line is a child of the previous (indent)\n elif indent > i_level:\n stack[-1]['vdev'] = {}\n stack[-1]['vdev'][name.lower()] = status\n stack.append(stack[-1]['vdev'][name.lower()])\n i_level = indent\n\n # This line is a sibling of the previous\n elif indent == i_level:\n stack.pop()\n stack[-1]['vdev'][name.lower()] = status\n stack.append(stack[-1]['vdev'][name.lower()])\n\n # This line is not related to the previous (dedent)\n elif indent < i_level:\n while indent <= i_level:\n stack.pop()\n i_level -= 1\n stack[-1]['vdev'][name.lower()] = status\n stack.append(stack[-1]['vdev'][name.lower()])\n\n return config", "def read_config(file, destination=None, user=None, host=None, cmd_host=None, copy_protocol=None):\n with open(file) as config_yaml:\n base_yaml = yaml.safe_load(config_yaml)\n\n # with config loaded, make sure we have the keys that we need\n\n base_config = {\n 'keys': [],\n 'map': [],\n 'default': [],\n 'required_files': [],\n 'path': None,\n 'destination': destination,\n 'command': {\n 'exts': [],\n 'run': None\n }\n }\n\n router_config = {\n 'key': [],\n 'files': [],\n 'filter': None,\n 'invert': None,\n 'lowercase': None,\n 'exact': None\n }\n\n remote_config = {\n 'user': user,\n 'host': host,\n 'copy_protocol': copy_protocol,\n 'cmd_host': cmd_host,\n }\n\n if 'dassort' in base_yaml.keys() and 'remote' in base_yaml.keys():\n tree_yaml = base_yaml['dassort']\n map_json = tree_yaml['json']\n base_config = merge_dicts(base_config, map_json)\n base_config = merge_dicts(base_config, tree_yaml)\n remote_yaml = base_yaml['remote']\n remote_config = merge_dicts(remote_config, remote_yaml)\n router_config = None\n elif 'dassort' in base_yaml.keys():\n tree_yaml = base_yaml['dassort']\n map_json = tree_yaml['json']\n base_config = merge_dicts(base_config, map_json)\n base_config = merge_dicts(base_config, tree_yaml)\n remote_config = None\n router_config = None\n elif 'router' in base_yaml.keys():\n tree_yaml = base_yaml['router']\n router_config = merge_dicts(router_config, tree_yaml)\n # all router items should be iterables\n for k, v in router_config.items():\n if type(v) is not list:\n router_config[k] = [v]\n 
base_config = None\n remote_config = None\n else:\n base_config = None\n remote_config = None\n router_config = None\n\n # reformat base configuration\n if base_config is not None:\n base_config = {\n 'keys': base_config['keys'],\n 'map': base_config['map'],\n 'default': base_config['default'],\n 'required_files': base_config['required_files'],\n 'value': [],\n 'path': {\n 'path_string': base_config['path'],\n 're': {'root': base_config['destination']}\n },\n 'command': base_config['command'],\n }\n\n return base_config, remote_config, router_config", "def render_configs(task):\n\n # run function to render global configs\n global_cfg = ibns_global(task)\n # write global config file for each host\n with open(f\"configs/{task.host}_dot1x_global.txt\", \"w+\") as file:\n file.write(global_cfg)\n # print completed hosts\n c_print(f\"*** {task.host}: dot1x global configuration rendered ***\")\n\n # run function to render snmp configs\n snmp_cfg = ibns_snmp(task)\n # function to run interface configs\n with open(f\"configs/{task.host}_snmp.txt\", \"w+\") as file:\n file.write(snmp_cfg)\n # print completed hosts\n c_print(f\"*** {task.host}: SNMP configuration rendered ***\")\n\n # run function to run interface configs\n intf_cfg = ibns_intf(task)\n # write interface config file for each host\n with open(f\"configs/{task.host}_dot1x_intf.txt\", \"w+\") as file:\n file.write(intf_cfg)\n # print completed hosts\n c_print(f\"*** {task.host}: dot1x intf configurations rendered ***\")", "def parse_ir(self, filename):\n with open(filename, 'r') as f:\n configs_data = f.readlines()\n for line in configs_data:\n # remove the \\n char\n cur_line = line[:-1]\n title = re.findall('.+?:', cur_line)\n # remove the : char\n cur_title = title[0][:-1]\n content = re.findall(':.+', cur_line)\n cur_content = content[0][1:]\n exec('self.config_dict[cur_title]=' + cur_content)", "def get_switch_machines_from_file(filename):\n switches = []\n switch_machines = {}\n with open(filename) as switch_file:\n for line in switch_file:\n line = line.strip()\n if not line:\n # ignore empty line\n continue\n\n if line.startswith('#'):\n # ignore comments\n continue\n\n columns = [column for column in line.split(',')]\n if not columns:\n # ignore empty line\n continue\n\n if columns[0] == 'switch':\n (switch_ip, switch_vendor, switch_version,\n switch_community, switch_state) = columns[1:]\n switches.append({\n 'ip': switch_ip,\n 'vendor': switch_vendor,\n 'credentials': {\n 'version': switch_version,\n 'community': switch_community,\n },\n 'state': switch_state,\n })\n elif columns[0] == 'machine':\n switch_ip, switch_port, mac = columns[1:]\n switch_machines.setdefault(switch_ip, []).append({\n 'mac': mac,\n 'port': switch_port,\n })\n\n return (switches, switch_machines)", "def get_netlist_info_dict(self) -> Dict[str, Any]:\n sim_envs: Sequence[str] = self._specs['sim_envs']\n env_values = {var: [val_table[env] for env in sim_envs]\n for var, val_table in self.env_params.items()}\n sim_setup = dict(\n sim_envs=sim_envs,\n params=self.sim_params,\n swp_info=self.swp_info,\n options=self._specs.get('sim_options', {}),\n monte_carlo=self._specs.get('monte_carlo_params', {}),\n env_params=env_values,\n )\n return sim_setup", "def load_inventory(file_name, lst_Inventory):\r\n \r\n try:\r\n objFile = open(file_name, 'r')\r\n lst_Inventory.clear()\r\n for line in objFile:\r\n data = line.strip().split(',')\r\n inventory = CD(data[0],data[1],data[2])\r\n lst_Inventory.append(inventory)\r\n objFile.close()\r\n except 
FileNotFoundError:\r\n pass\r\n return lst_Inventory", "def crm2dict(conf_list=None):\n if conf_list is None:\n conf_list=configure_parse()\n conf_dict=dict(conf_list)\n results={}\n groupkeys = getkeys(conf_dict, 'group')\n primitivekeys = getkeys(conf_dict, 'primitive')\n for gk in groupkeys:\n results.setdefault(gk.split()[1], {})\n locationkeys = getkeys(conf_dict, 'location')\n for key in conf_dict.keys():\n conf_type, tag = key.split()\n if conf_type == 'group':\n members=[x for x in conf_dict[key] if not (x.startswith('target-role') or x == 'meta')]\n results[tag].update({'members' : members })\n elif conf_type == 'location':\n service_name, loc=parse_tag(tag)\n balancer = conf_dict[key][2]\n if service_name not in results.keys():\n results.setdefault(service_name, {'loadbalancers' : {loc:balancer}})\n elif 'loadbalancers' not in results[service_name].keys():\n results[service_name].update({'loadbalancers' : {loc:balancer}})\n else:\n results[service_name]['loadbalancers'].update({loc:balancer})\n elif conf_type == 'primitive':\n service_name, service_type = parse_tag(tag)\n if service_type == 'ld':\n results[service_name].update({'type' : 'ldirectord'})\n elif service_type[:2] == 'ip':\n params = conf_dict[key]\n parsed_params={}\n for param in params:\n if param[:3] == 'ip=':\n parsed_params.setdefault('ip', param[4:-1])\n elif param[:13] == 'cidr_netmask=':\n parsed_params.setdefault('cidr_netmask', param[14:-1])\n elif param[:4] == 'nic=':\n parsed_params.setdefault('nic', param[5:-1])\n if 'ips' not in results[service_name].keys():\n results[service_name].update({'ips' : [haipstr(parsed_params)]})\n else:\n results[service_name]['ips'].append(haipstr(parsed_params))\n return results", "def get_ipv4_interfaces(device_name):\n interfaces = {}\n if DEBUG:\n print note + \"Entering into get_ipv4_interfaces function\"\n # Needs to be fixed. 
Get list of interfaces first, then IP addresses, then VLAN, then ACLs\n config_element = nipper_xml.find(\"./report/part/[@ref='CONFIGURATION']\")\n\n for section in config_element.findall('./section'):\n device_item = None\n\n for i in section.get('title').split():\n if device_name == i:\n device_item = device_name\n if DEBUG:\n print \"\\t\" + note + \"Set Device: %s\" % device_name\n\n if device_item is not None:\n interface_element = section.find(\"./section/[@ref='CONFIGURATION.ADDRESSES']/section/\"\n \"[@ref='ADDRESSES.IPV4']\")\n if interface_element is not None:\n headings = []\n items = []\n for heading in interface_element.findall(\"./table/[@title='IPv4 addresses']/headings/heading\"):\n headings.append(heading.text)\n if DEBUG:\n print \"\\t\" + note + \"Set Heading: %s\" % heading.text\n for item in interface_element.findall(\"./table/[@title='IPv4 addresses']/tablebody\"\n \"/tablerow/tablecell/item\"):\n items.append(item.text)\n if DEBUG:\n print \"\\t\" + note + \"Set Item: %s\" % item.text\n i = 0\n interface_id = None\n if DEBUG:\n print \"\\t\" + note + \"Heading List: %s\" % headings\n print \"\\t\" + note + \"Items List: %s\" % items\n for item in items:\n if i > (len(headings) - 1):\n i = 0\n if DEBUG:\n print \"\\t\" + info + \"Heading: %s\\t Item: %s\" % (headings[i], item)\n if i is 0:\n interface_id = item\n interfaces[interface_id] = {}\n interfaces[interface_id].update({headings[i]: item})\n i += 1\n\n interfaces_element = section.find(\"./section/[@ref='CONFIGURATION.INTERFACES']/section/\"\n \"[@ref='ETHINTERFACESLAYER3']\")\n if interfaces_element is not None:\n headings = []\n for heading in interfaces_element.findall(\"./table/[@title='Layer 3 Ethernet Interfaces']\"\n \"/headings/heading\"):\n headings.append(heading.text)\n for tr in interfaces_element.findall(\"./table/[@title='Layer 3 Ethernet Interfaces']\"\n \"/tablebody/tablerow\"):\n items = []\n for i in tr.findall(\"./tablecell/item\"):\n items.append(i.text)\n if 'Zone' in headings:\n interfaces[items[headings.index('Interface')]].update({'Zone': items[headings.index('Zone')]})\n if 'VLAN' in headings:\n interfaces[items[headings.index('Interface')]].update({'VLAN': items[headings.index('VLAN')]})\n if DEBUG:\n print info + \"Interfaces object: \"\n print interfaces\n raw_input(warn + \"Press any key to continue\")\n return interfaces", "def load_jail_config(conf_f):\n# open jail.conf\n jc = open(conf_f, 'r')\n\n# take all elements from jail.conf, remove \"junk\" data and applay to mas[]\n# only data which will be used is taken, all valuse are in order \n mas = []\n for i in jc:\n i = i.strip('\\n').strip(';').replace('\\\"', '').replace('host.hostname = ', '').replace('$jip = ', '').replace('jid =', '' ).replace('$ip = ', '').replace('$jgw =', '').replace(' ', '')\n mas.append(i)\n jc.close()\n return mas", "def getDetectorMapSpecs(\n dbname: str, nameprefix: str, criteria: SelectionCriteria,\n config: ConfigOverrides, *, maxarcs: int) -> List[Dict[str, Any]]:\n blocks = []\n for beamConfig in sorted(getBeamConfigs([\"scienceArc\"], dbname, criteria)):\n for arm in all_arms:\n sources = getSourcesFromDB(\"scienceArc\", arm, dbname, criteria, beamConfig=beamConfig)\n for srcs in splitSources(sources, maxarcs):\n calibBlock: Dict[str, Any] = {}\n\n if sources:\n calibBlock[\"detectorMap\"] = {\n \"id\": getSourceFilterFromListOfFileId(srcs)\n }\n calibBlock[\"detectorMap\"].update(config.toYaml(\"detectorMap\"))\n\n if calibBlock:\n # This name is not unique\n # but a serial number will be added 
to it\n # after a merge process.\n blocks.append(nameYamlMapping(f\"{nameprefix}{arm}\", calibBlock))\n\n return addSerialNumbersToNames(mergeCalibBlocks(blocks))", "def load_configs(self):\n\t\tshutit_global.shutit_global_object.yield_to_draw()\n\t\t# Get root default config.\n\t\t# TODO: change default_cnf so it places whatever the values are at this stage of the build.\n\t\tconfigs = [('defaults', StringIO(default_cnf)), os.path.expanduser('~/.shutit/config'), os.path.join(self.host['shutit_path'], 'config'), 'configs/build.cnf']\n\t\t# Add the shutit global host- and user-specific config file.\n\t\t# Add the local build.cnf\n\t\t# Get passed-in config(s)\n\t\tfor config_file_name in self.build['extra_configs']:\n\t\t\trun_config_file = os.path.expanduser(config_file_name)\n\t\t\tif not os.path.isfile(run_config_file):\n\t\t\t\tshutit_global.shutit_global_object.shutit_print('Did not recognise ' + run_config_file + ' as a file - do you need to touch ' + run_config_file + '?')\n\t\t\t\tshutit_global.shutit_global_object.handle_exit(exit_code=0)\n\t\t\tconfigs.append(run_config_file)\n\t\t# Image to use to start off. The script should be idempotent, so running it\n\t\t# on an already built image should be ok, and is advised to reduce diff space required.\n\t\tif self.action['list_configs'] or self.loglevel <= logging.DEBUG:\n\t\t\tmsg = ''\n\t\t\tfor c in configs:\n\t\t\t\tif isinstance(c, tuple):\n\t\t\t\t\tc = c[0]\n\t\t\t\tmsg = msg + ' \\n' + c\n\t\t\t\tself.log(' ' + c,level=logging.DEBUG)\n\n\t\t# Interpret any config overrides, write to a file and add them to the\n\t\t# list of configs to be interpreted\n\t\tif self.build['config_overrides']:\n\t\t\t# We don't need layers, this is a temporary configparser\n\t\t\toverride_cp = ConfigParser.RawConfigParser()\n\t\t\tfor o_sec, o_key, o_val in self.build['config_overrides']:\n\t\t\t\tif not override_cp.has_section(o_sec):\n\t\t\t\t\toverride_cp.add_section(o_sec)\n\t\t\t\toverride_cp.set(o_sec, o_key, o_val)\n\t\t\toverride_fd = StringIO()\n\t\t\toverride_cp.write(override_fd)\n\t\t\toverride_fd.seek(0)\n\t\t\tconfigs.append(('overrides', override_fd))\n\n\t\tself.config_parser = self.get_configs(configs)\n\t\tself.get_base_config()", "def create_config(hostname: str) -> List[str]:\n\n yaml_data = yaml.safe_load(open(f\"host_vars/{hostname}.yaml\"))\n validated_data = Model(**yaml_data)\n env = Environment(\n loader=FileSystemLoader(\"./templates\"), trim_blocks=True, lstrip_blocks=True\n )\n template = env.get_template(\"config.j2\")\n my_configs = template.render(validated_data)\n configuration = my_configs.splitlines()\n return configuration", "def test_list_namespaced_deployment_config(self):\n pass", "def create_net(args):\n\n # Load config file for this experiment\n xinfo = yaml.load(open(args.exp)) # experiment info\n\n # copy config to run directory\n assert osp.isdir(args.cache_dir), 'Working directory not found: ' + args.cache_dir\n # output config file\n yaml.dump(xinfo, open(args.exp_config_path, 'w'),\n default_flow_style=False)\n\n # Load dataset config file\n dcfg_path = osp.join(args.data_config_path, xinfo['INPUT']['DATASET'])\n dinfo = yaml.load(open(dcfg_path)) # dataset info\n data_dir = dinfo['ROOT']\n\n layout = xinfo['INPUT']['LAYOUT']\n inps = [s.strip() for l in layout for s in l.split(',')]\n outs = [s.strip() for s in xinfo['REFINE']['TARGETS'].split(',')]\n\n supports = ['seg', 'flow', 'norm', 'rgb', 'depth']\n\n nets = {}\n for split in ['train', 'test']:\n net_inps = []\n net_outs = []\n for inp in inps:\n 
match = re.search('^(gt|pr)({})'.format('|'.join(supports)), inp)\n assert match is not None, 'Error in config INPUT-LAYOUT: ' + inp\n\n modality = match.group(2)\n nchannels = dinfo[modality]['n']\n path = osp.join(data_dir, dinfo[modality][match.group(1) + '-' + split])\n\n assert osp.exists(path), 'File not found: ' + path\n net_inps.append((inp, path, nchannels))\n\n for out in outs:\n # TODO: read target type: zero couplings, tight, loose couplings\n match = re.search('({})'.format('|'.join(supports)), out)\n assert match is not None, 'Error in config REFINE-TARGET: '+ out\n\n modality = match.group(1)\n nchannels = dinfo[modality]['n']\n path = osp.join(data_dir, dinfo[modality]['gt-' + split])\n\n assert osp.exists(path), 'File not found: ' + path\n net_outs.append((out, path, nchannels))\n\n loss_params = dict()\n mapping = None\n if 'mapping' in dinfo['seg']:\n idx = dinfo['seg']['mapping']\n mapping = dict(zip(idx, xrange(len(idx))))\n\n if split == 'train':\n\n # if the class weights is not in the dataset config file\n if 'gt-train-weights' not in dinfo['seg']:\n print 'Generating median frequency balancing weights.'\n (weights, mapping) = gcw.get_mfb(osp.join(dinfo['ROOT'], dinfo['seg']['gt-train']),\n dinfo['seg']['ignore_label'],\n mapping)\n # save back to dataset config\n dinfo['seg']['gt-train-weights'] = weights\n yaml.dump(dinfo, open(dcfg_path, 'w'), default_flow_style=False)\n else:\n weights = dinfo['seg']['gt-train-weights']\n # update data\n # update loss parameter\n ignore_label = dinfo['seg']['ignore_label']\n ignore_label = mapping[ignore_label] if mapping is not None else ignore_label\n loss_params['loss_param'] = {\n 'ignore_label': ignore_label,\n 'class_weighting': weights\n }\n\n # generate net prototxt\n loader = dinfo['NAME'] + '_loader'\n net_proto = arch.create_net(net_inps, net_outs, split, loader, layout, mapping, **loss_params)\n\n # output to file\n path = osp.join(args.cache_dir, getattr(args, 'exp_{}_path'.format(split)))\n open(path, 'w').write(str(net_proto))\n nets[split] = net_proto\n\n return nets", "def cook_config(ext_config_filename):\n mc = base_model_config()\n with open(ext_config_filename, \"r\") as fp:\n ext_mc = edict(json.load(fp, encoding=\"utf8\"))\n for s in ext_mc.keys():\n mc[s] = ext_mc[s]\n # mc.ANCHOR_BOX = set_anchors(mc)\n # print(np.max(np.square(np.array(set_anchors_testing(mc)) - np.array(set_anchors(mc)))))\n # mc.ANCHORS = len(mc.ANCHOR_BOX)\n # H, W, C = _get_output_shape(mc)\n # mc.MODEL_OUTPUT_SHAPE = [H, W, mc.ANCHOR_PER_GRID]\n return mc", "def topo_conf():\n for k in switches.keys():\n switches_ip[k] = IPAddr((192<<24)+int(k))\n switches_mac[k] = EthAddr(\"aa\"+ \"%010d\"%(k))", "def get_runconfig(master_host, namespace, target):\n response = tf_operator_util.send_request(master_host, namespace, target,\n \"runconfig\", {})\n return yaml.load(response)", "def parse_config(fpath):\n\n with open(fpath, \"r\") as f:\n # Ignore lines consisting only of whitespace or commented lines.\n lines = [\n line.strip() for line in f.readlines()\n if not (line.isspace() or line.startswith(\"#\"))\n ]\n\n # Each block begins with a line of the form \"[type]\", with the block type\n # (eg, \"convolutional\") enclosed in square brackets. 
Chunk config text\n # into blocks.\n block_start_lines = [\n line_num for line_num, line in enumerate(lines) if line.startswith(\"[\")\n ]\n block_start_lines.append(len(lines))\n\n text_blocks = []\n for i in range(1, len(block_start_lines)):\n block_start, block_end = block_start_lines[i-1], block_start_lines[i]\n text_blocks.append(lines[block_start:block_end])\n\n def str2type(raw_val):\n \"\"\"\n Helper function to convert a string input to the appropriate\n type (str, int, or float).\n \"\"\"\n try:\n return int(raw_val)\n except ValueError:\n pass\n\n try:\n return float(raw_val)\n except ValueError:\n return raw_val\n\n blocks = []\n net_info = None\n for text_block in text_blocks:\n block = {\"type\": text_block[0][1:-1]}\n for line in text_block[1:]:\n key, raw_val = line.split(\"=\")\n key = key.strip()\n\n # Convert fields with multiple comma-separated values into lists.\n if \",\" in raw_val:\n val = [str2type(item.strip()) for item in raw_val.split(\",\")]\n else:\n val = str2type(raw_val.strip())\n\n # If this is a \"route\" block, its \"layers\" field contains either\n # a single integer or several integers. If single integer, make it\n # a list for consistency.\n if (\n block[\"type\"] == \"route\"\n and key == \"layers\"\n and isinstance(val, int)\n ):\n val = [val]\n\n # If this is a \"yolo\" block, it contains an \"anchors\" field\n # consisting of (anchor width, anchor height) pairs of values;\n # thus, we group anchor values into chunks of two.\n if key == \"anchors\":\n val = [val[i:i+2] for i in range(0, len(val), 2)]\n\n block[key] = val\n\n if block[\"type\"] == \"net\":\n net_info = block\n else:\n blocks.append(block)\n\n return blocks, net_info", "def test_basic_mac_move(self):\n\n args = self.get_args()\n self.write_config_file(self.create_config_file(), args)\n\n execute_tool(args, test_mode=True)\n\n ip = '3.4.3.4'\n mac = '00:11:22:33:33:33'\n self.assertFalse(self.verify_remote_site_has_entry(mac, ip, 'intersite-testsuite', 'l3out',\n 'intersite-testsuite-app-epg'))\n\n time.sleep(2)\n self.add_endpoint(mac, ip, 'intersite-testsuite', 'app', 'epg')\n time.sleep(2)\n self.assertTrue(self.verify_remote_site_has_entry(mac, ip, 'intersite-testsuite',\n 'l3out', 'intersite-testsuite-app-epg'))\n\n mac = '00:11:22:33:44:44'\n self.add_endpoint(mac, ip, 'intersite-testsuite', 'app', 'epg')\n self.remove_endpoint('00:11:22:33:33:33', ip, 'intersite-testsuite', 'app', 'epg')\n time.sleep(2)\n self.assertTrue(self.verify_remote_site_has_entry(mac, ip, 'intersite-testsuite',\n 'l3out', 'intersite-testsuite-app-epg'))", "def get_configs(candidate_filename):\n return (sortby('name')(haresources2.load(haresources2_file)),\n sortby('name')(crmdict2haresources(crm2dict(configure_parse()))))", "def GenerateConfig(context):\n\n resources = [{\n 'name': context.env['name'],\n 'type': 'compute.v1.instance',\n 'properties': {\n 'zone': context.properties['zone'],\n 'machineType': ''.join([COMPUTE_URL_BASE, 'projects/',\n context.env['project'], '/zones/',\n context.properties['zone'], '/machineTypes/',\n context.properties['machineType']]),\n 'disks': [{\n 'deviceName': 'boot',\n 'type': 'PERSISTENT',\n 'boot': True,\n 'autoDelete': True,\n 'initializeParams': {\n 'sourceImage': ''.join([COMPUTE_URL_BASE, 'projects/',\n 'ubuntu-os-cloud/global/',\n 'images/family/ubuntu-1604-lts'])\n }\n }],\n 'networkInterfaces': [{\n 'network': '$(ref.' 
+ context.properties['network']\n + '.selfLink)',\n 'accessConfigs': [{\n 'name': 'External NAT',\n 'type': 'ONE_TO_ONE_NAT'\n }]\n }],\n 'metadata': {\n 'items': [{\n 'key': 'startup-script',\n 'value': ''.join(['#!/bin/bash\\n',\n 'sudo apt-get install openjdk-9-jre-headless -y\\n',\n 'sudo python -m SimpleHTTPServer 80'])\n }]\n }\n }\n }]\n return {'resources': resources}", "def get_configmap_dict():\n template = textwrap.dedent(\n \"\"\"\n kind: ConfigMap\n apiVersion: v1\n metadata:\n name: fio-config\n data:\n workload.fio: |\n # here comes workload configuration\n \"\"\"\n )\n cm_dict = yaml.safe_load(template)\n return cm_dict", "def get_ignition_details(self):\n self.clear_screen()\n default = 'core'\n install_user = input('enter the user used to install openshift\\n'\n 'DONOT CHANGE THIS VALUE\\n'\n 'default [core]: ')\n install_user = set_values(install_user, default)\n default = 'openshift'\n install_dir = input('enter the directory where openshift installs\\n'\n 'directory will be created under /home/core\\n'\n 'default [openshift]: ')\n install_dir = set_values(install_dir, default)\n default = '10.128.0.0/14'\n pod_network_cidr = input('enter the pod network cidr\\n'\n 'default [10.128.0.0/14]: ')\n pod_network_cidr = set_values(pod_network_cidr, default)\n logging.info('pod network cidr: {}'.format(pod_network_cidr))\n pod_network_cidr = validate_network_cidr(pod_network_cidr)\n default = 23\n host_prefix = input('specify cidr notation for number of ips in each node: \\n'\n 'cidr number should be an integer and less than 32\\n'\n 'default [23]: ')\n host_prefix = set_values(host_prefix, default)\n host_prefix = validate_cidr(host_prefix)\n default = '172.30.0.0/16'\n service_network_cidr = input('specify the service network cidr\\n'\n 'default [172.30.0.0/16]: ')\n service_network_cidr = set_values(service_network_cidr, default)\n service_network_cidr = validate_network_cidr(service_network_cidr)\n logging.info('adding install_user: {} install_dir: {} cluster_network_cidr: {}\\\n host_prefix: {} service_network_cidr: {}'.format(install_user, install_dir,\n pod_network_cidr, host_prefix, \n service_network_cidr))\n self.inventory_dict['csah']['vars']['install_user'] = install_user\n self.inventory_dict['csah']['vars']['install_dir'] = install_dir\n self.inventory_dict['csah']['vars']['cluster_network_cidr'] = pod_network_cidr\n self.inventory_dict['csah']['vars']['host_prefix'] = int(host_prefix)\n self.inventory_dict['csah']['vars']['service_network_cidr'] = service_network_cidr", "def loadConfigInfo(self):\n reg = self.reg\n yield reg.cd(['', 'Servers', 'SIM900 Serial', 'links'], True)\n dirs, keys = yield reg.dir()\n p = reg.packet()\n for k in keys:\n p.get(k, key=k)\n ans = yield p.send()\n b = 0\n hostname = gethostname()\n for ss in ans['Serial Links']:\n if ss[0] == hostname + ' Serial Server':\n self.serialLinks = {ss[0]:ss[1]}\n # self.serialLinks = dict((ans[k][0][0], ans[k][0][1]) for k in keys) \n print self.serialLinks", "def get_valid_config(args):\n source = confuse.YamlSource(args.config)\n config = confuse.RootView([source])\n\n job_template = {\n \"job\": {\n \"name\": str,\n \"dir\": confuse.Optional(\n FilenameValidate(\n cwd=str(pathlib.Path(__file__).parent.absolute())),\n default=str(pathlib.Path(__file__).parent.absolute())\n ),\n }\n }\n job_config = config.get(job_template)\n\n logging_template = confuse.Optional(\n confuse.MappingTemplate({\n 'ids': confuse.StrSeq(),\n 'data': confuse.Sequence(\n confuse.Choice(['objectives', 'state', 
'variables'])),\n 'timestamped': confuse.Optional(bool, default=True),\n \"to_file\": confuse.Optional(bool, default=True),\n \"to_console\": confuse.Optional(bool, default=False)\n })\n )\n\n sumo_template = {\n \"dir\": FilenameValidate(\n cwd=job_config.job.dir),\n \"gui\": confuse.Optional(bool, default=True),\n \"max_steps\": confuse.Optional(int, default=10e5),\n \"network\": FilenameValidate(relative_to=\"dir\"),\n }\n sumo_config = config.get({\"sumo\": sumo_template})\n sumo_template[\"additional\"] = confuse.Sequence(\n FilenameValidate(cwd=sumo_config.sumo.dir))\n sumo_template[\"route\"] = confuse.Sequence(\n FilenameValidate(cwd=sumo_config.sumo.dir))\n\n tls_template = confuse.Sequence({\n \"id\": str,\n \"controller\": confuse.Choice(\n TLSFactory.get_registered_keys()),\n \"constants\": confuse.MappingValues(\n confuse.OneOf([\n confuse.Number(),\n AllowedContainers(list),\n AllowedContainers(dict),\n FilenameValidate(cwd=job_config.job.dir),\n ExecutableValidate()\n ])\n ),\n \"variables\": confuse.MappingValues(\n confuse.OneOf([\n confuse.Number(),\n AllowedContainers(list)\n ])\n ),\n \"extract\": {\n \"user_data\": confuse.Sequence({\n \"feature\": confuse.Choice(\n [\"count\", \"speed\", \"eta\", \"delay\", \"waiting_time\"]),\n \"user_class\": confuse.Choice(\n [\"bicycle\", \"passenger\", \"pedestrian\", \"bus\", \"truck\", \"moped\"]),\n \"at\": confuse.Choice(\n [\"lane\", \"detector\", \"phase\"]),\n \"mapping\": AllowedContainers(dict)\n }),\n \"tls_data\": confuse.Sequence({\n \"feature\": confuse.Choice(\n [\"elapsed_time\", \"integer_phase\", \"binary_phase\"]),\n \"to_variable\": str\n })\n }\n })\n\n full_template = {\n \"logging\": logging_template,\n \"sumo\": sumo_template,\n \"tls\": tls_template,\n }\n job_template.update(full_template)\n valid_config = config.get(job_template)\n\n # second round of sumo validation\n assert len(valid_config.sumo.route) > 0, \\\n \"No demand definition: sumo.route is an empty list, expected at least one *.rou.xml\"\n \n # second round of logger validation, look if ids are given\n if valid_config.logging:\n if valid_config.logging.ids and valid_config.logging.data:\n output_dir = os.path.join(valid_config.job.dir, \"output\")\n os.makedirs(output_dir, exist_ok=True)\n valid_config.logging.update({\"dir\": output_dir})\n else:\n del valid_config['logging']\n\n return valid_config", "def main():\n\n ip_filename = arguments.ip_file.strip()\n\n # Set project directory to 'logs' unless an optional directory was given\n if arguments.project_dir:\n project = arguments.project_dir\n else:\n project = 'logs'\n\n if arguments.device_class:\n device_cls = arguments.device_class.strip()\n else:\n # Default device class for Netmiko\n device_cls = 'cisco_ios'\n\n ips = []\n ips = load_txt_file(ip_filename)\n\n total_devices = len(ips)\n # Track devices which fail login or pings\n missing_devices = []\n # Track devices which were successfully accessed\n devices_verified = 0\n\n # Create Directory for show output based on the Project Name\n path = os.path.join(\"./\", project.strip())\n # print path\n if not os.path.exists(path):\n os.makedirs(path)\n print(f\"Created directory: {path}\")\n\n # Create logfile for the discovery run in same directory as the resulting show commands\n # logfilename = project + \"-logfile.log\"\n # logfilename = os.path.join(path, logfilename)\n\n if total_devices > 1:\n heading = f\"##### Executing show commands for discovery project {project} for {str(total_devices)} devices! 
#####\"\n else:\n heading = f\"##### Executing show commands for discovery project {project} for {str(total_devices)} device! #####\"\n\n print(\"#\" * len(heading))\n print(heading)\n print(\"#\" * len(heading))\n\n print(f\"Device IP(s) in project {project}:\")\n for i in ips:\n print(f\"\\t{i}\")\n print(\"--------------------------\")\n print(f\"Total devices: {str(len(ips))}\")\n print(\"#\" * len(heading))\n print(\"\\n\")\n\n ## Default Credentials\n # Default list of credentials in format username, user password, enable password\n credentials = ['cisco, cisco, cisco']\n\n ## Load Credentials if -c or --creds option was used\n if arguments.creds:\n # Override default credentials as a new credential file with one or more sets of credentials was provided\n cred_filename = arguments.creds\n credentials = load_txt_file(cred_filename)\n\n ##### SHOW COMMANDS\n commands = []\n\n ## Load custom show commands if -c or --show option was used\n if arguments.show:\n # Override default list of show commands as a new file with one or more show commands was provided\n show_filename = arguments.show\n custom_showcmds = load_txt_file(show_filename)\n\n # first command to send is an end to get back to the main prompt\n commands = custom_showcmds\n\n else:\n # DEFAULT SHOW COMMANDS\n commands = [\"show version\",\n ]\n\n # if not arguments.pingonly:\n # print(\"Sending \" + str(len(commands)) + \" show commands:\")\n # for x in range(0, len(commands)):\n # print(\"\\t\" + commands[x])\n\n # For each IP in the ip address file, attempt to ping, attempt to log in, attempt to enter enable mode and\n # execute and save show command output\n for mgmt_ip in ips:\n\n login_success = False\n enable_success = False\n output = ''\n hostname = \"dev_\" + mgmt_ip\n\n # If Ping is successful attempt to log in and if that is successful attempt to enter enable mode and\n # execute list of show commands\n device_pings = ping_device(mgmt_ip)\n\n if device_pings:\n print(f\"Device {mgmt_ip} Responds to Pings!\\n\")\n\n # If the -i or --icmppingonly option was provided when the script was called, then only execute the ping code.\n if arguments.icmppingonly:\n # Keep a count of the devices that are pingable\n devices_verified += 1\n # Skip everything else as the icmp ping only option was given\n continue\n\n if len(credentials) > 1:\n print(\"**** Attempting multiple credentials to access device....\")\n\n try_telnet = False\n # Credential Loop\n for line in credentials:\n\n lineitem = line.split(',')\n uname = lineitem[0].strip()\n upwd = lineitem[1].strip()\n epwd = lineitem[2].strip()\n\n if not try_telnet:\n\n print(f\"\\t**** Attempting user credentials for {uname} with SSH.\")\n\n try:\n dev_conn = ConnectHandler(device_type=device_cls, ip=mgmt_ip, username=uname, password=upwd,\n secret=epwd)\n login_success = True\n\n\n except NetMikoAuthenticationException:\n print(f\"\\tNetMikoAuthenticationException: Device failed SSH Authentication with username {uname}\")\n missing_devices = missing_device_log(missing_devices, mgmt_ip, 'Failed Authentication')\n login_success = False\n # continue\n\n except (EOFError, SSHException, NetMikoTimeoutException):\n print('\\tSSH is not enabled for this device.')\n missing_devices = missing_device_log(missing_devices, mgmt_ip, 'Failed SSH')\n login_success = False\n try_telnet = True\n # continue\n\n except Exception as e:\n print(\"\\tGeneral Exception: ERROR!:\" + str(sys.exc_info()[0]) + \"==>\" + str(sys.exc_info()[1]))\n print(str(e))\n missing_devices = 
missing_device_log(missing_devices, mgmt_ip, 'General Exception')\n login_success = False\n # continue\n\n if login_success:\n print(\"\\t**** SSH Login Succeeded! Will not attempt login with any other credentials.\")\n # Break out of credential loop\n break\n else:\n print(\"\\t**** SSH Login Failed!\")\n # continue\n\n # Try Telnet\n if try_telnet:\n print(\"\\t**** Attempting user credentials for \" + uname + \" with Telnet.\")\n\n try:\n dev_conn = ConnectHandler(device_type='cisco_ios_telnet', ip=mgmt_ip, username=uname,\n password=upwd,\n secret=epwd)\n login_success = True\n\n except NetMikoAuthenticationException:\n print(f\"\\tNetMikoAuthenticationException: Device failed SSH Authentication with username {uname}\")\n missing_devices = missing_device_log(missing_devices, mgmt_ip, 'Failed Authentication')\n login_success = False\n continue\n\n except Exception as e:\n print(\"\\tGeneral Exception: ERROR!:\" + str(sys.exc_info()[0]) + \"==>\" + str(sys.exc_info()[1]))\n print(str(e))\n missing_devices = missing_device_log(missing_devices, mgmt_ip, 'General Exception')\n login_success = False\n continue\n\n if login_success:\n print(\"\\t**** Telnet Login Succeeded! Will not attempt login with any other credentials.\")\n # Break out of credential loop\n break\n else:\n print(\"\\t**** Telnet Login Failed!\")\n continue\n\n if login_success:\n # Check to see if login has resulted in enable mode (i.e. priv level 15)\n is_enabled = dev_conn.check_enable_mode()\n\n if not is_enabled:\n try:\n dev_conn.enable()\n enable_success = True\n except Exception as e:\n print(str(e))\n print(\"\\tCannot enter enter enable mode on device!\")\n missing_devices = missing_device_log(missing_devices, mgmt_ip, 'failed enable')\n enable_success = False\n continue\n else:\n print(\"\\tDevice already in enabled mode!\")\n enable_success = True\n\n if enable_success:\n\n for cmd in commands:\n output += dev_conn.send_command(cmd, strip_prompt=False, strip_command=False)\n dev_conn.exit_config_mode()\n dev_conn.disconnect()\n\n # output contains a stream of text vs individual lines\n # split into individual lies for further parsing\n # output_lines = re.split(r'[\\n\\r]+', output)\n\n # show_info = get_show_info(output_lines)\n #\n # if show_info['hostname']:\n # hostname = show_info.pop('hostname')\n\n # print(\"Information for device: \" + hostname)\n # for k, v in show_info.items():\n # print(\"\\t\" + k +\"\\t\\t-\\t\" + v)\n\n # Save output to file\n timestr = time.strftime(\"%Y%m%d-%H%M%S\")\n\n log_filename = hostname + \"-\" + timestr + \".txt\"\n log_filename = os.path.join(path, log_filename)\n\n log_file = open(log_filename, 'w')\n log_file.write(\"!#Output file for device \" + hostname + \"\\n\")\n log_file.write(\"!#Commands executed on \" + timestr + \"\\n\\r\")\n log_file.write(\"!\\n\")\n log_file.write(output)\n log_file.close()\n devices_verified += 1\n print(\"\\nOutput results saved in: \" + log_filename + \"\\n\\n\")\n\n\n else:\n # Device does not PING\n print(\"Device is unreachable\")\n missing_devices.append(mgmt_ip)\n\n # Totals Verification\n if arguments.icmppingonly:\n info = (\"Total number of devices in IP list:\\t\\t\" + str(total_devices) + \"\\n\",\n \"Total number of devices which responded to pings:\\t\" + str(devices_verified) + \"\\n\")\n else:\n info = (\"Total number of devices in IP list:\\t\\t\" + str(total_devices) + \"\\n\",\n \"Total number of show command output files:\\t\" + str(devices_verified) + \"\\n\")\n\n\n # Print Note on totals\n for note in info:\n 
print(note)", "def get_raid_config(bmc):\n controllers = bmc.list_raid_controllers()\n pdisks = bmc.list_physical_disks()\n vdisks = bmc.list_virtual_disks()\n controllers = namedtuples_to_dicts(controllers)\n pdisks = namedtuples_to_dicts(pdisks)\n vdisks = namedtuples_to_dicts(vdisks)\n return controllers, pdisks, vdisks", "def _update_auto_config(self):\n\n # Initialize the yaml data\n nodes = {}\n with open(self._autoconfig_filename, \"r\") as stream:\n try:\n ydata = yaml.load(stream)\n if \"nodes\" in ydata:\n nodes = ydata[\"nodes\"]\n except yaml.YAMLError as exc:\n print(exc)\n return\n\n for i in nodes.items():\n key = i[0]\n node = i[1]\n\n # Interfaces\n node[\"interfaces\"] = {}\n for item in self._nodes[key][\"interfaces\"].items():\n port = item[0]\n interface = item[1]\n\n node[\"interfaces\"][port] = {}\n addr = \"{}\".format(interface[\"pci_address\"])\n node[\"interfaces\"][port][\"pci_address\"] = addr\n if \"mac_address\" in interface:\n node[\"interfaces\"][port][\"mac_address\"] = interface[\"mac_address\"]\n\n if \"total_other_cpus\" in self._nodes[key][\"cpu\"]:\n node[\"cpu\"][\"total_other_cpus\"] = self._nodes[key][\"cpu\"][\n \"total_other_cpus\"\n ]\n if \"total_vpp_cpus\" in self._nodes[key][\"cpu\"]:\n node[\"cpu\"][\"total_vpp_cpus\"] = self._nodes[key][\"cpu\"][\n \"total_vpp_cpus\"\n ]\n if \"reserve_vpp_main_core\" in self._nodes[key][\"cpu\"]:\n node[\"cpu\"][\"reserve_vpp_main_core\"] = self._nodes[key][\"cpu\"][\n \"reserve_vpp_main_core\"\n ]\n\n # TCP\n if \"active_open_sessions\" in self._nodes[key][\"tcp\"]:\n node[\"tcp\"][\"active_open_sessions\"] = self._nodes[key][\"tcp\"][\n \"active_open_sessions\"\n ]\n if \"passive_open_sessions\" in self._nodes[key][\"tcp\"]:\n node[\"tcp\"][\"passive_open_sessions\"] = self._nodes[key][\"tcp\"][\n \"passive_open_sessions\"\n ]\n\n # Huge pages\n node[\"hugepages\"][\"total\"] = self._nodes[key][\"hugepages\"][\"total\"]\n\n # Write the auto config config file\n with open(self._autoconfig_filename, \"w\") as yamlfile:\n yaml.dump(ydata, yamlfile)", "def __init__ (self, config_yaml):\n configdef = yaml.safe_load(io.StringIO(config_yaml)) \n\n if \"filters\" not in configdef:\n configdef = dict(filters=[configdef])\n\n self._configs = []\n\n for definition in configdef[\"filters\"]:\n config = Bunch( valid_from = None\n , volume_follows = False\n , copy_last_price = False\n , copy_last_volume = False\n , qualifier_include_filters = []\n , qualifier_exclude_filters = []\n , exclude_filters = [] )\n\n if \"filter\" in definition and definition[\"filter\"] != None:\n for exclude_filter in definition[\"filter\"]:\n parts = exclude_filter.split(\",\")\n if parts[0] == \"floor\":\n config.exclude_filters.append(FloorFilter(float(parts[1]), \"price\"))\n elif parts[0] == \"cap\":\n config.exclude_filters.append(CapFilter(float(parts[1]), \"price\")) \n elif parts[0] == \"step\":\n config.exclude_filters.append(StepFilter(int(parts[1]), float(parts[2]), float(parts[3]), \"price\"))\n else:\n raise Exception(\"Unknown filter (%s)\" % (parts[0])) \n \n if \"remove\" in definition and definition[\"remove\"] != None:\n for exclude_filter in definition[\"remove\"]:\n config.qualifier_exclude_filters.append(QualifierFilter(exclude_filter))\n \n if \"allow\" in definition and definition[\"allow\"] != None:\n for include_filter in definition[\"allow\"]:\n config.qualifier_include_filters.append(QualifierFilter(include_filter))\n\n if \"volFollows\" in definition: config.volume_follows = definition[\"volFollows\"] \n if 
\"copyLast\" in definition and definition[\"copyLast\"] != None:\n config.copy_last_price = definition[\"copyLast\"] \n config.copy_last_volume = definition[\"copyLast\"] \n if \"volumeLimit\" in definition and definition[\"volumeLimit\"] != None:\n config.exclude_filters.append(CapFilter(definition[\"volumeLimit\"], \"volume\"))\n if \"validFrom\" in definition and definition[\"validFrom\"] != None:\n valid_from = datetime.datetime.strptime(definition[\"validFrom\"], \"%Y-%m-%d %H:%M:%S\")\n valid_from.replace(tzinfo=pytz.utc)\n config.valid_from = common.Time.tick(valid_from)\n if \"weekTimezone\" in definition and definition[\"weekTimezone\"] != None:\n config.exclude_filters.append(WeekendFilter(definition[\"weekTimezone\"], definition[\"weekEnd\"], definition[\"weekStart\"]))\n\n self._configs.append(config)\n \n self._config_index = 0\n self._config_count = len(self._configs)", "def main():\n required_if = [\n (\"state\", \"merged\", (\"config\",)),\n (\"state\", \"replaced\", (\"config\",)),\n (\"state\", \"rendered\", (\"config\",)),\n (\"state\", \"overridden\", (\"config\",)),\n (\"state\", \"parsed\", (\"running_config\",)),\n ]\n module = AnsibleModule(\n argument_spec=Acl_interfacesArgs.argument_spec,\n required_if=required_if,\n supports_check_mode=True,\n )\n result = Acl_interfaces(module).execute_module()\n\n module.exit_json(**result)", "def load_configs()->dict:\n \n print(os.getcwd())\n module_path = str(Path(__file__).parent.absolute())\n print(module_path)\n \n with open(os.path.join(module_path, \"config.yaml\")) as f:\n output = yaml.safe_load(f)\n f.close()\n \n return dict(output)", "def read_all_containers():\n with open(\"group_vars/container/container_frontend.yml\", 'r') as stream:\n try:\n fe_data = yaml.load(stream)\n except yaml.YAMLError as exc:\n print(exc)\n with open(\"group_vars/container/container_backend.yml\", 'r') as stream:\n try:\n be_data = yaml.load(stream)\n except yaml.YAMLError as exc:\n print(exc)\n fe_data.update(be_data)\n return fe_data", "def test_iosxr_netconf_get_config(nornir):\n nr = nornir.filter(name=DEVICE_NAME)\n\n result = nr.run(\n netconf_get_config,\n source=\"running\",\n path=\"\"\"\n <interfaces xmlns=\"http://openconfig.net/yang/interfaces\">\n </interfaces>\n \"\"\",\n filter_type=\"subtree\",\n xmldict=True,\n )\n assert \"MgmtEth0/0/CPU0/0\" == result[DEVICE_NAME].result[\"xml_dict\"][\"data\"][\"interfaces\"][\"interface\"][0][\"name\"]\n assert result[DEVICE_NAME].result[\"rpc\"]\n assert result[DEVICE_NAME].result[\"rpc\"].data_xml\n # with open(\"tests/test_data/get-iosxr-config.xml\", \"w+\") as file:\n # file.write(result[DEVICE_NAME].result[\"rpc\"].data_xml)" ]
[ "0.57955146", "0.5357593", "0.5309615", "0.52972466", "0.52823687", "0.52815616", "0.5225071", "0.5193531", "0.51922613", "0.5180207", "0.5171256", "0.5163886", "0.51440394", "0.5126848", "0.5065334", "0.5064806", "0.5061558", "0.50566524", "0.5048825", "0.5046078", "0.50313157", "0.50112766", "0.4965577", "0.49616477", "0.496006", "0.48806787", "0.48702684", "0.48635146", "0.48424098", "0.48233685", "0.4818057", "0.48022527", "0.48018914", "0.4798401", "0.4797551", "0.4790246", "0.47823155", "0.47665498", "0.4752413", "0.47504455", "0.47459045", "0.47436354", "0.4739246", "0.47228628", "0.47192308", "0.47177914", "0.47157392", "0.4710916", "0.47103956", "0.4708633", "0.4702336", "0.469929", "0.46956506", "0.46930316", "0.46895653", "0.46872184", "0.46829966", "0.46782157", "0.46695128", "0.46674028", "0.46569592", "0.46568647", "0.46548223", "0.46465313", "0.464478", "0.46417055", "0.4641078", "0.46354672", "0.46152723", "0.46135655", "0.46112627", "0.4610114", "0.46006602", "0.45935386", "0.45866448", "0.4586486", "0.45804194", "0.45788333", "0.45785812", "0.45711666", "0.45653546", "0.45603308", "0.45595506", "0.45554957", "0.4550486", "0.45418605", "0.45412493", "0.4538668", "0.45330793", "0.45252013", "0.4521308", "0.45113224", "0.45045027", "0.4500696", "0.4488729", "0.44857225", "0.4481349", "0.4478699", "0.44776484", "0.4476175" ]
0.6786082
0
read (tables, columns) from the table definition file
def table_col(file_name='tpch'):
    path = './data/' + file_name + "/sql/{}-create.sql".format("tpch")
    regex = re.compile(';\($')
    tbl_name = {}
    tbl = ""
    with open(path, 'r') as f:
        for line in f.readlines():
            if "CREATE TABLE" in line:
                tbl = line.split()[2]
                tbl_name[tbl.lower()] = []
            elif line != "\n" and ');' not in line and regex.search(line) == None:
                col = line.split()[0]
                tbl_name[tbl.lower()].append(col.lower())
    return tbl_name
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def tabdes(filename, body):\n # XXX checksums ignored\n head = Struct(\"!BiHBxxxB\")\n body = Struct(body)\n # foot = Struct(\"!4s\")\n\n data = []\n with open(filename, \"rb\") as f:\n buffer = f.read()\n _, _, count, length, _ = head.unpack_from(buffer, 0)\n offset = head.size\n for i in range(count):\n row = body.unpack_from(buffer, offset)\n data.append(row)\n offset += body.size\n else:\n print(\"read %d rows\" % len(data))\n # offset = 2 ** 16 - foot.size\n # _, foot.unpack_from(buffer, offset))\n return data", "def _read_table(hdulist, extname, **kwargs):\n t = _read_ext(Table, hdulist, extname, **kwargs)\n h = hdulist[extname].header\n for i in range(h['TFIELDS']):\n try:\n t.columns[i].unit = h['TUNIT%d' % (i + 1)]\n except Exception:\n pass\n return t", "def load(file_name):\n with open(file_name, newline='') as f:\n reader = csv.reader(f)\n data = list(reader)\n\n schema = [x.strip() for x in data[0]]\n table = [[int(el) for el in row] for row in data[1:]]\n\n return schema, table", "def __load_handler(self):\n with open(self.path) as file:\n for line in file:\n if line.startswith(\"\"\"# TABLE: \"\"\"):\n self.columndefinition = (line.strip('\\n')\n .replace(\"\"\"# TABLE: \"\"\", ''))\n self.tablename = self.name.replace('.', '_')\n self.tablename = self.tablename.replace('-', '_')\n self.md5_tablename = (hashlib.md5(self.tablename)\n .hexdigest()[:30])\n for columnelement in self.columndefinition.split(','):\n column = columnelement.split(':')[0].strip()\n self.columnnames.append(column)\n\n self.is_mime_handler = True", "def parse(self, sql):\n # Get a temporary file name for sqlite\n db_file = tempfile.NamedTemporaryFile('w')\n # Connect to the temporary file.\n self.db = sqlite3.connect(db_file.name)\n # Enable foreign keys.\n self.db.execute('pragma foreign_keys=ON')\n # Get a cursor instance.\n self.cursor = self.db.cursor()\n\n # If sql is not a string assume it is a file.\n if not isinstance(sql, str):\n # Read the file into sql.\n sql = str(sql.read())\n\n # Execute the SQL statements from the input.\n self.cursor.executescript(sql)\n\n # Get all table names.\n self.cursor.execute(\n \"SELECT name FROM sqlite_master WHERE type='table';\")\n tables = self.cursor.fetchall()\n\n # Initialise the variable containing the parsed tables.\n self.tables = OrderedDict()\n # Run through all tables.\n for table in tables:\n # Create an entry for each table.\n self.tables[table[0]] = OrderedDict()\n\n # Get info on columns and primary keys.\n self.cursor.execute('PRAGMA table_info({})'.format(table[0]))\n # For each column\n for sql_column in self.cursor.fetchall():\n # Create an empty column entry.\n column = dict()\n # Set the name.\n column['name'] = sql_column[1]\n # Set the type\n column['type'] = sql_column[2]\n # Determine if this is a primary key\n column['primary'] = False\n if sql_column[5] == 1:\n column['primary'] = True\n # We do not know if this key has a reference yet.\n column['foreign'] = False\n\n # Add the column to the table.\n self.tables[table[0]][sql_column[1]] = column\n\n # Get information on foreign keys.\n self.cursor.execute('PRAGMA foreign_key_list({});'.format(table[0]))\n # Run through all foreign keys\n for foreign_key in self.cursor.fetchall():\n # Find the column by its name.\n for name, column in self.tables[table[0]].items():\n # Search for the name of the source column.\n if name == foreign_key[3]:\n # Add the referenced table and column in dot notation.\n self.tables[table[0]][name]['foreign'] = '{}.{}'.format(foreign_key[2], 
foreign_key[4])\n\n # Close the database connection\n self.db.close()\n # Make the cursor unusable for good measure.\n self.cursor = None\n\n # Run through the parsed tables and dispatch to the related call backs.\n for table_name, columns in self.tables.items():\n # New table.\n self.add_table(table_name)\n\n # Table columns.\n for column in columns.values():\n # Primary key.\n if column['primary'] is True:\n self.add_column_primary(column['name'], column['type'])\n # Foreign key.\n if column['foreign'] is not False:\n self.add_column_foreign(column['name'], column['type'], column['foreign'])\n # Just a column.\n if ((column['primary'] is not True) and\n (column['foreign'] is False)):\n self.add_column(column['name'], column['type'])", "def parse_table_schema(conn):\r\n cur = conn.cursor()\r\n\r\n cur.execute(\"PRAGMA table_info({})\".format(\"week5\"))\r\n print(cur.fetchall())", "def read_descriptions(args):\n\n table_list = check_output(\n 'echo \"show tables %s;\" | tql' % args.database, shell=True\n ).split(\n \"\\n\"\n )\n for table in table_list:\n table_details = table.split(\"|\")\n if len(table_details) >= 2:\n schema_name = table_details[0].strip()\n table_name = table_details[1].strip()\n\n schema = descriptions.get(schema_name, None)\n if schema is None:\n schema = {}\n\n table = schema.get(table_name, None)\n if table is None:\n table = {}\n\n column_list = check_output(\n 'echo \"show table %s.%s.%s;\" | tql'\n % (args.database, schema_name, table_name),\n shell=True,\n ).split(\n \"\\n\"\n )\n for column in column_list:\n column_details = column.split(\"|\")\n if len(column_details) >= 2:\n column_name = column_details[0].strip()\n column_type = column_details[2].strip()\n table[column_name] = column_type\n\n schema[table_name] = table\n descriptions[schema_name] = schema", "def load_schema_for_modelling():\n filename = \"modelling_schema.csv\"\n folder = os.path.abspath(os.path.dirname(__file__))\n path = os.path.join(folder, filename)\n return pd.read_csv(path).set_index('table_name')", "def read_database():\n file = tables.open_file(glob.datafile)\n table_d = file.root.VelibData.dynamic\n table_s = file.root.VelibData.static\n n_rows = len(table_d)\n print \"Nrows in dynamic table:\", n_rows\n print \"N stations:\", len(table_d[0][\"last_update\"])\n print \"Time of most recent sampling:\", \\\n time.asctime(time.localtime(recover_time(table_d[-1][\"sample_time\"])))\n print \"Nbikes available at most recent sampling:\", \\\n table_d[n_rows-1][\"available_bikes\"]\n print \"Time of last_update at most recent sampling:\", \\\n time.asctime(\n time.localtime(recover_time(table_d[n_rows-1][\"last_update\"][0])))\n print \"Number arr\", table_s[0][\"number\"]\n file.close()", "def readFromFile(self, inp):\n f = open(inp, \"r\")\n line = f.readline()\n line = line.strip().split(sep=\" \", maxsplit=3)\n self.columns, self.chars, self.pwdLength, _ = line\n self.columns = int(self.columns)\n self.pwdLength = int(self.pwdLength)\n self.func = lmdes\n line = f.readline()\n while line != '':\n pwd, hashV = line.strip().split(sep=\" \", maxsplit=1)\n self.table.insert(hashV, pwd)\n line = f.readline()\n f.close()", "def _ReadDefinitions(filename, delimiter):\n return pd.read_csv(filename, sep=delimiter)", "def _read_file_definition(self):\n row_count = 0\n #\n # THIS METHOD ASSUMES A 14 ROW HEADER\n # If the number of header row lines in the glider ASCII input file changes from 14,\n # this method will NOT WORK\n num_hdr_lines = 14\n\n header_pattern = r'(.*): (.*)$'\n header_re = 
re.compile(header_pattern)\n\n line = self._stream_handle.readline()\n\n while line and row_count < num_hdr_lines:\n\n match = header_re.match(line)\n\n if match:\n key = match.group(1)\n value = match.group(2)\n value = value.strip()\n\n # update num_hdr_lines based on the header info.\n if key == 'num_ascii_tags':\n # this key has a required value of 14, otherwise we don't know how to parse the file\n if int(value) != num_hdr_lines:\n raise DatasetParserException(\"Header must be %d rows, but it is %s\" % (num_hdr_lines, value))\n\n elif key == 'num_label_lines':\n # this key has a required value of 3, otherwise we don't know how to parse the file\n if int(value) != 3:\n raise DatasetParserException(\"There must be 3 Label lines from the header for this parser\")\n\n elif key == 'sensors_per_cycle':\n # save for future use\n self._header_dict[key] = int(value)\n\n elif key in ['filename_label', 'mission_name', 'fileopen_time']:\n # create a dictionary of these 3 key/value pairs strings from\n # the header rows that need to be saved for future use\n self._header_dict[key] = value\n\n else:\n log.warn(\"Failed to parse header row: %s.\", line)\n\n row_count += 1\n # only read the header lines in this method so make sure we stop\n if row_count < num_hdr_lines:\n line = self._stream_handle.readline()\n\n if row_count < num_hdr_lines:\n log.error('Not enough data lines for a full header')\n raise DatasetParserException('Not enough data lines for a full header')", "def read_table(file, **kwargs):\n extn = Reader.get_extension(file).lower()\n if extn.startswith('.xls'):\n return read_table_excel(file, **kwargs)\n elif extn == '.gsheet':\n if hasattr(file, 'as_posix'): # a pathlib.Path object\n file = str(file)\n elif hasattr(file, 'name'): # a TextIOWrapper object\n file = file.name\n return read_table_gsheets(file[:-7], **kwargs) # ignore the extension\n else:\n return read_table_text(file, **kwargs)", "def _read_tab(pth):\n if not os.path.exists(pth):\n raise SampleTableFileException(\n \"File does not exist: {}\".format(pth))\n read_csv_kwargs = {\"engine\": \"python\", \"dtype\": str,\n \"index_col\": False, \"keep_default_na\": False,\n \"na_values\": [\"\"]}\n return pd.read_csv(pth, sep=infer_delimiter(pth), **read_csv_kwargs)", "def parse_table_file(file):\n\n rows = [row for row in csv.reader(file.decode().splitlines(), delimiter=\",\",\n doublequote=True, escapechar=None, quotechar='\"',\n quoting=csv.QUOTE_MINIMAL, skipinitialspace=True)]\n\n if len(rows) < 2:\n raise Exception(\"File must contain at least two rows.\")\n\n # get header\n attributes = rows[0]\n\n # go through the csv by row\n data = []\n for row in rows[1:]:\n data.append(row)\n\n if len(attributes) < 1:\n raise Exception(\"File must contain at least one column.\")\n\n return attributes, data", "def read(filename, replace_columns=True):\n f = open(filename)\n lines = f.readlines()\n f.close()\n\n # Extract column names from the odt file.\n for i, line in enumerate(lines):\n if line.startswith('# Columns:'):\n columns = []\n odt_section = i # Should be removed after runs are split.\n for part in re.split('Oxs_|Anv_|Southampton_', line)[1:]:\n for char in [\"{\", \"}\", \" \", \"\\n\"]:\n part = part.replace(char, '')\n if replace_columns:\n if part in columns_dic.keys():\n columns.append(columns_dic[part])\n else:\n msg = \"Entry {} not in lookup table.\".format(part)\n raise ValueError(msg)\n else:\n columns.append(part)\n\n # Extract units from the odt file.\n for i, line in enumerate(lines):\n if line.startswith('# 
Units:'):\n units = line.split()[2:]\n\n # Extract the data from the odt file.\n data = []\n for i, line in enumerate(lines[odt_section:]):\n if not line.startswith(\"#\"):\n data.append([float(number) for number in line.split()])\n\n df = pd.DataFrame(data, columns=columns)\n # next line is required to allow adding list-like attribute to pandas DataFrame\n # see https://github.com/pandas-dev/pandas/blob/2f9d4fbc7f289a48ed8b29f573675cd2e21b2c89/pandas/core/generic.py#L3631\n df._metadata.append('units')\n df.units = dict(zip(columns, units))\n return df", "def load_biom_table_with_file_contents(biom_f):\n table = parse_biom_table(biom_f)\n if hasattr(biom_f, 'seek'):\n biom_f.seek(0)\n return table, biom_f", "def enumerate_tables(self, files=False):\n dbinfo = self.stru.readrec(1)\n if dbinfo[:1] != b\"\\x03\":\n print(\"WARN: expected dbinfo to start with 0x03\")\n try:\n dbdef = self.decode_db_definition(dbinfo[1:])\n except Exception as e:\n print(\"ERROR decoding db definition: %s\" % e)\n print(\"This could possibly mean that you need to try with the --strucrack option\")\n return\n\n for k, v in dbdef.items():\n if k.startswith(\"Base\") and k[4:].isnumeric():\n if files and k[4:] == \"000\":\n yield TableDefinition(v)\n if not files and k[4:] != \"000\":\n yield TableDefinition(v, dbdef.get(\"BaseImage\" + k[4:], b''))", "def read(self, database ='project'):\n\t\tfile = open(self.file_name, \"r\")\n\n\t\ti = 1\n\t\tseptics = []\n\t\tfor line in file:\n\t\t\tif i > 2:\n\t\t\t\tval = line.split()\n\t\t\t\tself.check_cols(val, 13, 'septic')\n\n\t\t\t\tsep = {\n\t\t\t\t\t'name': val[0].lower(),\n\t\t\t\t\t'q_rate': val[1],\n\t\t\t\t\t'bod': val[2],\n\t\t\t\t\t'tss': val[3],\n\t\t\t\t\t'nh4_n': val[4],\n\t\t\t\t\t'no3_n': val[5],\n\t\t\t\t\t'no2_n': val[6],\n\t\t\t\t\t'org_n': val[7],\n\t\t\t\t\t'min_p': val[8],\n\t\t\t\t\t'org_p': val[9],\n\t\t\t\t\t'fcoli': val[10],\n\t\t\t\t\t'description': val[12] if val[12] != 'null' else None # 12 index because extra column\n\t\t\t\t}\n\t\t\t\tseptics.append(sep)\n\t\t\ti += 1\n\n\t\tif database == 'project':\n\t\t\tdb_lib.bulk_insert(project_base.db, project_parmdb.Septic_sep, septics)\n\t\telse:\n\t\t\tdb_lib.bulk_insert(datasets_base.db, datasets_parmdb.Septic_sep, septics)", "def dump_db_table_defs(self, args):\n dbinfo = self.stru.readrec(1)\n if dbinfo[:1] != b\"\\x03\":\n print(\"WARN: expected dbinfo to start with 0x03\")\n dbdef = self.decode_db_definition(dbinfo[1:])\n self.dump_db_definition(args, dbdef)\n\n for k, v in dbdef.items():\n if k.startswith(\"Base\") and k[4:].isnumeric():\n print(\"== %s ==\" % k)\n tbdef = TableDefinition(v, dbdef.get(\"BaseImage\" + k[4:], b''))\n tbdef.dump(args)\n elif k == \"NS1\":\n self.dump_ns1(v)", "def read_schema_from_db(cur, table):\n num_rows = cur.execute(\"\"\"DESCRIBE {}\"\"\".format(table))\n tbl_schema = []\n for i in range(num_rows):\n row = cur.fetchone()\n tbl_schema.append([row[0], row[1]])\n return tbl_schema", "def load_fits_table(fname):\n\treturn fits.open(fname)[1].data", "def get_table_info(line):\n\n COMMENT_EXPR = '-- Name: '\n TYPE_EXPR = '; Type: '\n SCHEMA_EXPR = '; Schema: '\n OWNER_EXPR = '; Owner: '\n TABLESPACE_EXPR = '; Tablespace: '\n\n temp = line.strip('\\n')\n type_start = get_all_occurrences(TYPE_EXPR, temp)\n schema_start = get_all_occurrences(SCHEMA_EXPR, temp)\n owner_start = get_all_occurrences(OWNER_EXPR, temp)\n tblspace_start = get_all_occurrences(TABLESPACE_EXPR, temp)\n if len(type_start) != 1 or len(schema_start) != 1 or len(owner_start) != 1:\n return 
(None, None, None, None)\n name = temp[len(COMMENT_EXPR) : type_start[0]]\n type = temp[type_start[0] + len(TYPE_EXPR) : schema_start[0]]\n schema = temp[schema_start[0] + len(SCHEMA_EXPR) : owner_start[0]]\n if not tblspace_start:\n tblspace_start.append(None)\n owner = temp[owner_start[0] + len(OWNER_EXPR) : tblspace_start[0]]\n return (name, type, schema, owner)", "def readFile(fname):\n\n fromto = []\n cols = []\n with open(fname , 'r') as f:\n cols = f.readline().split(\",\")[0:4] # Headline\n for line in f:\n tm, frm, to, am = line.split(\",\")[0:4]\n frm = int(frm.lstrip())\n to = int(to.lstrip())\n fromto.append((frm,to))\n return cols, fromto", "def read(self):\r\n entById = {}\r\n entsByName = {}\r\n header = 'HEADER '\r\n readheader = False\r\n for line in self.file:\r\n e = self.parseLine(line)\r\n if e:\r\n entById[int(e[\"id\"])] = e\r\n ids = e.get(e[\"name\"],[])\r\n ids.append(e[\"id\"])\r\n entsByName[e[\"name\"]] = list(set(ids))\r\n elif 'HEADER' in line:\r\n readheader = True\r\n elif readheader:\r\n if 'ENDSEC' in line:\r\n readheader = False\r\n else:\r\n header += line\r\n \r\n return [entById, entsByName, header]", "def table_columns(auth, table_name):\n return [row[0] for row in DBMySQL.csfr(auth, \"describe \" +table_name)]", "def read_table(filename, separator=',', dtype='float'):\n\n fp = open(filename, 'r')\n\n headers = fp.readline()\n\n # print \"headers = \", headers\n headers = [h.strip() for h in headers.split(separator)]\n headers.remove('')\n\n #print \"headers = \", headers\n\n columns = [[] for h in headers]\n #table = dict.fromkeys(headers, [])\n\n #table = Container.fromkeys(headers, [])\n\n #print \"table = \", table\n\n for line in fp.readlines():\n\n values = [h.strip() for h in line.split(separator)]\n values.remove('')\n\n #print \"values = \", values\n\n for k, v in enumerate(values):\n\n #print k, \" = \", v\n\n\n if dtype == \"float\":\n v = float(v)\n\n columns[k].append(v)\n #table[k].append(v)\n\n table = Container(**dict(list(zip(headers, columns))))\n table.headers = headers\n\n return table", "def read_stats(filename):\n header = {}\n tableinfo = {}\n measures = []\n rowmeasures = []\n\n with open(filename, 'rt') as fp:\n lines = fp.readlines()\n for line in lines:\n if line == line[0]:\n continue\n #parse commented header\n if line.startswith('#'):\n fields = line.split()[1:]\n if len(fields) < 2:\n continue\n tag = fields[0]\n if tag == 'TableCol':\n col_idx = int(fields[1])\n if col_idx not in tableinfo:\n tableinfo[col_idx] = {}\n tableinfo[col_idx][fields[2]] = ' '.join(fields[3:])\n if tableinfo[col_idx][fields[2]] == \"StructName\":\n struct_idx = col_idx\n elif tag == \"Measure\":\n fields = ' '.join(fields).replace('CortexVol ', 'CortexVol, ').split()\n fields = ' '.join(fields[1:]).split(', ')\n measures.append({'structure': fields[0],\n 'name': fields[1],\n 'description': fields[2],\n 'value': fields[3],\n 'units': fields[4],\n 'source': 'Header'})\n elif tag == \"ColHeaders\":\n if len(fields) != len(tableinfo):\n for idx, fieldname in enumerate(fields[1:]):\n if idx + 1 in tableinfo:\n continue\n tableinfo[idx + 1] = {'ColHeader': fieldname,\n 'Units': 'unknown',\n 'FieldName': fieldname}\n else:\n continue\n else:\n header[tag] = ' '.join(fields[1:])\n else:\n #read values\n row = line.split()\n values = {}\n measures.append({'structure': row[struct_idx-1],\n 'items': [],\n 'source': 'Table'}),\n for idx, value in enumerate(row):\n if idx + 1 == struct_idx:\n continue\n measures[-1]['items'].append({\n 'name': 
tableinfo[idx + 1]['ColHeader'],\n 'description': tableinfo[idx + 1]['FieldName'],\n 'value': value,\n 'units': tableinfo[idx + 1]['Units'],\n })\n return header, tableinfo, measures", "def read_sql(self):\n pass", "def find_table(input_file):\n contents = open(input_file, 'r').readlines()\n title = []\n for line in contents:\n if 'CREATE TABLE' in line:\n T = re.search('CREATE TABLE (.+?) \\(',line).group(1).strip('\\\"')\n title.append(T)\n if len(title) != 0:\n return True, title\n else:\n return False, title", "def readFileTable(f, header):\n version, dataOffset, archiveFileCount, fileTableLength, endianness, fileCount = header\n \n def readFileRecords(f):\n for i in range(fileCount):\n recordBytes = f.read(FILE_RECORD_LENGTH)\n path, offset, size, endianness, archiveFileIndex = struct.unpack(\"<256sLLLL\", recordBytes)\n path, _ = path.decode('ascii').split(\"\\0\", 1)\n yield (path, offset, size, endianness, archiveFileIndex)\n \n return list(readFileRecords(f))", "def schema_load(filename):\n print(uc.schema_load(filename))", "def read_mumax3_table(filename):\n \n table = pd.read_csv(filename, sep='\\t')\n table.columns = ' '.join(table.columns).split()[1::2]\n \n return table", "def import_schemas_from_file():\n with open('./tblSchemas') as schemas_file:\n schemas = {}\n for line in schemas_file:\n line = line.split()\n if len(line) == 0: continue\n if line[0] == 'tblname':\n tbl_name = line[1]\n schemas[tbl_name] = []\n else:\n schemas[tbl_name].append(line)\n return schemas", "def read(tablename: str()):\n return pd.read_csv(tablename, dtype={'source_id': str})", "def create_table_from_file():\n\n full_path = os.getcwd()\n file_name = full_path + \"/inventory/inventory.csv\"\n\n if os.path.exists(file_name):\n table = data_manager.get_table_from_file(file_name)\n\n else:\n ui.print_error_message(\"There is no file to read!\")\n table = []\n\n return table", "def read_bed_file(path, labelnum=0):\n\n bed_df = pd.read_table(path, sep=\"\\t\", header=None)\n colnames = generate_colnames(bed_df, labelnum)\n bed_df.columns = colnames\n print(bed_df.head())\n return bed_df", "def _read_metadata(self, conn, tbl_name): \n # Split table name in libname and actual table name\n name, schema = tuple(tbl_name.split('.'))\n # Query the Vertica dictionary to get types and formats\n query = \"\"\"\n SELECT column_name as NAME, data_type as TYPE, data_type_length AS LENGTH \n FROM v_catalog.columns \n WHERE table_schema = '{}' AND table_name = '{}'\n \"\"\".format(name, schema)\n \n md = conn.fetch(query)\n if not len(md):\n raise ValueError('No metadata for table {}'.format(tbl_name))\n\n md = (md\n # Use variable names as row names, then remove the NAME column\n .set_index('NAME', inplace=False)\n # Compute the number of bytes for each variable It is given by the LENGTH variable\n .rename({'LENGTH': 'NUM_BYTES'}, axis=1))\n\n # Identify data types\n type_upper = md['TYPE'].str.upper()\n md['IS_TEXT'] = type_upper.str.startswith('VARCHAR')\n md['IS_BOOLEAN'] = type_upper == 'BOOLEAN'\n md['IS_INTEGER'] = type_upper.isin(['INT', 'INTEGER'])\n md['IS_FLOAT'] = (type_upper == 'FLOAT') | type_upper.str.startswith('NUMERIC')\n md['IS_DATE'] = type_upper == 'DATE'\n md['IS_TIMESTAMP'] = type_upper == 'TIMESTAMP'\n md['IS_TIME'] = type_upper == 'TIME'\n # Determine datetime formats for date and time data\n md['DATETIME_FORMAT'] = np.nan\n md.loc[md['IS_DATE'], 'DATETIME_FORMAT'] = 'yyyy-MM-dd'\n md.loc[md['IS_TIME'], 'DATETIME_FORMAT'] = 'HH:mm:ss'\n # Determine datetime formats for timestamp data\n # 
For timestamp data, the right format is:\n # - yyyy-MM-dd HH:mm:ss.0 with a JDBC connection <-- python default\n # - yyyy-MM-dd HH:mm:ss with an ODBC connection\n md.loc[md['IS_TIMESTAMP'], 'DATETIME_FORMAT'] = 'yyyy-MM-dd HH:mm:ss.0'\n\n # Original type\n md.rename({'TYPE': 'TYPE_IN_SOURCE'}, axis=1, inplace=True)\n # Create the metadata catalog\n md = MetadataCatalog(md, is_case_sensitive=False)\n # Check that all formats have been correctly processed\n format_check = md.check_metadata_completeness()\n if not all(format_check):\n unsupported_format = md.get_type_in_source()\n unsupported_format = unsupported_format[~format_check].unique()\n raise ValueError('Unsupported Vertica format: {}'.format(unsupported_format))\n return md", "def load_data(file_name):\n return Orange.data.Table(file_name)", "def load_cdm_tables():\n tpath = os.getcwd() + '/../data'\n cdmpath='https://raw.githubusercontent.com/glamod/common_data_model/master/tables/' # cdm tables \n \n \"\"\" Selecting the list of table definitions. Some of the entires do not have the corresponding implemented tables \"\"\"\n cdmtabledeflist=['id_scheme', 'crs', 'station_type', 'observed_variable', 'station_configuration', 'station_configuration_codes', 'observations_table', \n 'header_table', 'source_configuration', 'sensor_configuration', 'units' , 'z_coordinate_type'] \n cdm_tabdef = dict()\n for key in cdmtabledeflist:\n url='table_definitions'.join(cdmpath.split('tables'))+key+'.csv' # https://github.com/glamod/common_data_model/tree/master/table_definitions/ + ..._.dat \n f=urllib.request.urlopen(url)\n col_names=pd.read_csv(f, delimiter='\\t',quoting=3,nrows=0,comment='#')\n f=urllib.request.urlopen(url)\n tdict={col: str for col in col_names}\n cdm_tabdef[key]=pd.read_csv(f,delimiter='\\t',quoting=3,dtype=tdict,na_filter=False,comment='#')\n \n \n \"\"\" Selecting the list of tables. 'station_configuration_codes','observations_table','header_table' are not implemented in the CDM GitHub\"\"\" \n cdmtablelist=['id_scheme', 'crs', 'station_type', 'observed_variable', 'station_configuration_codes','units'] \n cdm_tab=dict() # dictionary where each key is the name of the cdm table, and the value is read from the .dat file \n for key in cdmtablelist:\n f=urllib.request.urlopen(cdmpath+key+'.dat')\n col_names=pd.read_csv(f,delimiter='\\t',quoting=3,nrows=0)\n f=urllib.request.urlopen(cdmpath+key+'.dat')\n tdict={col: str for col in col_names}\n cdm_tab[key]=pd.read_csv(f,delimiter='\\t',quoting=3,dtype=tdict,na_filter=False)\n\n\n \"\"\" Adding the tables that currently only have the definitions but not the implementation in the CDM, OR need extensions \"\"\" \n cdm_tabdef['header_table'] = pd.read_csv(tpath+'/table_definitions/header_table.csv',delimiter='\\t',quoting=3,comment='#')\n #cdm_tabdef['observations_table'] = pd.read_csv(tpath+'/table_definitions/observations_table.csv',delimiter='\\t',quoting=3,comment='#')\n\n id_scheme={ cdm_tabdef['id_scheme'].element_name.values[0]:[0,1,2,3,4,5,6],\n cdm_tabdef['id_scheme'].element_name.values[1]:['WMO Identifier','Volunteer Observing Ships network code',\n 'WBAN Identifier','ICAO call sign','CHUAN Identifier',\n 'WIGOS Identifier','Specially constructed Identifier']}\n\n cdm_tab['id_scheme']=pd.DataFrame(id_scheme)\n cdm_tab['crs']=pd.DataFrame({'crs':[0],'description':['wgs84']})\n \n \"\"\" Here we add missing entries, e.g. in the z_coordinate_type for the pressure levels in Pascal (the available CDM table in the glamod GitHub rep. 
contains onle the altitude in [meter] \"\"\"\n cdm_tab['station_type']=pd.DataFrame({'type':[0,1],'description':['Radiosonde','Pilot']}) \n cdm_tab['z_coordinate_type']=pd.DataFrame({'type':[0,1],'description':['height (m) above sea level','pressure (Pa)']}) # only the m above sea level is available currently in the GitHub cdm table, added pressure \n \n \n \"\"\" Make dictionary of variables and attributes for the observations table \"\"\" \n dic_obstab_attributes = {}\n for index, row in cdm_tabdef['observations_table'].iterrows():\n dic_obstab_attributes[row['element_name'] ] = {}\n dic_obstab_attributes[row['element_name'] ]['description'] = row.description \n dic_obstab_attributes[row['element_name'] ]['external_table'] = row.external_table \n \n #dic_obs['date_time'] = ['units', 'seconds since 1900-01-01 00:00:00' ]\n \n if not os.path.isfile('dic_obstab_attributes.npy'): \n np.save( 'dic_obstab_attributes' , dic_obstab_attributes )\n\n \"\"\"\n for tab in ['observations_table' , 'header_table', 'sensor_configuration']: \n #for tab in cdm_tabdef: \n \n df = cdm_tabdef[tab]\n variable_type[tab] = {}\n for index,row in df.iterrows():\n if 'kind' in df.columns: \n variable_type[tab][row.element_name ] = kinds[row.kind] \n else:\n rt=row.type\n if row.type=='timestamp':\n rt='timestamp with timezone'\n variable_type[tab][row.element_name ] = kinds[rt] \n \n variable_type['observations_table']['date_time'] = np.int64\n \"\"\"\n \n return cdm_tabdef, cdm_tab, tdict , dic_obstab_attributes", "def load_raw_table(conf, table):\n confrd = load_config_raw_data(conf)\n path_table = Path(confrd[table][\"path\"])\n sep = confrd[table][\"sep\"]\n encoding = confrd[table][\"encoding\"]\n df = pd.read_csv(path_table, sep=sep, encoding=encoding)\n return df", "def readFromFile(self,ffile,nbcolumns=None,columnsNames='yes',name='no',columnsUnits='no'):\n from exceptions import IOError\n try:\n if self.data.shape != (0,0):\n raise Exception(\"The table already contains values\")\n file = open(ffile, 'r')\n except :\n msg=\"can't open file <%s>...\\n\"%ffile\n raise IOError(msg)\n\n\n fileNameColumns=[]\n fileNameUnits=[]\n fileName=None\n \n filemaxnbcol=0\n fileminnbcol=100\n isonvalues=0\n allvaluesbycolonne=[]\n nbvalueline=0\n cpt=1\n for line in file.readlines():\n separe = line.split()\n if (len(separe) == 0 ):\n # blank line\n continue\n \n \n if ( separe[0] == '#' ):\n # comment line\n cpt=cpt+1\n continue\n elif ( separe[0] == '#TITLE:' ):\n # name line\n separe = separe[1:]\n s=''\n for isep in range(len(separe)):\n s=s+separe[isep]+' '\n fileName=s\n pass\n elif ( separe[0] == '#COLUMN_TITLES:' ):\n # column name line\n separe = separe[1:]\n s=''\n for isep in range(len(separe)):\n s=s+separe[isep]\n s=string.strip(s)\n if ( len(s) == 0 ):\n fileNameColumns=[]\n continue\n fileNameColumns = s.split('|')\n pass\n pass\n elif ( separe[0] == '#columnUnits:' ):\n # unit name line\n fileNameUnits = separe[1:]\n pass\n elif ( cpt == 1 ):\n # column name line\n pass\n else:\n # values line\n nbvalueline=nbvalueline+1\n linenbcol=len(separe)\n filemaxnbcol=max(linenbcol,filemaxnbcol)\n fileminnbcol=min(linenbcol,fileminnbcol)\n linevalues=[]\n \n for isep in range(linenbcol): linevalues.append(float(separe[isep]))\n \n # adjust nb columns if not the same on each line\n # or if the first value's line\n if ( filemaxnbcol < len(allvaluesbycolonne) ):\n for icol in range(filemaxnbcol,len(allvaluesbycolonne)):\n allvaluesbycolonne.append([])\n for il in range(nbvalueline-1):\n 
allvaluesbycolonne[il].append(0)\n pass\n pass\n elif ( filemaxnbcol > len(allvaluesbycolonne) ):\n for icol in range(len(allvaluesbycolonne),filemaxnbcol):\n allvaluesbycolonne.append([])\n for il in range(nbvalueline-1):\n allvaluesbycolonne[icol].append(0)\n pass\n pass\n pass\n # add values\n for icol in range(linenbcol): allvaluesbycolonne[icol].append(linevalues[icol])\n for icol in range(linenbcol,filemaxnbcol): allvaluesbycolonne[icol].append(0)\n \n cpt=cpt+1\n pass\n file.close()\n #\n # check consistency beetwen arguments and file contents\n #\n # controlling the table parameters\n # \n if ( fileminnbcol != filemaxnbcol ):\n raise IOError(\"colums must have the same number of rows\")\n \n if nbcolumns:\n if ( filemaxnbcol != nbcolumns ):\n raise IOError(\" problem with the number of columns\")\n pass\n \n # Warnings\n if ( ( columnsNames.lower() == 'no' ) and ( len(fileNameColumns) > 0 ) ):\n raise Warning(\" you should specify column names\")\n \n if ( ( columnsNames.lower() == 'yes' ) and ( len(fileNameColumns) == 0 ) ):\n raise Warning(\"you specified columnName(s) but the file doesn\\'t entail column names\")\n \n if ( len(fileNameColumns) < filemaxnbcol ):\n nbcol=len(fileNameColumns)\n for icol in range (nbcol,filemaxnbcol): fileNameColumns.append('col'+str(icol+1))\n \n effectivecolumnNames=fileNameColumns\n \n \n if ( ( name.lower() == 'no' ) and fileName ):\n msg='WARNING: you specified no name but there is name in file'\n print(msg)\n \n if ( ( name.lower() == 'yes' ) and ( fileName == None ) ):\n msg='WARNING: you specified name but there is no name in file'\n print(msg)\n \n if ( ( columnsUnits.lower() == 'no' ) and ( len(fileNameUnits) > 0 ) ):\n msg='WARNING: you specified no units name but there are units name in file'\n print(msg)\n \n if ( ( columnsUnits.lower() == 'yes' ) and ( len(fileNameUnits) == 0 ) ):\n msg='WARNING: you specified units name but there are no units name in file'\n print(msg)\n \n if ( ( len(fileNameUnits) > 0 ) and ( len(fileNameUnits) < filemaxnbcol ) ):\n nbcol=len(fileNameUnits)\n for icol in range (nbcol,filemaxnbcol): fileNameUnits.append('col'+str(icol+1))\n pass\n \n\n\n if fileName:\n self.setName(fileName)\n pass\n if len(fileNameUnits):\n self.setColumnUnits(fileNameUnits)\n\n for i in range(filemaxnbcol):\n if columnsNames.lower()=='yes':\n self.addColumn(effectivecolumnNames[i],allvaluesbycolonne[i])\n pass\n else:\n self.addColumnValues(allvaluesbycolonne[i])\n pass\n return", "def test_read_empty_basic_table_with_comments(fast_reader):\n dat = \"\"\"\n # comment 1\n # comment 2\n col1 col2\n \"\"\"\n t = ascii.read(dat, fast_reader=fast_reader)\n assert t.meta[\"comments\"] == [\"comment 1\", \"comment 2\"]\n assert len(t) == 0\n assert t.colnames == [\"col1\", \"col2\"]", "def load_staging_tables_docstring(cur, conn):", "def create_tabular_resources(self, file, skip_lines, encoding):\n engine = Engine()\n self.encoding = encoding\n engine.encoding = encoding\n table_val = Table(str(file), header_rows=skip_lines)\n table = engine.auto_create_table(table_val, filename=file, make=False)\n clean_table = table.__dict__\n resource_dict = {}\n path_to_table = os.path.basename(clean_table[\"name\"])\n print(\"Processing... 
{file_name}\".format(file_name=path_to_table))\n r_name = os.path.splitext(path_to_table)[0].lower()\n resource_dict[\"name\"] = clean_table_name(r_name)\n resource_dict[\"path\"] = path_to_table\n resource_dict[\"schema\"] = {}\n resource_dict[\"dialect\"] = {\"delimiter\": \",\"}\n resource_dict[\"schema\"][\"fields\"] = []\n for cname, ctuple in clean_table[\"columns\"]:\n if len(ctuple) >= 2:\n if ctuple[0] == \"char\":\n # char sizes need quotes\n char_size = \"{a}\".format(a=ctuple[1])\n resource_dict[\"schema\"][\"fields\"].append({\n \"name\": cname,\n \"type\": ctuple[0],\n \"size\": char_size\n })\n else:\n resource_dict[\"schema\"][\"fields\"].append({\n \"name\": cname,\n \"type\": ctuple[0],\n \"size\": ctuple[1]\n })\n else:\n resource_dict[\"schema\"][\"fields\"].append({\n \"name\": cname,\n \"type\": ctuple[0]\n })\n resource_dict[\"url\"] = \"fill\"\n return resource_dict", "def _read_lengths(self):\n\n stream = self.stream\n stream.seek(0)\n\n ###########\n # Read and set table lengths\n self.table_lengths = [None]*len(tables)\n\n (self.entire_file_length,\n header_length,\n self.smallest_character_code,\n self.largest_character_code) = repeat_call(stream.read_unsigned_byte2, 4)\n\n header_data_length_min = 18 # words\n self.table_lengths[tables.header] = max(\n header_data_length_min, header_length)\n\n self.number_of_chars = self.largest_character_code - \\\n self.smallest_character_code + 1\n self.table_lengths[tables.character_info] = self.number_of_chars\n\n # read the last lengths\n for i in range(tables.width, len(tables)):\n self.table_lengths[i] = stream.read_unsigned_byte2()\n\n ###########\n # Compute table pointers\n self.table_pointers = [None]*len(tables)\n\n # The header starts at 24 bytes\n self.table_pointers[tables.header] = 24\n\n for table in range(tables.header, tables.font_parameter):\n self.table_pointers[\n table+1] = self._position_in_table(table, self.table_lengths[table])\n\n ###########\n # Sanity check\n length = self._position_in_table(\n tables.font_parameter, self.table_lengths[tables.font_parameter])\n if length != self.word_ptr(0, self.entire_file_length):\n raise NameError('Bad TFM file')", "def parse(self, lines):\n # Keep count of the current line number.\n i = 0\n # list tables and content\n tables = dict()\n attr_param = list()\n\n skipped_lines = list() # DEBUG\n\n # Loop through all lines.\n for i in range(0, len(lines)):\n line_stripped = lineNormalise(lines[i])\n skip = True\n\n for keyword in self.target_keywords:\n\n # Look for keywords at the beginning of the line.\n if line_stripped.startswith(keyword):\n # print(\"{} : {}\".format(i, line_stripped)) # DEBUG\n skip = False\n\n # Found one, do parse\n expression = re.search(r'(\\w+) (\\w+)', line_stripped)\n if keyword is self.target_keywords[0]: # class/table\n # get table name\n table_name = expression.group(2)\n\n # add it in tables if not already in\n # tables (classes) may be at differant place in a PlantUML file\n if table_name not in tables:\n tables[table_name] = list()\n # print(\"Table : «{}» ajoutee\".format(expression.group(2))) # DEBUG\n print(\"{} : +table «{}»\".format(i, table_name)) # DEBUG\n\n elif keyword is self.target_keywords[1]: # primary key\n # import pdb; pdb.set_trace()\n # get related table\n attr_param = (re.sub(r'(pyk\\()|\\)|,|\\n', r' ', line_stripped).strip().split())\n tables[table_name].extend(attr_param)\n print(\"{} :\\t«{}» +{}\".format(i, table_name, attr_param)) # DEBUG\n\n elif keyword is self.target_keywords[2]: # foreign key\n # get 
related table\n attr_param = (re.sub(r'(fnk\\()|\\)|,|\\n', r' ', line_stripped).strip().split())\n tables[table_name].extend(attr_param)\n print(\"{} :\\t«{}» +{}\".format(i, table_name, attr_param)) # DEBUG\n\n\n elif keyword is self. target_keywords[3]: # primary foreign key\n # get related table\n attr_param = (re.sub(r'(pfk\\()|\\)|,|\\n', r' ', line_stripped).strip().split())\n tables[table_name].extend(attr_param)\n print(\"{} :\\t«{}» +{}\".format(i, table_name, attr_param)) # DEBUG\n\n else: # attribute\n # print(line_stripped) # DEBUG\n print(\"{} : \\t«{}» Attribute? {}\".format(i, line_stripped)) # DEBUG\n\n if skip:\n skipped_lines.append(i)\n\n print(\"\\nNumbers of tables : {}\\n\".format(len(tables)))\n pp = pprint.PrettyPrinter(indent=4, compact=True)\n print(\"Scraped data:\")\n pp.pprint(tables) # DEBUG\n print(\"\\nSkipped lines: {}\\n\".format(skipped_lines)) # DEBUG", "def process_data(data):\n # Table Name from Json file\n table_name = data['table_name']\n\n # No of Column\n column_count = data['column_count']\n\n # No of Row\n row_count = data['row_count']\n\n # Table columns schema from Json file\n column_properties = data['column_properties']\n\n # Get the row row_properties\n row_properties = data['row_properties']\n return table_name, column_count, row_count, column_properties, row_properties", "def read_tables(tb_path, tb_id, cdm_subset=None, delimiter='|',\n extension='psv', col_subset=None, log_level='INFO', na_values=[]):\n logger = logging_hdlr.init_logger(__name__, level=log_level)\n # Because how the printers are written, they modify the original data frame!,\n # also removing rows with empty observation_value in observation_tables\n if not os.path.isdir(tb_path):\n logger.error('Data path not found {}: '.format(tb_path))\n return\n\n # See if theres anything at all:\n files = glob.glob(os.path.join(tb_path, '*' + tb_id + '*.' + extension))\n if len(files) == 0:\n logger.error('No files found matching pattern {}'.format(tb_id))\n return\n\n # See if subset, if any of the tables is not as specs\n if cdm_subset:\n for tb in cdm_subset:\n if tb not in properties.cdm_tables:\n logger.error('Requested table {} not defined in CDM'.format(tb))\n return\n\n tables = properties.cdm_tables if not cdm_subset else cdm_subset\n file_patterns = {tb: os.path.join(tb_path, '-'.join([tb, tb_id]) + '.' + extension) for tb in tables}\n file_paths = {}\n for k, v in file_patterns.items():\n logger.info('Getting file path for pattern {}'.format(v))\n file_path = glob.glob(v)\n if len(file_path) == 1:\n file_paths[k] = file_path[0]\n elif len(file_path) > 1:\n logger.error(\n 'Pattern {0} resulted in multiple files for table {1}. 
'\n 'Cannot seccurely retrieve cdm table(s)'.format(tb_id, k))\n return\n\n if len(file_paths) == 0:\n logger.error(\n 'No cdm table files found for search patterns {0}: '.format(','.join(list(file_patterns.values()))))\n return\n\n usecols = None if len(tables) == 1 else {table: None for table in tables}\n if col_subset:\n if len(tables) == 1:\n if not isinstance(col_subset, list):\n logger.error('Column subset (col_subset) has to be declared as a list')\n return\n else:\n usecols = col_subset\n else:\n if not isinstance(col_subset, dict):\n logger.error(\n 'Column subset (col_subset) has to be declared as a dictionary '\n 'with a table:[columns] pair per table to subset')\n return\n else:\n usecols = {table: col_subset.get(table, None) for table in tables}\n\n logger.info('Reading into dataframe data files {}: '.format(','.join(list(file_paths.values()))))\n if len(tables) == 1:\n file_path = list(file_paths.values())[0]\n return pd.read_csv(file_path, delimiter=delimiter, usecols=usecols,\n dtype='object', na_values=na_values, keep_default_na=False)\n else:\n df_list = []\n for tb, tb_file in file_paths.items():\n dfi = pd.read_csv(tb_file, delimiter=delimiter,\n usecols=usecols.get(tb), dtype='object',\n na_values=na_values, keep_default_na=False)\n if len(dfi) > 0:\n dfi.set_index('report_id', inplace=True, drop=False)\n dfi.columns = pd.MultiIndex.from_product([[tb], dfi.columns])\n df_list.append(dfi)\n else:\n logger.warning('Table {} empty in file system, not added to the final DF'.format(tb))\n\n if len(df_list) > 0:\n merged = pd.concat(df_list, axis=1, join='outer')\n merged.reset_index(drop=True, inplace=True)\n return merged\n else:\n logger.error('All tables empty in file system')\n return", "def readTab(file_name):\n data = []\n meta = []\n l=0\n for line in open(file_name):\n if l<3:\n meta.append(line.strip(\"\\n\").split(\"\\t\"))\n else:\n if len(line.strip(\"\\n\").split(\"\\t\")) == len(meta[0]):\n data.append(line.strip(\"\\n\").split(\"\\t\"))\n l += 1\n return (meta, data)", "def _create_TableDescriptor(self):\n\n self.conn.cursor.execute(\"PRAGMA table_info(\" + self.table_name + \")\")\n descriptions = self.conn.cursor.fetchall()\n column_map = {}\n for description in descriptions:\n column_map[description[1]] = description[2]\n td = TD(self.table_name, column_map) \n\n# self.conn.cursor.execute(\"SELECT sql FROM sqlite_master WHERE name='{tb}'\"\\\n# .format(tb=self.table_name))\n# aa = str(self.conn.cursor.fetchone()[0])\n# sindx = aa.find(\"(\")\n# eindx = aa.find(\")\")\n# aa = aa[sindx+1:eindx]\n# aa = aa.split(\",\")\n# column_map = {kyval.split()[0]:kyval.split()[1] for kyval in aa}\n# td = TD(self.table_name, column_map) \n\n return td", "def read_specific_problem(filename):\r\n table = []\r\n with open(filename, newline='') as csvfile:\r\n reader = csv.reader(csvfile, skipinitialspace=True, delimiter=',')\r\n for row in reader:\r\n table.append(row)\r\n return table", "def data(self) -> Tuple[List[str], List[List[str]]]:\n format = self.format\n # Check if the file contains header information. 
Initialize the header\n # with the optional names of columns in the format descriptor.\n has_header = format.get('header', True)\n columns = format.get('columns')\n rows = list()\n # Delimiter depends on the file format.\n delim = '\\t' if format['type'] == 'tsv' else ','\n f = codecs.iterdecode(self.load().open(), 'utf-8')\n for row in csv.reader(f, delimiter=delim):\n if has_header:\n # Set the has_header flag to False so that all following records\n # are added to the list of rows.\n has_header = False\n columns = row if columns is None else columns\n else:\n rows.append(row)\n columns = [None] * len(rows[0]) if not columns and rows else columns\n return (columns, rows)", "def open_txt_table(txt_file, data_start_with=2,keys_line=0, types_line=1, split_char=\"\\t\"):\n file = open(txt_file,\"r\")\n i = 0;\n line = file.readline()\n keys = []\n types = []\n txt_table_rows = []\n while line != \"\":\n line = line.strip(\"\\n\")\n line = line.strip(\"\\r\")\n if(i >= data_start_with):\n values = line.split(split_char)\n n = len(values)\n values += [\" \" for x in range(len(keys) - n)]\n txt_table_rows.append(\n TxtTableRow(keys, values, types)\n )\n elif(i==keys_line):\n keys = line.split(split_char)\n elif(i == types_line):\n types = line.split(split_char)\n i += 1\n line = file.readline()\n\n file.close()\n return txt_table_rows", "def table_tags(filen):\n\n print(\"Scanning file \" + str(filen))\n rtable = re.compile(r\"\\$TABLE *:\")\n f = open(filen, \"r\")\n linecount = 0\n tablecount = 0\n tablelist = []\n for line in f:\n linecount = linecount + 1\n table = rtable.search(line)\n if table:\n tablecount = tablecount + 1\n print(str(linecount) + \": \" + str(line.rstrip(\"\\n\")))\n tablelist.append(line.rstrip(\"\\n\"))\n f.close()\n print(str(linecount) + \" lines and \" + str(tablecount) + \" tables\")\n return tablelist", "def testReadDataFile(self):\n try:\n blockNameList = []\n myReader = ParseCifSimple(self.__pathPdbxDataFile, False, 0, 255, \"?\", self.__logFileName)\n blockNameList = myReader.GetBlockNames(blockNameList)\n #\n for blockName in blockNameList:\n block = myReader.GetBlock(blockName)\n tableNameList = []\n tableNameList = block.GetTableNames(tableNameList)\n for tableName in tableNameList:\n table = block.GetTable(tableName)\n columnNameList = table.GetColumnNames()\n logger.debug(\"Table %s colunms %r\", tableName, columnNameList)\n numRows = table.GetNumRows()\n rowList = []\n for iRow in range(0, numRows):\n row = table.GetRow(iRow)\n rowList.append(row)\n logger.debug(\"table %s row length %d\", tableName, len(rowList))\n except Exception as e:\n logger.exception(\"Failing with %s\", str(e))\n self.fail()", "def read_dbf(dbf_fullpath):\n # import pysal\n # dbfin = pysal.open(dbf_fullpath)\n # vars = dbfin.header\n # data = dict([(var, dbfin.by_col(var)) for var in vars])\n\n # table_df = pandas.DataFrame(data)\n import simpledbf\n dbf = simpledbf.Dbf5(dbf_fullpath)\n table_df = dbf.to_dataframe()\n\n print \"Read %d lines from %s\" % (len(table_df), dbf_fullpath)\n\n return table_df", "def read_table(self, table_type):\n\n if table_type == 'hash':\n entry_class = MPQHashTableEntry\n elif table_type == 'block':\n entry_class = MPQBlockTableEntry\n else:\n raise ValueError(\"Invalid table type.\")\n\n table_offset = self.header['%s_table_offset' % table_type]\n table_entries = self.header['%s_table_entries' % table_type]\n key = self._hash('(%s table)' % table_type, 'TABLE')\n\n self.file.seek(table_offset + self.header['offset'])\n data = 
self.file.read(table_entries * 16)\n data = self._decrypt(data, key)\n\n def unpack_entry(position):\n entry_data = data[position*16:position*16+16]\n return entry_class._make(\n struct.unpack(entry_class.struct_format, entry_data))\n\n return [unpack_entry(i) for i in range(table_entries)]", "def parse_tables_xlsx(inp):\n # --------------------------------------------------------------------------\n # Start\n # --------------------------------------------------------------------------\n raw_read = pd.read_excel(inp,sheet_name = None)\n indx = get_tab_index(raw_read)\n # --------------------------------------------------------------------------\n # Get the individual tables from the file\n # --------------------------------------------------------------------------\n tabdict = {}\n for i in indx['tab'].to_list():\n tabdict[i] = get_table_df(raw_read[i])\n # --------------------------------------------------------------------------\n # Finish\n # --------------------------------------------------------------------------\n out = {}\n out['indx'] = indx\n out['tabs'] = tabdict\n return out", "def init_fields(self):\n result = self.connection.execute('pragma table_info(files)')\n rows = result.fetchall()\n self.fields = [Field(row) for row in rows[4:]]\n result = self.connection.execute('select _keyword from keywords')\n rows = result.fetchall()\n self.keywords = [row[0] for row in rows]", "def get_table_definition(jwt_payload: dict, schema_name: str, table_name: str):\n DJConnector.set_datajoint_config(jwt_payload)\n\n schema_virtual_module = dj.create_virtual_module(schema_name, schema_name)\n return getattr(schema_virtual_module, table_name).describe()", "def read(self):\n file_path = os.path.join(self.query_path, self.filename + '.sql')\n with open(file_path, 'r') as f:\n self.raw_sql = f.read()", "def load_file(self):\n\n self.df = self.sqlContext.read.csv(self.source, sep=self.sep, header=True, inferSchema=True)", "def description_text(self, P=None):\n\n if not P:\n P = self.parameters.values_to_dict()\n\n method = P[\"method\"]\n tablename = P[\"table name\"]\n lines = [self.header]\n lines.append(f\" {method} table '{tablename}'\")\n\n if method == \"Create\":\n table = {\"Column\": [], \"Type\": [], \"Default\": []}\n for d in self.parameters[\"columns\"].value:\n try:\n table[\"Column\"].append(self.get_value(d[\"name\"]))\n except Exception:\n table[\"Column\"].append(d[\"name\"])\n table[\"Type\"].append(d[\"type\"])\n if d[\"default\"] == \"\":\n table[\"Default\"].append(\"\")\n else:\n try:\n table[\"Default\"].append(self.get_value(d[\"default\"]))\n except Exception:\n table[\"Default\"].append(d[\"default\"])\n for tmp in tabulate(table, headers=\"keys\", tablefmt=\"grid\").splitlines():\n lines.append(8 * \" \" + tmp)\n elif method == \"Read\":\n filename = P[\"filename\"]\n file_type = P[\"file type\"]\n if file_type == \"from extension\":\n if isinstance(filename, str) and self.is_expr(filename):\n lines.append(\n f\" File: from variable '{filename}' with type from the \"\n \"extension\"\n )\n else:\n file_type = PurePath(filename).suffix\n if file_type not in self.parameters[\"file type\"].enumeration:\n types = \"', '\".join(self.parameters[\"file type\"].enumeration)\n raise RuntimeError(\n f\"Cannot handle files of type '{file_type}' when reading \"\n f\"table '{tablename}'.\\nKnown types: '{types}'\"\n )\n lines.append(\n f\" File: '{filename}' with type '{file_type}' from the \"\n \"extension.\"\n )\n else:\n lines.append(f\" File: '{filename}' with type 
'{file_type}'\")\n elif method == \"Save\":\n pass\n elif method == \"Save as\":\n filename = P[\"filename\"]\n file_type = P[\"file type\"]\n if file_type == \"from extension\":\n file_type = PurePath(filename).suffix\n if file_type not in self.parameters[\"file type\"].enumeration:\n types = \"', '\".join(self.parameters[\"file type\"].enumeration)\n raise RuntimeError(\n f\"Cannot handle files of type '{file_type}' when reading \"\n f\"table '{tablename}'.\\nKnown types: '{types}'\"\n )\n lines.append(\n f\" File: '{filename}' with type '{file_type}' from the \"\n \"extension.\"\n )\n else:\n lines.append(f\" File: '{filename}' with type '{file_type}'\")\n elif method == \"Print\":\n pass\n elif method == \"Print the current row of\":\n pass\n elif method == \"Append a row to\":\n table = {\"Column\": [], \"Value\": []}\n for d in self.parameters[\"columns\"].value:\n try:\n table[\"Column\"].append(self.get_value(d[\"name\"]))\n except Exception:\n table[\"Column\"].append(d[\"name\"])\n try:\n table[\"Value\"].append(self.get_value(d[\"value\"]))\n except Exception:\n table[\"Value\"].append(d[\"value\"])\n for tmp in tabulate(table, headers=\"keys\", tablefmt=\"grid\").splitlines():\n lines.append(8 * \" \" + tmp)\n elif method == \"Go to the next row of\":\n pass\n elif method == \"Add columns to\":\n table = {\"Column\": [], \"Type\": [], \"Default\": []}\n for d in self.parameters[\"columns\"].value:\n try:\n table[\"Column\"].append(self.get_value(d[\"name\"]))\n except Exception:\n table[\"Column\"].append(d[\"name\"])\n table[\"Type\"].append(d[\"type\"])\n if d[\"type\"] == \"boolean\":\n if d[\"default\"] == \"\":\n default = False\n else:\n default = bool(d[\"default\"])\n elif d[\"type\"] == \"integer\":\n if d[\"default\"] == \"\":\n default = 0\n else:\n default = int(d[\"default\"])\n elif d[\"type\"] == \"float\":\n if d[\"default\"] == \"\":\n default = np.nan\n else:\n default = float(d[\"default\"])\n elif d[\"type\"] == \"string\":\n default = d[\"default\"]\n table[\"Default\"].append(default)\n for tmp in tabulate(table, headers=\"keys\", tablefmt=\"grid\").splitlines():\n lines.append(8 * \" \" + tmp)\n elif method == \"Get element of\":\n if P[\"column\"] == \"\":\n raise RuntimeError(\"Table get element: the column must be given\")\n column = P[\"column\"]\n if P[\"row\"] == \"\":\n raise RuntimeError(\"Table get element: the row must be given\")\n row = P[\"row\"]\n lines.append(f\" row {row}, column {column}\")\n elif method == \"Set element of\":\n if P[\"column\"] == \"\":\n raise RuntimeError(\"Table set element: the column must be given\")\n column = P[\"column\"]\n if P[\"row\"] == \"\":\n raise RuntimeError(\"Table set element: the row must be given\")\n row = P[\"row\"]\n value = P[\"value\"]\n lines.append(f\" row {row}, column {column} = {value}\")\n else:\n methods = \", \".join(table_step.methods)\n raise RuntimeError(\n f\"The table method must be one of {methods}, not {method}.\"\n )\n\n return \"\\n\".join(lines)", "def read_table(self, table):\n return READ_TABLE(table, db=self.db)", "def read_rdata(rdata_fullpath, table_name):\n from rpy2.robjects import pandas2ri, r\n pandas2ri.activate()\n\n # we want forward slashes for R\n rdata_fullpath_forR = rdata_fullpath.replace(\"\\\\\", \"/\")\n print \"Loading %s\" % rdata_fullpath_forR\n \n # read in the data from the R session with python\n r['load'](rdata_fullpath_forR)\n # check that it's there\n table_df = pandas2ri.ri2py(r['model_summary'])\n\n # fillna\n for col in table_df.columns:\n 
nullcount = sum(pandas.isnull(table_df[col]))\n if nullcount > 0: print \" Found %5d NA values in column %s\" % (nullcount, col)\n table_df = table_df.fillna(0)\n for col in table_df.columns:\n nullcount = sum(pandas.isnull(table_df[col]))\n if nullcount > 0: print \" -> Found %5d NA values in column %s\" % (nullcount, col)\n \n print \"Read %d lines from %s\" % (len(table_df), rdata_fullpath)\n return table_df", "def get_table_from_ldac(filename, frame=1):\n from astropy.table import Table\n if frame>0:\n frame = frame*2\n tbl = Table.read(filename, hdu=frame)\n return tbl", "def get_mist_eep_table():\n fp = Path(DATA_PATH, \"mist_eep_table.csv\")\n return pd.read_csv(fp, comment=\"#\")", "def read_relations(db, openfile):\n pass", "def read_data(columns, types = {}, filename= \"data/wxobs20170821.txt\"):\n #Initialize my data variable\n data = {}\n for column in columns:\n data[column] = []\n\n with open(filename, \"r\") as datafile:\n # read first three line (header)\n for _ in range(3):\n #print(_)\n datafile.readline()\n\n\n # Read and parse the rest of the file\n for line in datafile:\n split_line = line.split()\n for column in columns:\n i = columns[column]\n t = types.get(column, str)\n value = t(split_line[i])\n data[column].append(value)\n\n return data", "def read_attrs(db_path, table=Annotation.__tablename__, index_col='TranscriptId'):\n engine = create_engine('sqlite:///{}'.format(db_path))\n return pd.read_sql_table(table, engine, index_col=index_col)", "def get_table_def(dict_in, db_in):\n meta = MetaData(db_in)\n \n val_mapping = {\n 'pressure': Integer,\n 'temperature': Float,\n 'humidity': Float,\n 'battery': Integer,\n 'colorTemperature': Integer,\n }\n \n val_type = val_mapping.get(dict_in['name'], String)\n \n\n table_def = Table(dict_in['name'], meta, \n Column('source', String),\n Column('name', String),\n Column('displayName', String),\n Column('value', String),\n Column('unit', String),\n Column('deviceId', Integer),\n Column('hubId', Integer),\n Column('locationId', Integer),\n Column('installedAppId', Integer),\n Column('descriptionText', String),\n Column('timestamp', DateTime),\n )\n return table_def", "def read_table(cls, filepath_or_buffer, *args, **vargs):\n if filepath_or_buffer.endswith('.csv') and 'sep' not in vargs:\n vargs['sep'] = ','\n df = pandas.read_table(filepath_or_buffer, *args, **vargs)\n labels = df.columns\n return Table([df[label].values for label in labels], labels)", "def _read_hdr_file(ktlx_file):\r\n with open(ktlx_file, 'rb') as f:\r\n\r\n hdr = {}\r\n assert f.tell() == 0\r\n\r\n hdr['file_guid'] = hexlify(f.read(16))\r\n hdr['file_schema'], = unpack('<H', f.read(2))\r\n if not hdr['file_schema'] in (1, 3, 7, 8, 9):\r\n raise NotImplementedError('Reading header not implemented for ' +\r\n 'file_schema ' + str(hdr['file_schema']))\r\n\r\n hdr['base_schema'], = unpack('<H', f.read(2))\r\n if not hdr['base_schema'] == 1: # p.3: base_schema 0 is rare, I think\r\n raise NotImplementedError('Reading header not implemented for ' +\r\n 'base_schema ' + str(hdr['base_schema']))\r\n\r\n hdr['creation_time'] = datetime.fromtimestamp(unpack('<i',\r\n f.read(4))[0])\r\n hdr['patient_id'], = unpack('<i', f.read(4))\r\n hdr['study_id'], = unpack('<i', f.read(4))\r\n hdr['pat_last_name'] = _make_str(unpack('c' * 80, f.read(80)))\r\n hdr['pat_first_name'] = _make_str(unpack('c' * 80, f.read(80)))\r\n hdr['pat_middle_name'] = _make_str(unpack('c' * 80, f.read(80)))\r\n hdr['patient_id'] = _make_str(unpack('c' * 80, f.read(80)))\r\n assert f.tell() == 
352\r\n\r\n if hdr['file_schema'] >= 7:\r\n hdr['sample_freq'], = unpack('<d', f.read(8))\r\n n_chan, = unpack('<i', f.read(4))\r\n hdr['num_channels'] = n_chan\r\n hdr['deltabits'], = unpack('<i', f.read(4))\r\n hdr['phys_chan'] = unpack('<' + 'i' * hdr['num_channels'],\r\n f.read(hdr['num_channels'] * 4))\r\n\r\n f.seek(4464)\r\n hdr['headbox_type'] = unpack('<' + 'i' * 4, f.read(16))\r\n hdr['headbox_sn'] = unpack('<' + 'i' * 4, f.read(16))\r\n hdr['headbox_sw_version'] = _make_str(unpack('c' * 40, f.read(40)))\r\n hdr['dsp_hw_version'] = _make_str(unpack('c' * 10, f.read(10)))\r\n hdr['dsp_sw_version'] = _make_str(unpack('c' * 10, f.read(10)))\r\n hdr['discardbits'], = unpack('<i', f.read(4))\r\n\r\n if hdr['file_schema'] >= 8:\r\n hdr['shorted'] = unpack('<' + 'h' * 1024, f.read(2048))[:n_chan]\r\n hdr['frequency_factor'] = unpack('<' + 'h' * 1024,\r\n f.read(2048))[:n_chan]\r\n return hdr", "def read_field(file_name):\n\n f = open(file_name, 'r', encoding='utf-8', errors='ignore')\n data = dict()\n row = 1\n for i in f:\n n = 1\n i = i.strip('\\n')\n for symb in i:\n data[(row, n)] = symb\n n += 1\n row += 1\n return data", "def readRecordFromFile():\n\twith open(gbl.sourceFile, newline='') as csvfile:\n\t\trowReader = csv.reader(csvfile, delimiter=gbl.csvDiscriminator, quotechar=gbl.csvQuotechar)\n\t\tfor row in rowReader:\n\t\t\tROWData.append(row)", "def createTable(self):\n ## reading the source file\n\n \n ## building the hive script\n\n ## creating the metastore table by executing the Hive script on the remote machine (SSH)", "def readFrom(self,fn):\n hdrs = {}\n try:\n f = open(fn+\".headers\",\"tr\")\n for l in f:\n if l[-1:]==\"\\n\":\n l = l[:-1]\n i = l.find(\": \")\n if -1!=i:\n hdrs[l[:i]] = l[i+2:]\n f.close()\n except (Exception,Error) as err:\n log(\"readFrom: header: error: \"+str(err))\n try:\n f2 = open(fn,\"br\")\n data = f2.read()\n f2.close()\n except (Exception,Error) as err:\n log(\"readFrom: body: error: \"+str(err))\n return (hdrs,data)", "def get_readpattern_defs(self, filename=None):\n if filename is not None:\n return ascii.read(filename)\n\n tab = ascii.read(self.readpatt_def_file)\n return tab", "def _load_schema(self, mode=\"staging\"):\n\n self._check_mode(mode)\n\n json_path = self.table_folder / f\"schema-{mode}.json\"\n columns = self.table_config[\"columns\"]\n\n if mode == \"staging\":\n new_columns = []\n for c in columns:\n # case is_in_staging are None then must be True\n is_in_staging = (\n True if c.get(\"is_in_staging\") is None else c[\"is_in_staging\"]\n )\n # append columns declared in table_config.yaml to schema only if is_in_staging: True\n if is_in_staging and not c.get(\"is_partition\"):\n c[\"type\"] = \"STRING\"\n new_columns.append(c)\n\n del columns\n columns = new_columns\n\n elif mode == \"prod\":\n schema = self._get_table_obj(mode).schema\n\n # get field names for fields at schema and at table_config.yaml\n column_names = [c[\"name\"] for c in columns]\n schema_names = [s.name for s in schema]\n\n # check if there are mismatched fields\n not_in_columns = [name for name in schema_names if name not in column_names]\n not_in_schema = [name for name in column_names if name not in schema_names]\n\n # raise if field is not in table_config\n if not_in_columns:\n raise BaseDosDadosException(\n \"Column {error_columns} was not found in table_config.yaml. 
Are you sure that \"\n \"all your column names between table_config.yaml, publish.sql and \"\n \"{project_id}.{dataset_id}.{table_id} are the same?\".format(\n error_columns=not_in_columns,\n project_id=self.table_config[\"project_id_prod\"],\n dataset_id=self.table_config[\"dataset_id\"],\n table_id=self.table_config[\"table_id\"],\n )\n )\n\n # raise if field is not in schema\n if not_in_schema:\n raise BaseDosDadosException(\n \"Column {error_columns} was not found in publish.sql. Are you sure that \"\n \"all your column names between table_config.yaml, publish.sql and \"\n \"{project_id}.{dataset_id}.{table_id} are the same?\".format(\n error_columns=not_in_schema,\n project_id=self.table_config[\"project_id_prod\"],\n dataset_id=self.table_config[\"dataset_id\"],\n table_id=self.table_config[\"table_id\"],\n )\n )\n\n # if field is in schema, get field_type and field_mode\n for c in columns:\n for s in schema:\n if c[\"name\"] == s.name:\n c[\"type\"] = s.field_type\n c[\"mode\"] = s.mode\n break\n ## force utf-8, write schema_{mode}.json\n json.dump(columns, (json_path).open(\"w\", encoding=\"utf-8\"))\n\n # load new created schema\n return self.client[f\"bigquery_{mode}\"].schema_from_json(str(json_path))", "def parseIntoDB(self, filehandle, cursor, alignTab, sequenceTab=None,\n update=None):\n c = filehandle.tell()\n filehandle.seek(0, 2)\n filesize = filehandle.tell()\n filehandle.seek(c)\n l = filehandle.readline()\n rc = 0\n count = 0\n if l.split()[0] != '##maf':\n return\n else:\n self.setpar(l.split()[1:])\n\n l=filehandle.readline()\n while l:\n la = l.split()\n## print la\n if(len(la)==0 or la[0]=='#'):\n## print \"skipping\"\n 1\n elif(la[0]=='a'):\n## print \"reading alignment\"\n count+=1\n self.readalign(la[1:], filehandle)\n self._dump(alignTab, sequenceTab)\n if(update and not count % 1000):\n cursor.execute(update % (int(filehandle.tell() * 100.\n / filesize)))\n else:\n## print \"end of records\"\n return\n l=filehandle.readline()", "def read_file(self):\n colspecs = [[0, 7]] # for the id\n names = ['id']\n for question in self.question_list:\n colspecs.extend(question.get_column_range())\n names.extend(question.get_column_names())\n\n self.data = pd.read_fwf(self.file, colspecs=colspecs, encoding=self.encoding, names=names, header=None)\n self.data.fillna(0, inplace=True)\n self.data = self.data.astype(int)\n return self.data", "def get_table_definition(db_name, schema_name, table_name, server_name, data_partition_column_name='', excluded_columns=()):\n server_name = '' if server_name == '127.0.0.1' or server_name == 'localhost' else server_name\n server_name = f'[{server_name}].' if server_name else ''\n\n sql = (\"SELECT T.name AS TABLE_NAME, C.name AS COLUMN_NAME, P.name AS DATA_TYPE, \"\n \"P.max_length AS SIZE, CAST(P.precision AS VARCHAR) + '/' + CAST(P.scale AS VARCHAR) AS PRECISION_SCALE, \"\n \"c.* FROM {0}[{1}].sys.objects AS T JOIN {0}[{1}].sys.columns AS C ON T.object_id = C.object_id \"\n \"JOIN {0}[{1}].sys.types AS P ON C.system_type_id = P.system_type_id \"\n \"JOIN sys.schemas ss ON (T.schema_id = ss.schema_id) \"\n \" WHERE T.type_desc = 'USER_TABLE' and ss.name = ? \"\n \"and T.name = ? 
and P.name != 'timestamp' and P.name != 'sysname' order by column_id asc\").format(server_name, db_name)\n\n columns = fetch_rows(sql, [schema_name, table_name])\n\n target_table_column_prefix = get_config()['TARGET_TABLE_COLUMN_PREFIX']\n out_columns = {}\n\n for column in columns:\n column['original_data_type'] = column['data_type']\n\n if column['column_name'].upper() in default_columns:\n column['target_table_column_name'] = target_table_column_prefix + column['column_name']\n else:\n column['target_table_column_name'] = column['column_name']\n\n # Update the data type for the data partition column\n if data_partition_column_name != '' and column['column_name'].upper() == data_partition_column_name.upper():\n column['data_type'] = 'datetime'\n\n out_columns[column['column_name'].upper()] = column\n\n if len(excluded_columns) > 0:\n for excluded_column in excluded_columns:\n out_columns.pop(excluded_column)\n\n return out_columns", "def parse(self):\n if self.filename.endswith('.gz'):\n compression = 'gzip'\n elif self.filename.endswith('.bz2'):\n compression = 'bz2'\n else:\n compression = None\n df = pd.read_table(self.filename, compression=compression)\n\n # drop empty column from extra tab\n df.dropna(axis=1, how='all', inplace=True)\n return df", "def info_table(table):\n print \"\\nSCHEMA de la taula \",table, \"es: \"\n con=lite.connect('parking.db')\n cur=con.cursor()\n cur.execute(\"PRAGMA table_info({});\".format(table))\n data = cur.fetchall()\n for d in data:\n print \"\\t\",d[0], d[1], d[2]\n con.close()", "def read_database(self):\n # open the database\n f = open('KISS_LINES','r')\n # make a list which will contain lines\n tlc = []\n for row in f:\n tlc.append(f.readline())\n f.close()\n\n return tlc", "def find_table_command(input_file):\n contents = open(input_file, 'r')\n file = \"\".join(contents)\n table_title = find_table(input_file)[1]\n table_command =[]\n for title in table_title:\n stString = 'CREATE TABLE '+str(title)+ ' ('\n start = file.find(stString)\n end = file.find(');', start)\n word_list = file[start+len(stString):end].split(',')\n table_command.append(stString+ ','.join(word_list)+');')\n\n return table_command", "def get_table_info(create_table_sql: str):\n table_info = {'columns': [], 'priority': 'rowid'}\n bits = create_table_sql.split()\n lowbits = create_table_sql.lower().split()\n rowid = True\n for n, word in enumerate(lowbits):\n if word == 'table':\n table_info['table_name'] = bits[n + 1]\n if word.startswith('('):\n table_info['columns'].append(bits[n].lstrip('('))\n if word.endswith(','):\n table_info['columns'].append(bits[n + 1])\n if word == 'unique':\n table_info['priority'] = table_info['columns'][-1]\n if word == 'without' and lowbits[n] == 'rowid':\n rowid = False\n if rowid:\n table_info['columns'].insert(0, 'rowid')\n return table_info", "def __read_all_tables( self, dbfile, iteration=2000 ): #limit=None ):\n conn = sql3.connect( dbfile )\n tnames = pd.read_sql(\"SELECT name FROM sqlite_master WHERE type='table' ORDER BY name\", conn)\n tables = {}\n for tname in tnames.name.values:\n #print tname\n tmp = pd.read_sql( 'select * from %s limit 3' % tname, conn )\n if tname != 'motif_infos' and 'iteration' in tmp.columns.values.tolist():\n query = 'select * from %s where iteration=' + str(iteration)\n else:\n query = 'select * from %s'\n table = pd.read_sql(query % tname, conn)\n if tname == 'motif_infos':\n table = table[ table.iteration == iteration ]\n tables[ tname ] = table\n\n conn.close()\n table = tables[ 'meme_motif_sites' ]\n 
table = table.ix[ np.in1d( table.motif_info_id, tables[ 'motif_infos' ].index.values ) ]\n tables[ 'meme_motif_sites' ] = table\n return tables", "def import_tables(file, pages):\n tables = camelot.read_pdf(\n file, pages=pages,\n flavor='stream',\n )\n return tables", "def makeTableFromFile(ffile,name=None,nameInFile='No',\\\n columnsNames=None,columnsNamesInFile='Yes',\n columnsUnits=None,columnsUnitsInFile='No'):\n from exceptions import IOError\n t = None\n try:\n file = open(ffile, 'r')\n except :\n msg=\"can't open file <%s>...\\n\"%ffile\n raise IOError(msg)\n if name and nameInFile.lower()=='yes':\n raise Warning(\"You give a table name and ask to get name from file\") \n if columnsNames and columnsNameInFile.lower()=='yes':\n raise Warning(\"You ask for colum names in the file while already defining them\")\n if columnsUnits and columnsUnitsInFile.lower()=='yes':\n raise Warning(\"You ask for units in the file while already defining them\")\n # table creation\n if name:\n verifyType(name,StringType)\n t=DataTable(name)\n pass\n else:\n t=DataTable('table')\n pass\n # affect columns names if necessary\n if columnsNames:\n t.setColumnNames(columnsNames)\n pass\n # affect columns units if necessary\n if columnsUnits:\n t.setColumnUnits(columnsUnits)\n pass\n t.readFromFile( ffile,\n columnsNames=columnsNamesInFile,name=nameInFile,\n colonnesnames=columnsNames,columnsUnits=columnsUnitsInFile)\n\n return t", "def load_biom_table(table_f):\n return parse_biom_table(table_f)", "def read(cls, filename, hdu=\"PSF_2D_TABLE\"):\n filename = str(make_path(filename))\n table = Table.read(filename, hdu=hdu)\n return cls.from_table(table)", "def getTableHeader(self, filename):\n hdr = \"\"\n with open(filename, \"r\") as f:\n for line in f:\n if line[0] == \">\":\n hdr += line\n else:\n return hdr", "def reader(fname, sd):\n with open(fname, 'rb') as f:\n rdr = csv.reader(f)\n hdr = None\n for l in rdr:\n # header has not been found\n if not hdr:\n # for each field defined in the semantic dictionary,\n # search for one of the aliases to be present in the line\n x = {k: _find_alias(l,sd[k]) for k in sd}\n # have we found a header? essentially: have we found a\n # match for one of the aliases of each mandatory field?\n if all([x[k] is not None for k in x if k[1]]):\n hdr = x\n continue\n # header has been found\n else:\n # check of one or more mandatory columns are missing?\n if any([_silent_get(l,hdr[k]) is '' for k in hdr if k[1]]):\n continue\n # yields a dictionary with field identifier as keys\n yield {k: l[hdr[k]] for k in hdr if hdr[k] is not None}", "def describe_table_details(cur, pattern, verbose):\n # This is a simple \\d command. No table name to follow.\n if not pattern:\n sql = \"\"\"SELECT table_schema AS Schema,\n table_name AS Name,\n 'table' AS Kind, owner_name AS Owner\n FROM v_catalog.tables\n ORDER BY 1, 2\"\"\"\n\n log.debug(sql)\n cur.execute(sql)\n if cur.description:\n headers = [x[0] for x in cur.description]\n return [(None, cur, headers, None, False)]\n\n # This is a \\d <tablename> command. 
A royal pain in the ass.\n schema, relname = sql_name_pattern(pattern)\n where = []\n if schema:\n where.append(\"c.table_schema ~ '%s'\" % schema)\n if relname:\n where.append(\"c.table_name ~ '%s'\" % relname)\n\n sql = \"\"\"SELECT c.table_schema AS Schema,\n c.table_name AS Table,\n c.column_name AS Column,\n c.data_type AS Type,\n c.data_type_length AS Size,\n c.column_default AS Default,\n NOT c.is_nullable AS 'Not Null',\n p.constraint_id IS NOT NULL AS 'Primary Key'\n FROM v_catalog.columns c\n LEFT JOIN v_catalog.primary_keys p\n USING (table_schema, table_name, column_name)\"\"\"\n if where:\n sql += 'WHERE ' + ' AND '.join(where)\n sql += ' ORDER BY 1, 2, c.ordinal_position'\n\n # Execute the sql, get the results and call describe_one_table_details on each table.\n\n log.debug(sql)\n cur.execute(sql)\n\n headers = [x[0] for x in cur.description]\n return [(None, cur, headers, None, False)]", "def loadTextEx(self, dbPath:str, tableName:str, partitionColumns:Optional[List[str]]=None, remoteFilePath:str=None, delimiter:str=\",\") -> Type[\"Table\"]:\n if partitionColumns is None:\n partitionColumns = []\n isDBPath = True\n if \"/\" in dbPath or \"\\\\\" in dbPath or \"dfs://\" in dbPath:\n dbstr ='db=database(\"' + dbPath + '\")'\n self.run(dbstr)\n tbl_str = '{tableNameNEW} = loadTextEx(db, \"{tableName}\", {partitionColumns}, \"{remoteFilePath}\", {delimiter})'\n else:\n isDBPath = False\n tbl_str = '{tableNameNEW} = loadTextEx('+dbPath+', \"{tableName}\", {partitionColumns}, \"{remoteFilePath}\", {delimiter})'\n fmtDict = dict()\n fmtDict['tableNameNEW'] = _generate_tablename()\n fmtDict['tableName'] = tableName\n fmtDict['partitionColumns'] = str(partitionColumns)\n fmtDict['remoteFilePath'] = remoteFilePath if remoteFilePath is not None else \"\"\n fmtDict['delimiter'] = delimiter\n # tbl_str = tableName+'=loadTextEx(db,\"' + tableName + '\",'+ str(partitionColumns) +',\"'+ remoteFilePath+\"\\\",'\"+delimiter+\"')\"\n tbl_str = re.sub(' +', ' ', tbl_str.format(**fmtDict).strip())\n self.run(tbl_str)\n if isDBPath:\n return Table(data=fmtDict['tableName'] , dbPath=dbPath, s=self)\n else:\n return Table(data=fmtDict['tableNameNEW'], s=self)", "def _read_sample_data(self):\n def _read_tab(pth):\n \"\"\"\n Internal read table function\n\n :param str pth: absolute path to the file to read\n :return pandas.DataFrame: table object\n \"\"\"\n if not os.path.exists(pth):\n raise SampleTableFileException(\n \"File does not exist: {}\".format(pth))\n read_csv_kwargs = {\"engine\": \"python\", \"dtype\": str,\n \"index_col\": False, \"keep_default_na\": False,\n \"na_values\": [\"\"]}\n return pd.read_csv(pth, sep=infer_delimiter(pth), **read_csv_kwargs)\n\n no_metadata_msg = \"No {} specified\"\n if CONFIG_KEY not in self:\n _LOGGER.warning(\"No config key in Project\")\n return\n if CFG_SAMPLE_TABLE_KEY not in self[CONFIG_KEY]:\n _LOGGER.debug(\"no {} found\".format(CFG_SAMPLE_TABLE_KEY))\n return\n st = self[CONFIG_KEY][CFG_SAMPLE_TABLE_KEY]\n if st:\n self[SAMPLE_DF_KEY] = _read_tab(st)\n else:\n _LOGGER.warning(no_metadata_msg.format(CFG_SAMPLE_TABLE_KEY))\n self[SAMPLE_DF_KEY] = None\n if CFG_SUBSAMPLE_TABLE_KEY in self[CONFIG_KEY]:\n if self[CONFIG_KEY][CFG_SUBSAMPLE_TABLE_KEY] is not None:\n sst = make_list(self[CONFIG_KEY][CFG_SUBSAMPLE_TABLE_KEY], str)\n self[SUBSAMPLE_DF_KEY] = [_read_tab(x) for x in sst]\n else:\n _LOGGER.debug(no_metadata_msg.format(CFG_SUBSAMPLE_TABLE_KEY))\n self[SUBSAMPLE_DF_KEY] = None", "def vocab_tables(source_file, tags_file):\n pass", "def 
_guess_columns(\n self, file, format, columns_name=None, filter_case=None, header=True, encoding=\"utf-8\"):\n f = TextFile(file, fLOG=self.LOG, encoding=encoding)\n f.open()\n\n if header:\n _aa, _bb, _cc, _dd = f.guess_columns(fields=columns_name)\n reg_exp = re.compile(DatabaseCore2._split_expr.replace(\n \"\\\\t\", _cc.replace(\"|\", \"[|]\")))\n else:\n # tabulation by default\n reg_exp = re.compile(DatabaseCore2._split_expr)\n f.close()\n raise NoHeaderException(\"a header is expected for that function\")\n\n self.LOG(\" [_guess_columns] sep={0}\".format([_cc]))\n\n lines = []\n for line in f:\n if len(lines) > 1000:\n break\n if len(lines) > 900 and random.randint(0, 10) > 0:\n continue\n lines.append(reg_exp.split(\n line.strip(\" \\r\\n\").strip('\\ufeff')))\n f.close()\n\n if len(lines) <= 1:\n raise Exception(\"file %s is empty\" % file)\n\n exp = re.compile(\"\\\\W+\")\n columns = {}\n done = {}\n count = {}\n changes = {}\n\n for i in range(0, len(lines[0])):\n if lines[0][i] in [\n '\\ufeffID', '\\ufeffid', '\\ufeffqid', '\\ufeffQID']:\n lines[0][i] = \"qid\"\n\n if columns_name is None:\n name = lines[0][i].replace(\":\", \"_\")\n origin = lines[0][i]\n else:\n name = columns_name[i].replace(\":\", \"_\")\n origin = columns_name[i]\n\n name = name.replace(\"-\", \"_\").replace(\" \", \"_\")\n\n spl = exp.split(name)\n if len(spl) > 1:\n name = \"\".join(spl)\n if name[0] in \"0123456789\":\n name = \"_\" + name\n\n if name in count:\n count[name] += 1\n name += str(count[name])\n else:\n count[name] = 1\n\n #lines [0][i] = name\n columns[i] = (name, int)\n done[i] = False\n\n if origin != name:\n changes[origin] = name\n\n self.LOG(\" [_guess_columns] columns_name={0}\".format(columns_name))\n\n length = {}\n nbline = 0\n count_types = {}\n\n for line_ in lines[1:]:\n if filter_case is None:\n line = line_\n else:\n line = [filter_case(s) for s in line_]\n nbline += 1\n if line == [] or line == ['']:\n continue\n\n for i in range(0, len(line)):\n\n if i >= len(done):\n # it is probably a wrong line\n continue\n\n vl = length.get(i, 0)\n if len(line[i]) > vl:\n length[i] = len(line[i])\n\n try:\n if done[i]:\n continue\n except KeyError as e:\n str_columns = \"\"\n for k, v in columns.items():\n str_columns += \" \" + \\\n str(k) + \"\\t\" + str(v) + \"\\n\"\n mes = \"KeyError:\" + str(e) + \"\\n\" + str(done) + \"\\n\" + str_columns + \"\\nnb line \" + str(\n nbline) + \" columns: \" + str(len(line)) + \"\\n\" + str(line)\n raise RuntimeError( # pylint: disable=W0707\n \"problem\\n\" +\n mes +\n \"\\n\\ncount_types:\\n \" +\n \"\\n \".join(\n \"{0}:{1}\".format(\n k,\n v) for k,\n v in sorted(\n count_types.items())))\n\n if line[i] is None or len(line[i]) == 0:\n continue\n\n try:\n x = int(line[i])\n if abs(x) >= 2147483647:\n raise ValueError(\"too big int\")\n\n if i not in count_types:\n count_types[i] = {int: 1}\n else:\n count_types[i][int] = count_types[i].get(int, 0) + 1\n\n except ValueError:\n try:\n x = float(line[i])\n\n if i not in count_types:\n count_types[i] = {float: 1}\n else:\n count_types[i][float] = count_types[\n i].get(float, 0) + 1\n\n if columns[i][1] != float:\n columns[i] = (columns[i][0], float)\n\n except ValueError:\n columns[i] = (\n columns[i][0], (str, max(\n 1, len(\n line[i])) * 2))\n\n if i not in count_types:\n count_types[i] = {str: 1}\n else:\n count_types[i][str] = count_types[\n i].get(str, 0) + 1\n\n self.LOG(\" guess with \", len(lines), \"lines\")\n self.LOG(\" count_types \", count_types)\n for i in range(0, len(columns)):\n\n # 
if i is not in count_types, it means the first rows do now\n # contain values for these columns (only null values)\n t = count_types.get(i, {str: 1})\n nb = sum(t.values())\n\n th = 0.0 if nb < 50 else (\n 0.01 if nb < 100 else 0.02) # we authorize 2% of wrong types\n\n n = t.get(int, 0)\n if n * 1.0 / nb >= 1 - th:\n ty = int\n else:\n n += t.get(float, 0)\n if n * 1.0 / nb >= 1 - th:\n ty = float\n else:\n ty = str\n\n columns[i] = (columns[i][0], ty)\n\n self.LOG(\" columns \", columns)\n\n # if not done, choose str by default\n for c in columns:\n v = columns[c]\n if v[1] == str:\n columns[c] = (v[0], (str, max(1, length.get(c, 4)) * 2))\n\n for c, v in columns.items():\n t = v[1]\n if isinstance(t, tuple) and t[0] == str and t[1] == 0:\n raise Exception(\n \"the length is null for column %s - %s\" %\n (c, str(v)))\n\n self.LOG(\" guess\", columns)\n return columns, changes" ]
[ "0.6431829", "0.6404322", "0.6258304", "0.61863047", "0.6153", "0.61336774", "0.61302614", "0.6072644", "0.6022581", "0.60137993", "0.5962784", "0.59596694", "0.5934838", "0.5925993", "0.5887302", "0.58700204", "0.5861452", "0.5848875", "0.584234", "0.5839805", "0.5791649", "0.5790137", "0.5740772", "0.5740407", "0.5735958", "0.57332504", "0.57271755", "0.5707696", "0.5694898", "0.56925917", "0.56921273", "0.5690772", "0.5686438", "0.56776875", "0.56698334", "0.56423336", "0.56325257", "0.5626392", "0.5618616", "0.56163305", "0.56066114", "0.56013805", "0.5596794", "0.55952483", "0.55822164", "0.5582118", "0.5573883", "0.5564573", "0.5561324", "0.55554646", "0.555062", "0.55407965", "0.55276644", "0.55274016", "0.55176824", "0.5513981", "0.5504746", "0.55009025", "0.549691", "0.5491746", "0.5478819", "0.54664063", "0.5457413", "0.54492974", "0.54369545", "0.5432973", "0.5427396", "0.5421097", "0.5419651", "0.54171836", "0.54129547", "0.5398064", "0.5397828", "0.53972", "0.5389869", "0.5386193", "0.53834945", "0.5369939", "0.53621995", "0.536072", "0.5354375", "0.5349196", "0.53445005", "0.5336952", "0.53317845", "0.5329967", "0.53184175", "0.5318118", "0.53137726", "0.5310731", "0.53057456", "0.5303802", "0.5301131", "0.5299283", "0.5298991", "0.52931696", "0.5287846", "0.528778", "0.5276838", "0.52662647" ]
0.58154535
20
Check whether a line contains legal select/project/join (SPJ) predicate
def is_legal_prdicate(predicate):\n    is_legal = 0\n    res = ''\n    regex = re.compile('^.*\ (like|\=|\<\=|\>\=|\<|\>|in|\ between\ )\ .*$')\n    if regex.search(predicate) is not None:\n        predicate = predicate.split('\t')\n        line = [x for x in predicate if x != ''][0]\n        if '\n' in line:\n            line = line[:-1]\n        # remove logic ops (and/when/or)\n        treg = re.compile('^and |^when |^or ')\n        if treg.search(line):\n            line = line[treg.search(line).end():]\n        # remove nested\n        rreg = re.compile('\($')\n        # remove alias\n        areg = re.compile('^.*(l1|l2|l3|n1|n2|n3).*$')\n        if rreg.search(line) is None and areg.search(line) is None:\n            is_legal = 1\n            res = line\n    return is_legal,res
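A minimal usage sketch for the `is_legal_prdicate` document above, assuming that function (plus `import re`, which its compiled patterns require) is in scope; the tab-prefixed sample lines below are hypothetical inputs shaped like the query lines its `split('\t')` step expects, not data taken from this record.

import re  # required by the compiled patterns inside is_legal_prdicate

# Hypothetical, tab-indented predicate lines such as a formatted SQL query dump might contain.
candidate_lines = [
    "\tl_shipdate <= date '1998-09-02'\n",  # plain comparison -> kept as a legal SPJ predicate
    "\tand o_orderkey = l_orderkey\n",      # leading 'and' is stripped before the alias/nesting checks
    "\tselect sum(l_extendedprice)\n",      # no ' <op> ' pattern -> operator regex never matches, rejected
    "\tn1.n_name = n2.n_name\n",            # table aliases n1/n2 trigger the alias filter, rejected
]

for raw in candidate_lines:
    is_legal, predicate = is_legal_prdicate(raw)
    if is_legal:
        print("kept   :", predicate)
    else:
        print("dropped:", repr(raw.strip()))

With the definition above in scope, this keeps the first two lines (returning the cleaned predicate text) and drops the last two, since the function only flags a line when the operator regex matches and neither the nesting pattern nor the alias pattern does.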
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_candidate(line):\n line = line.lower()\n line = prepare_text_line(line)\n return (has_content(line) and any(s in line for s in copyrights_hint.statement_markers))", "def check_record(idline,nclline,sepline,qualiline):\n return check_idline(idline) and check_sepline(sepline)", "def line_is_valid(line):\n if '-' in map(lambda item: item.strip(), line.strip().split(\";\")):\n return False\n else:\n return True", "def is_skippable(line: str) -> bool:\n return len(line) == 0 or line[0] == ';'", "def does_line_contains_import_pdb(line: str) -> bool:\n return IMPORT_PDB_LINE.strip().split() == line.split()", "def match(self, context, line):\n\t\t\n\t\treturn line.kind == 'code' and line.partitioned[0] in self._both", "def _select_simple_chainparts(chain_parts):\n\n for cp in chain_parts:\n if reject_substr_res.search(cp['chainPartName']):\n return False\n\n return True", "def issafe(arg):\n return arg.find(';') == -1 and arg.find('|') == -1", "def intersects(self, line):\n\t\treturn self.intersection(lint) != None", "def consequence_filter(line, index, consequence_list):\n consequence = re.split(r'\\t+', line.rstrip('\\t'))[index]\n if not any(variant_type in consequence for variant_type in\n consequence_list):\n return True\n else:\n return False", "def filter_line(line:str) -> bool:\n fails = is_short_sentence(line, MIN_LINE_LENGTH)\n\n return not fails", "def is_requirement(line):\n # Remove whitespace at the start/end of the line\n line = line.strip()\n\n # Skip blank lines, comments, and editable installs\n return not (\n line == '' or\n line.startswith('-r') or\n line.startswith('#') or\n line.startswith('-e') or\n line.startswith('git+')\n )", "def is_requirement(line):\n # Remove whitespace at the start/end of the line\n line = line.strip()\n\n # Skip blank lines, comments, and editable installs\n return not (\n line == '' or\n line.startswith('-r') or\n line.startswith('#') or\n line.startswith('-e') or\n line.startswith('git+')\n)", "def hacking_has_license(physical_line, filename, lines, line_number):\n # don't work about init files for now\n # TODO(sdague): enforce license in init file if it's not empty of content\n license_found = False\n\n # skip files that are < 10 lines, which isn't enough for a license to fit\n # this allows us to handle empty files, as well as not fail on the Okay\n # doctests.\n if _project_is_apache() and not line_number > 1 and len(lines) > 10:\n for idx, line in enumerate(lines):\n # if it's more than 10 characters in, it's probably not in the\n # header\n if 0 < line.find('Licensed under the Apache License') < 10:\n license_found = True\n if not license_found:\n return (0, \"H102: Apache 2.0 license header not found\")", "def check_splice_command(self, line):\n # Define some useful _vars for later\n is_cond_var = lambda x: any(x.strip() == j for j in 'xyz') or type_check.is_num(x)\n is_cond = lambda x: any(x.strip() == j for j in '< <= > >= =='.strip())\n\n syntax = \"splice <var name> (x-max > x > x-min y-max > y > y-min z-max > z > z-min ) as <new var name> \\n\\n[All conditional statements are optional e.g. 
you can just use x-min if that's all that you need.]\"\n bad_syntax_msg = \"Incorrect syntax: To splice data use the following syntax...\\n `%s`\" % syntax\n self.E_str = \"check_splice_line\"\n if '(' not in line or ')' not in line:\n self.print_error(bad_syntax_msg, errorFunc=SyntaxError)\n\n # Separate the conditional statement and the rest of the syntax\n conditions, rest_of_text = gen_parse.get_str_between_delims(line, \"(\", \")\")\n words = rest_of_text.split()\n if len(words) != 5:\n self.print_error(bad_syntax_msg, errorFunc=SyntaxError)\n\n # Check for the variables\n if words[1] not in self.variables:\n self.print_error(f\"Can't find variable: {words[1]}\", errorFunc=NameError)\n\n # Check the conditional statements\n cond_words = conditions.split()\n if len(cond_words) < 2:\n pass\n else:\n if all(idim not in conditions for idim in 'xyz'):\n self.print_error(\"%s\\n\\nYou're missing either an 'x', 'y' and/or 'z'.\" % bad_syntax_msg, errorFunc=SyntaxError)\n if all(j not in conditions for j in '<>'):\n self.print_error(\"%s\\n\\n You're missing either any comparators.\" % bad_syntax_msg, errorFunc=SyntaxError)\n\n errs = []\n\n # Check the first entry\n _curr, _next = cond_words[0], cond_words[1]\n msg = \"%s\\n\\nCheck statement %s %s\" % (bad_syntax_msg, _curr, _next)\n if is_cond(_curr): errs.append(msg)\n elif not (is_cond(_next)): errs.append(msg)\n\n # Check middle entries\n for i in range(1, len(cond_words) - 1):\n _prev, _curr, _next = cond_words[i-1], cond_words[i], cond_words[i+1]\n\n msg = \"%s\\n\\nCheck statement %s %s %s\" % (bad_syntax_msg, _prev, _curr, _next)\n if is_cond(_curr):\n if not (is_cond_var(_prev) or is_cond_var(_next)): errs.append(msg)\n else:\n if not (is_cond(_prev) or is_cond(_next)): errs.append(msg)\n\n # Check the last entry\n _curr, _prev = cond_words[-1], cond_words[-2]\n msg = \"%s\\n\\nCheck statement %s %s\" % (bad_syntax_msg, _prev, _curr)\n if is_cond(_curr): errs.append(msg)\n elif not (is_cond(_prev)): errs.append(msg)\n\n if errs:\n errs = \"\\n\\n______________________\\n\\n\\n\".join(errs)\n self.print_error(errs, errorFunc=SyntaxError)", "def is_printing(line):\r\n return line.startswith('G1 ') and 'X' in line and 'Y' in line and 'E' in line", "def parse_or_reject_line(line):\n try:\n return convert_to_parkme_format(line)\n except RateCardParsingException:\n return False", "def crit(p):\n return any([\n is_proj(p),\n is_uuid(p),\n is_sqlite(p),\n contains_uuid_folder(p),\n contains_proj_file(p),\n contains_sqlite(p)\n ])", "def line_valid(line: str) -> bool:\n\n return line != ' ' and line != ''", "def check_for_extra_semicolon(sql_str):\r\n try:\r\n if len(sql_str.split(';')) > 2:\r\n raise sqlErr(\"Extra Semi-Colon Detected!\")\r\n except Exception as e:\r\n raise e", "def keep_rule(row):\n return row.license in [\n \"green-oa\",\n \"cc-by\",\n \"cc-by-nc\",\n \"cc0\",\n \"gold-oa\",\n \"medrxiv\",\n \"biorxiv\",\n \"arxiv\",\n ]", "def __contains__(self, line):\n # First check for an entry in _lines.\n specific = self._lines.get(line)\n if specific is not None:\n return specific\n # Find the position in _ranges for line. 
The polarity of this position\n # determines whether we are inside a range (odd) or outside (even).\n pos = bisect.bisect(self._transitions, line)\n return (pos % 2) == 1", "def can_fix_intersection(self, segment):\n\n points = segment.points\n points = [points[1], points[2], points[3], points[2], points[1], points[0]]\n path = create_path(points)\n layer = GSLayer()\n layer.paths.append(path)\n\n if layer.paths[0].insertNodeWithPathTime_(2.5) is None:\n return False\n for segment in layer.paths[0].segments[:-1]:\n # We need to check only curve segments which consist of four points.\n if len(segment.points) == 4:\n s_t = self.triangle_error_of(segment.points, do_round=True)\n if s_t is not None:\n points = points2vectors(segment.points)\n ok = False\n for s, t in self.calculate_s_t_candidates(points, s_t):\n if self.try_update_points(points, s, t) is not None:\n ok = True\n break\n if not ok:\n return False\n return True", "def _is_clustal_seq_line(line):\n return line and (not line[0].isspace()) and\\\n (not line.startswith('CLUSTAL')) and (not line.startswith('MUSCLE'))", "def is_constraint(self, line):\n constraints = ['PRIMARY', 'KEY', 'UNIQUE', 'CONSTRAINT']\n for constraint in constraints:\n if line.startswith(constraint): return True\n return False", "def test_line(line):\r\n if not line.strip():\r\n return False # if the last line is blank\r\n if line.startswith(\"#\"):\r\n return False # comment line\r\n if line.startswith(\" #\"):\r\n return False # comment line\r\n return line", "def _is_option(line):\n return '=' in line", "def process_line(self, line):\n ltype = self.line_type(line)\n if ltype == 'gene':\n self.process_gene_line(line)\n return True\n elif ltype == 'mRNA':\n self.process_mrna_line(line)\n return True\n elif ltype == 'CDS':\n self.process_cds_line(line)\n return True\n elif ltype == 'exon':\n self.process_exon_line(line)\n return True\n elif ltype == 'start_codon' or ltype == 'stop_codon':\n self.process_other_feature_line(line)\n return True\n else:\n self.skipped_features += 1\n return False", "def __input_data_ok(self, line=None):\n # valid pattern: 1407478022|www.facebook.com\n valid_pattern = re.compile(\"\\w{10}\\|\\w+\")\n if (line) and (re.match(valid_pattern, line)):\n return True\n else:\n return False", "def line_part_of_commit(file, line, commit):\n if line == '0': return False\n\n line_val = git(\"blame\", \"-l\", \"-L{0},{0}\".format(line), file)\n return line_val.split(\" \", 1)[0] == commit", "def isline(l):\n return isinstance(l,list) and len(l) == 2 \\\n and ispoint(l[0]) and ispoint(l[1])", "def check(self): # full program\n r = re.compile('(?!(^(((?!;)[A-Z][+-]?\\d+(\\.\\d+)?\\s?)*(\\s*;\\s.*)?)$))')\n for line in self.blocks:\n if r.match(line) and line and line != '\\r' and line != '\\n':\n return False\n return True", "def is_syntax_test_line(self, pos, must_contain_assertion):\n\n details = self.get_details_of_test_assertion_line(pos)\n if details.comment_marker_match:\n return not must_contain_assertion or details.assertion_colrange is not None\n return False", "def no_or_clauses (self,phrase):\r\n \r\n for x in phrase:\r\n if isinstance(x,list) and x[0] == '@':\r\n return False\r\n return True", "def is_ok_line(line):\n card1 = line[0]\n card2 = line[1]\n card3 = line[2]\n\n if not is_coupled(card1.east, card2.west):\n return False\n if not is_coupled(card2.east, card3.west):\n return False\n return True", "def is_ok_three_lines(line1, line2, line3):\n card1 = line1[0]\n card2 = line1[1]\n card3 = line1[2]\n card4 = line2[0]\n card5 = 
line2[1]\n card6 = line2[2]\n\n card7 = line3[0]\n card8 = line3[1]\n card9 = line3[2]\n idents1 = [card.ident for card in line1]\n idents2 = [card.ident for card in line2]\n idents3 = [card.ident for card in line3]\n\n intersection = list(set(idents1) & set(idents2))\n if intersection:\n dprint(\"intersection 12\")\n return False\n\n intersection = list(set(idents1) & set(idents3))\n if intersection:\n return False\n\n intersection = list(set(idents2) & set(idents3))\n if intersection:\n return False\n\n print(\"??????????????\")\n show_triple(line1, line2, line3)\n print(\"??????????????\")\n\n if not is_ok_two_lines(line1, line2):\n return False\n if not is_ok_two_lines(line2, line3):\n return False\n\n return True", "def transcript_filter(line, transcript_list):\n if any(transcript in line for transcript in transcript_list):\n return True\n else:\n return False", "def validate(self, s):\n if len(s) == 0:\n return False\n if s in self.whitelist:\n return True\n if s in self.blacklist:\n return False\n\n # SQL Types are rarely used\n if 't' in s and 'f(t' not in s and 'At' not in s:\n return False\n\n if '1nf' in s:\n return False\n if 's1o' in s:\n return False\n if 'oo' in s:\n return False\n if 'v,s' in s:\n return False\n if 's,v' in s:\n return False\n if 'v,v' in s:\n return False\n if 'v,1' in s:\n return False\n if 'v,n' in s:\n return False\n if 'n,v' in s:\n return False\n if '1,v' in s:\n return False\n if 'Eo(' in s:\n return False\n if '(o(' in s:\n return False\n if '(o1' in s:\n return False\n if '(on' in s:\n return False\n if '(os' in s:\n return False\n if '(of' in s:\n return False\n if '(ov' in s:\n return False\n if 'B(n)' in s:\n return False\n if 'oso' in s:\n return False\n if 'o1o' in s:\n return False\n if 'ono' in s:\n return False\n\n # only 1 special case for this\n # 1;foo:goto foo\n # 1;n:k\n # the 'foo' can only be a 'n' type\n if ':' in s and not 'n:' in s:\n return False\n\n if '11' in s:\n return False\n\n if '))' in s:\n return False\n if '((' in s:\n return False\n if 'v1' in s:\n return False\n\n if 'nv' in s and ';T' not in s:\n return False\n if 'nn' in s and ';T' not in s:\n return False\n\n # select @version foo is legit\n # but unlikely anywhere else\n if 'vn' in s and 'Evn' not in s:\n return False\n\n if 'oE' in s:\n return False\n\n if 'A1' in s:\n return False\n if 'An' in s:\n return False\n if 'A(1' in s:\n return False\n\n if 'vov' in s:\n return False\n if 'vo1' in s:\n return False\n if 'von' in s:\n return False\n\n if 'ns' in s:\n if 'U' in s:\n return True\n if 'T' in s:\n return True\n return False\n\n if 'sn' in s:\n # that is... 
Tsn is ok\n if s.find('T') != -1 and s.find('T') < s.find('sn'):\n return True\n return False\n\n # select foo (as) bar is only nn type i know\n if 'nn' in s and 'Enn' not in s and ';T' not in s:\n return False\n\n if ',o' in s:\n return False\n\n if 'kk' in s and 'Tkk' not in s:\n return False\n\n if 'ss' in s:\n return False\n\n if 'ff' in s:\n return False\n\n if '1no' in s:\n return False\n\n if 'kno' in s:\n return False\n\n if 'nEk' in s:\n return False\n\n if 'n(n' in s:\n return False\n if '1so' in s:\n return False\n if '1s1' in s:\n return False\n if 'noo' in s:\n return False\n if 'ooo' in s:\n return False\n\n if 'vvv' in s:\n return False\n\n if '1vn' in s:\n return False\n if '1n1' in s:\n return False\n if '&1n' in s:\n return False\n if '&1v' in s:\n return False\n if '&1s' in s:\n return False\n if 'nnk' in s:\n return False\n if 'n1f' in s:\n return False\n # folded away\n if s.startswith('('):\n return False\n\n if '&o' in s:\n return False\n\n if '1,1' in s:\n return False\n if '1,s' in s:\n return False\n if '1,n' in s:\n return False\n if 's,1' in s:\n return False\n if 's,s' in s:\n return False\n if 's,n' in s:\n return False\n if 'n,1' in s:\n return False\n if 'n,s' in s:\n return False\n if 'n,n' in s:\n return False\n if '1o1' in s:\n return False\n if '1on' in s:\n return False\n if 'no1' in s:\n return False\n if 'non' in s:\n return False\n if '1(v' in s:\n return False\n if '1(n' in s:\n return False\n if '1(s' in s:\n return False\n if '1(1' in s:\n return False\n if 's(s' in s:\n return False\n if 's(n' in s:\n return False\n if 's(1' in s:\n return False\n if 's(v' in s:\n return False\n if 'v(s' in s:\n return False\n if 'v(n' in s:\n return False\n if 'v(1' in s:\n return False\n if 'v(v' in s:\n return False\n\n if s.startswith('n('):\n return False\n\n if s.startswith('vs'):\n return False\n\n if s.startswith('o'):\n return False\n\n if ')(' in s:\n return False\n\n # need to investigate T(vv) to see\n # if it's correct\n if 'vv' in s and s != 'T(vv)':\n return False\n\n # unlikely to be sqli but case FP\n if s in ('so1n)', 'sonoE'):\n return False\n\n return True", "def isThereApartToIgnore(self,line):\n good_line = ''\n curr_line = line\n # there are 3 options: or the first of the next line is a comment, or a qoute, or a //. 
each time we will check\n # what is first\n global multi_comment_line_mode\n bad_line = line.find(\"//\")\n bad_part_start = line.find(\"/*\")\n if (bad_line == -1 and bad_part_start == -1 and not multi_comment_line_mode):\n # if there is no problem\n return line\n while curr_line != '':\n bad_line = curr_line.find(\"//\")\n curr_lenght_line = len(curr_line)\n bad_part_start = curr_line.find(\"/*\")\n qoutes_start = curr_line.find('\"')\n # handling the case in which bad part is first\n if bad_line==-1 and bad_part_start==-1 and qoutes_start==-1:\n good_line += ' ' + curr_line\n return good_line\n if (bad_line!=-1 and bad_part_start!= -1 and qoutes_start!=-1 and\n bad_part_start == min(bad_part_start,bad_line,qoutes_start) or (bad_part_start!=-1 and bad_line==-1\n and qoutes_start == -1) or (bad_part_start!=-1 and bad_line==-1 and qoutes_start!=-1\n and bad_part_start < qoutes_start )or\n (bad_part_start!=-1 and bad_line!=-1 and qoutes_start==-1 and\n bad_part_start < bad_line )):\n curr_bad = curr_line[bad_part_start:]\n bad_part_end = curr_bad.find(\"*/\")\n good_line += ' ' +curr_line[:bad_part_start]# adding this part to good line\n if bad_part_end != -1:\n # good_line += curr_line[:bad_part_start]\n if bad_part_start + bad_part_end + 2 == curr_lenght_line - 1:\n break\n curr_line = curr_line[bad_part_start + bad_part_end + 2:]\n continue\n else:\n # in this case there are more lines which are bad\n # global multi_comment_line_mode\n multi_comment_line_mode = True\n return good_line\n # hadling the case in which bad line is first\n elif ((bad_line!=-1 and bad_part_start!= -1 and qoutes_start!=-1 and\n bad_line == min(bad_part_start,bad_line,qoutes_start))or\n (qoutes_start == -1 and bad_line !=-1 and bad_part_start == -1) or (qoutes_start!=-1 and bad_line!=-1\n and bad_line<qoutes_start ) or (bad_line!=-1 and bad_part_start!=-1 and qoutes_start ==-1\n and bad_line<bad_part_start)):\n curr_line = curr_line[:bad_line]\n continue\n # handling the case in which quates the first\n if(bad_line!=-1 and bad_part_start!= -1 and qoutes_start!=-1 and\n qoutes_start == min(bad_part_start,bad_line,qoutes_start) or\n (qoutes_start != -1 and bad_line ==-1 and bad_part_start==-1) or\n (qoutes_start != -1 and bad_line !=-1 and bad_part_start==-1 and qoutes_start<bad_line) or\n (qoutes_start != -1 and bad_part_start !=-1 and bad_line==-1 and qoutes_start<bad_part_start)):\n end_qoutes = curr_line[qoutes_start+1:].find('\"')\n good_line+=' '+curr_line[:qoutes_start]+curr_line[qoutes_start:end_qoutes+qoutes_start+2]\n curr_line = curr_line[end_qoutes+qoutes_start+2:]\n continue\n # need???\n elif ((qoutes_start!=-1 and bad_part_start!=-1 and qoutes_start > bad_part_start) or\n (qoutes_start==-1 and bad_part_start!=-1)):\n curr_bad = curr_line[bad_part_start:]\n bad_part_end = curr_bad.find(\"*/\")\n if bad_part_end != -1:\n good_line += ' '+curr_line[:bad_part_start] # adding this part to good line\n if bad_part_start+bad_part_end+2 == curr_lenght_line-1:\n break\n curr_line = curr_line[bad_part_start+bad_part_end+2:]\n else:\n # in this case there are more lines which are bad\n multi_comment_line_mode = True\n return good_line\n else:\n good_line+=' '+ curr_line\n break\n return good_line", "def is_select_external(self) -> bool:\n external_starts = (\n 'select_one_external ',\n 'select_multiple_external ',\n )\n row_type = self.get_type()\n return any(row_type.startswith(item) for item in external_starts)", "def _check_semicolon(line_index, input_line):\n global _total_lines_of_code\n if 
input_line.endswith(';'):\n _code_lines.append(line_index)\n _total_lines_of_code += 1", "def select_data(lines):\r\n # Statement is an inner or outer join if it does not end in ';'\r\n if '*' in lines and lines[-1] != ';':\r\n try:\r\n # read until ';' is seen\r\n res = [lines]\r\n while lines[-1] != ';':\r\n lines = input().rstrip()\r\n res.append(lines)\r\n # Outer Join\r\n if \"outer join\" in res[1]:\r\n file_one = res[1].split()[1]\r\n file_two = res[1].split()[6]\r\n # Names of attributes to compare\r\n if '.' in res[2]:\r\n seperate = res[2].split()\r\n for i in range(len(seperate)):\r\n if '.' in seperate[i]:\r\n val = seperate[i]\r\n val = val[val.find('.'):]\r\n val = val.replace(\".\", '')\r\n val = val.replace(\";\", '')\r\n seperate[i] = val\r\n res[2] = ' '.join(seperate)\r\n attribute_one = res[2].split()[1]\r\n attribute_two = res[2].split()[3]\r\n outer_join(file_one.upper(), file_two.upper(), attribute_one, attribute_two, res)\r\n # Inner join covers the other case\r\n else:\r\n if \"inner join\" in res[1]:\r\n file_one = res[1].split()[1]\r\n file_two = res[1].split()[5]\r\n else:\r\n file_one = res[1].split()[1]\r\n file_two = res[1].split()[3]\r\n # Names of attributes to compare\r\n if '.' in res[2]:\r\n seperate = res[2].split()\r\n for i in range(len(seperate)):\r\n if '.' in seperate[i]:\r\n val = seperate[i]\r\n val = val[val.find('.'):]\r\n val = val.replace(\".\", '')\r\n val = val.replace(\";\", '')\r\n seperate[i] = val\r\n res[2] = ' '.join(seperate)\r\n attribute_one = res[2].split()[1]\r\n attribute_two = res[2].split()[3]\r\n inner_join(file_one.upper(), file_two.upper(), attribute_one, attribute_two, res)\r\n except:\r\n print(\"Error\")\r\n # Selecting all from database\r\n elif '*' in lines:\r\n try:\r\n read_file(lines)\r\n except:\r\n print(\"Failed to query table \" + lines.split()[3][:-1] + \" because it does not exist\")\r\n # Query is not a join or a select all\r\n else:\r\n try:\r\n res = [lines]\r\n # Construct the select statement until a ';' is seen\r\n while lines[-1] != ';':\r\n lines = input().rstrip()\r\n res.append(lines)\r\n file_name = res[1].split()[1].upper()\r\n attributes, indexes = get_attributes(file_name.upper(), res)\r\n check_pos = get_index(file_name.upper(), res)\r\n select_tuple(file_name, res, check_pos, attributes, indexes)\r\n except:\r\n print(\"Error\")", "def test_no_greplist_raises(self):\n line_matches_greps(self.line)", "def check_line(self, line):\n return int(line) in self.__bus_dict", "def checkLine(line: str):\n\n key_words = ['src', 'href', 'url']\n out = list()\n for word in key_words:\n if line.__contains__(word):\n out.append((True, word))\n\n # Check if output list is not empty\n if len(out) == 0:\n # If list is empty return None\n return None\n else:\n return out", "def validate_csv_seq(sequence):\n if sequence.find(',') != -1 or sequence.find(';') != -1:\n return True\n else:\n return False", "def par_checker(s):\n lparens = \"([{\" # otevírací závorky\n rparens = \")]}\" # uzavírací závorky (ve stejném pořadí)\n stack = Stack()\n for c in s:\n if c in lparens:\n stack.push(c)\n for i in range(len(rparens)):\n if c == rparens[i]:\n if stack.is_empty() or stack.pop() != lparens[i]: # líné vyhodnocení\n return False\n return stack.is_empty()", "def hacking_has_correct_license(physical_line, filename, lines, line_number):\n # don't work about init files for now\n # skip files that are < 10 lines, which isn't enough for a license to fit\n # this allows us to handle empty files, as well as not fail on the 
Okay\n # doctests.\n if _project_is_apache() and not line_number > 1 and len(lines) > 10:\n for idx, line in enumerate(lines):\n # if it's more than 10 characters in, it's probably not in the\n # header\n if (0 < line.find('Licensed under the Apache License') < 10\n and not _check_for_exact_apache(idx, lines)):\n return (idx, \"H103: Header does not match Apache 2.0 \"\n \"License notice\")", "def _run_parse_checks(cls, line, filepath, logger=None):\n\n check_funcs = [\n cls.check_column_count,\n cls.check_date_column,\n cls.check_amount_column,\n ]\n checks = [partial(check, line) for check in check_funcs]\n is_parsable = all((check() for check in checks)) # NB short circuit\n logger = logger or logging.getLogger(cls.__name__)\n logger.debug(\"can %s parse this file? %s, %s\" %\n (cls.__name__, \"true\" if is_parsable else \"false\", filepath))\n return is_parsable", "def isbatch(line):\n return line and (\n line.strip()[:3] == \"BHS\"\n or (line.count(\"MSH\") > 1 and line.strip()[:3] != \"FHS\")\n )", "def is_math_line(line):\n if '=' in line:\n # Check it isn't some other command\n for cmd in CMD_LIST:\n if re.findall(f\"^{cmd} \", line):\n return False\n\n str_txt, non_str = gen_parse.get_str_between_delims(line, '\"')\n if any(j in non_str for j in '<>-+/*^'):\n return True\n return False", "def is_input(line):\n #tex_input_re = r\"\"\"^\\s*\\\\input{[^}]*}\"\"\" # input only\n tex_input_re = r\"\"\"(^[^\\%]*\\\\input{[^}]*})|(^[^\\%]*\\\\include{[^}]*})\"\"\" # input or include\n return re.search(tex_input_re, line)", "def check_file(fname):\n\n with open(fname, 'r') as f:\n data = f.readlines()\n for d in data:\n d = d.split()\n if any(s in d for s in dist_solvers):\n return True\n return False", "def areParallel(self, line):\n # A vector perpendicular to line1\n perpVect = np.array([-self.vector[c.Y], self.vector[c.X]])\n # Farin-Hansford eq 3.14\n cosTheda = (np.dot(perpVect, line.vector)/\n (np.linalg.norm(perpVect)*np.linalg.norm(line.vector)))\n # if cosTheda is < c.EPSILON then the lines are parallel and we return True\n return abs(cosTheda) < c.EPSILON", "def is_var_line(line):\n if '=' in line:\n # Check it isn't some other command\n for cmd in CMD_LIST:\n if re.findall(f\"^{cmd} \", line):\n return False\n\n str_txt, non_str = gen_parse.get_str_between_delims(line, '\"')\n if any(j in non_str for j in '<>-+/*^'):\n return False\n return True\n else:\n return False", "def is_all_rights_reserved(line):\n line = prepare_text_line(line)\n # remove any non-character\n line = re.sub(r'\\W+', '', line)\n line = line.strip()\n line = line.lower()\n return line.endswith(('rightreserved', 'rightsreserved'))", "def condition_segment(segment):\n # 1. If the start and end points are the same, done and one\n if segment[0][0] == segment[-1][0] and segment[0][1] == segment[-1][1]:\n if len(segment) == 2:\n LOG.warning(\" REJECTING two point segment, both equal\")\n return None\n return [segment]\n # 2. If point start and end points are inside the conus and they are closer\n # to each other than the CONUS bounds, then close off polygon\n if all(not point_outside_conus(Point(segment[i])) for i in [0, -1]):\n pt0 = Point(segment[0])\n pt1 = Point(segment[-1])\n cpt0 = get_conus_point(pt0)\n cpt1 = get_conus_point(pt1)\n cdist0 = cpt0.distance(pt0)\n cdist1 = cpt1.distance(pt1)\n if pt0.distance(pt1) < 0.5 * min([cdist0, cdist1]):\n LOG.warning(\" non-closed polygon assumed unclosed in error.\")\n segment.append(segment[0])\n return [segment]\n # 3. 
If the line intersects the CONUS 3+ times, split the line\n ls = ensure_outside_conus(LineString(segment))\n # Examine how our linestring intersects the CONUS polygon\n res = ls.intersection(CONUS[\"poly\"])\n if isinstance(res, LineString):\n return [ls.coords]\n # We got multiple linestrings\n # pylint: disable=no-member\n res = [r for r in res.geoms if r.length > 0.2]\n if len(res) == 1:\n LOG.warning(\" was able to filter out very short lines\")\n return [ensure_outside_conus(res[0]).coords]\n LOG.warning(\" returning a MultiLineString len=%s\", len(res))\n return [ensure_outside_conus(x).coords for x in res]", "def is_target(self, line: str) -> bool:\n # Remove possible leading spaces\n return line.lstrip().startswith('<text comment_id=\"0\"')", "def check(self, line):\n if not isinstance(line, str):\n raise TypeError(\"Parameter 'line' not a 'string', is {0}\".format(type(line)))\n if line in self.contents:\n return line\n return False", "def _LineContainsRelevantDisableComment(line: str, removal_type: str) -> bool:\n if FINDER_DISABLE_COMMENT_GENERAL in line:\n return True\n if FINDER_DISABLE_COMMENT_BASE + removal_type in line:\n return True\n return False", "def test_sqpp_long_or_chain(self):\n self.assertEqual(self.parser.parse_query('p0 or p1 or p2 or p3 or p4'),\n ['+', 'p0', '|', 'p1', '|', 'p2', '|', 'p3', '|', 'p4'])", "def contains(self, p):\n p = base.getvector(p)\n if len(p) == 2:\n p = np.r_[p, 1]\n return base.iszero(self.line * p)", "def is_commended_function(line: str) -> bool:\n return line.strip()[0] == '#'", "def encontrarparejas(primeraeleccion,segundaeleccion):\r\n x=ubicacion_cuadrado(primeraeleccion)\r\n y=ubicacion_cuadrado(segundaeleccion)\r\n if (x==0 and y==1 or x==1 and y==0)\\\r\n or (x == 2 and y == 3 or x == 3 and y == 2) \\\r\n or (x == 4 and y == 5 or x == 5 and y == 4)\\\r\n or (x == 6 and y == 7 or x == 7 and y == 6)\\\r\n or (x == 8 and y == 9 or x == 9 and y == 8) \\\r\n or (x==10 and y==11 or x==11 and y==10)\\\r\n or (x==12 and y==13 or x==13 and y==12)\\\r\n or (x==14 and y==15 or x==15 and y==14):\r\n return True", "def check_quadline(self, row: int, col: int, drow: int, dcol: int) -> bool:\n count = 1\n token = self.get_token(row, col)\n count_token = 1\n while self.check_bounds(row+drow, col+dcol) and count <= 3:\n if self.grid[row+drow][col+dcol] == token:\n row += drow\n col += dcol\n count_token += 1\n if count_token == 4:\n return True\n count += 1\n return False", "def _filter(self, entry):\n host = entry.get('@source_host', '')\n\n # errors will most likely come from job-s1\n if not is_from_production_host(host):\n return False\n\n return True", "def test_spires_syntax_detected_invenio(self):\n # trac #261\n converter = search_engine_query_parser.SpiresToInvenioSyntaxConverter()\n inv_search = converter.is_applicable(\"t:p a:c\")\n self.assertEqual(inv_search, False)", "def _validate_select_where(self):", "def CheckLine(rules, line):\n found_item = EXTRACT_INCLUDE_PATH.match(line)\n if not found_item:\n return False, None # Not a match\n\n include_path = found_item.group(1)\n\n # Fix up backslashes in case somebody accidentally used them.\n include_path.replace(\"\\\\\", \"/\")\n\n if include_path.find(\"/\") < 0:\n # Don't fail when no directory is specified. 
We may want to be more\n # strict about this in the future.\n if VERBOSE:\n print \" WARNING: directory specified with no path: \" + include_path\n return True, None\n\n (allowed, why_failed) = rules.DirAllowed(include_path)\n if not allowed:\n if VERBOSE:\n retval = \"\\nFor \" + rules.__str__()\n else:\n retval = \"\"\n return True, retval + ('Illegal include: \"%s\"\\n Because of %s' %\n (include_path, why_failed))\n\n return True, None", "def _line_fits_pattern(self, logline):\n for (fieldname, pattern) in self._excludepatterns:\n try:\n m = pattern.search(str(logline.__dict__[fieldname]))\n except AttributeError:\n warn(\"Exclude patterns must be tuples of a field name and a compiled regex.\")\n warn(\"The object that you provided as a regex seems not to have a 'search' method\")\n exit(-1)\n except KeyError:\n warn(\"You tried to filter for a field that doesn't exist\")\n m = False\n if m:\n return False\n if len(self._includepatterns) == 0:\n return True # no includepatterns means 'accept everything'\n for (fieldname, pattern) in self._includepatterns:\n try:\n m = pattern.search(str(logline.__dict__[fieldname]))\n except AttributeError:\n warn(\"Exclude patterns must be tuples of a field name and a compiled regex.\")\n warn(\"The object that you provided as a regex seems not to have a 'search' method\")\n exit(-1)\n except KeyError:\n warn(\"You tried to filter for a field that doesn't exist\")\n m = False\n if m:\n return True\n return False", "def is_line_on_multiline(feature_1: Sequence, feature_2: Sequence) -> bool:\n return any(is_line_on_line(feature_1, coords_2) for coords_2 in feature_2)", "def is_valid_part1(line):\n char_min, char_max, required_char, password = parse_line(line)\n char_count = password.count(required_char)\n if (char_min <= char_count <= char_max):\n return True\n return False", "def check_duplicate(triple: str, result: List[str]) -> bool:\n fields = triple.strip().split(', ')\n assert len(fields) == 13\n assert fields[9] == 'BERT'\n psuedo_triple = fields[:11]\n psuedo_triple[9] = 'RELEVANCE'\n return ', '.join(psuedo_triple) in result", "def is_used_as_statement(item):\n # this is what we need above our head to be in this category\n layering = [\n ANY_CONSTRUCT,\n syntax.PROGRAM\n ]\n\n return filter_by_layering(item, layering)", "def line_exists():\n global _current_line\n return _current_line is not None", "def isfixline(number):\n if number[0] == '(':\n return True\n return False", "def in_line(pi, pj, pk):\n # compute cross product\n dxc = pk.x - pi.x;\n dyc = pk.y - pi.y;\n\n dxl = pj.x - pi.x;\n dyl = pj.y - pi.y;\n\n cross = dxc * dyl - dyc * dxl;\n\n return True if cross == 0 else False", "def test_combine_multiple_or(self):\n inv_search = 'author:\"ellis, j*\" and (title:report or keyword:\"cross section\")'\n spi_search = 'find a j ellis and (t report or k \"cross section\")'\n self._compare_searches(inv_search, spi_search)", "def validate_line(self, line):\n splitline = line.split('\\t')\n if len(splitline) is not 9:\n return []\n if not \"ID\" in splitline[8]:\n return []\n if not int(splitline[3]) <= int(splitline[4]):\n return []\n # Everything except genes must have parent id\n if not \"Parent\" in splitline[8] and not splitline[2] == \"gene\":\n return []\n return splitline", "def should_split_line(line: Line, opening_bracket: Leaf) -> bool:\n\n if not (opening_bracket.parent and opening_bracket.value in \"[{(\"):\n return False\n\n # We're essentially checking if the body is delimited by commas and there's more\n # than one of them 
(we're excluding the trailing comma and if the delimiter priority\n # is still commas, that means there's more).\n exclude = set()\n trailing_comma = False\n try:\n last_leaf = line.leaves[-1]\n if last_leaf.type == token.COMMA:\n trailing_comma = True\n exclude.add(id(last_leaf))\n max_priority = line.bracket_tracker.max_delimiter_priority(exclude=exclude)\n except (IndexError, ValueError):\n return False\n\n return max_priority == COMMA_PRIORITY and (\n (line.mode.magic_trailing_comma and trailing_comma)\n # always explode imports\n or opening_bracket.parent.type in {syms.atom, syms.import_from}\n )", "def check_stmt(stmt, cmds, exclude=False, exception=None):\r\n if exclude:\r\n return all(cmd not in stmt for cmd in cmds)\r\n if exception:\r\n return any(\r\n cmd in stmt and set(stmt.split()).isdisjoint(set(exception)) for cmd in cmds\r\n )\r\n return any(cmd in stmt for cmd in cmds)", "def is_connected(self, line):\n return any(ext in (line.start, line.end) \\\n for ext in (self.start, self.end))", "def check_line(self):\n if not self.hosts and not self.line:\n self.msg(\"There is no line here. You can create one with +line/createline.\")\n return\n return True", "def isfile(line):\n return line and (line.strip()[:3] == \"FHS\" or isbatch(line))", "def filter(self, line):\n\t\t\n\t\tif line.content.squid_action == 'DIRECT':\n\t\t\treturn True\n\t\telse:\n\t\t\treturn False", "def validate_semicolon(s):\n positions = identify_create_table_view(s)\n validation = {\n \"exit_code\": 0,\n \"total_lines\": count_lines(s)\n }\n if len(positions) > 1:\n validation[\"exit_code\"] = 1\n validation[\"val_lines\"] = positions\n return validation", "def is_one_line_function_declaration_line(line: str) -> bool: # pylint:disable=invalid-name\n return 'def ' in line and '(' in line and '):' in line or ') ->' in line", "def test_spires_keyword_distribution_with_parens(self):\n spi_search = \"find cn d0 and (a abachi or abbott or abazov)\"\n inv_search = \"collaboration:d0 and (author:abachi or author:abbott or author:abazov)\"\n self._compare_searches(inv_search, spi_search)", "def _is_start(self, line):\n if re.match(\".*\\:\\s*\\(groupid\", line):\n return True", "def is_sale(this_line):\n has_price = re.search(r'[0-9]+\\.[0-9]{2}', str(this_line))\n is_not_succinct = (len(this_line) > 3)\n \n return bool(has_price and is_not_succinct)", "def check_if_plist(in_path):\n is_plist = False\n with open(in_path) as fp:\n try:\n for i, line in enumerate(fp):\n if i == 1:\n # print line\n if line.find(\"PLIST 1.0\") != -1:\n is_plist = True\n elif i > 2:\n break\n except UnicodeDecodeError:\n pass\n return is_plist", "def test_spires_syntax_detected_naked_author_leading_spaces(self):\n converter = search_engine_query_parser.SpiresToInvenioSyntaxConverter()\n spi_search = converter.is_applicable(\" author ellis\")\n self.assertEqual(spi_search, True)", "def _check_opt_starts_with(self, docstring: PetscDocStringImpl, item: tuple[SourceRange, DescribableItem, int], entity_name: str, char: str) -> None:\n loc, descr_item, _ = item\n pre = descr_item.prefix\n if pre != char:\n eloc = docstring.make_source_range(pre, descr_item.text, loc.start.line)\n mess = f'{entity_name} parameter list entry must start with \\'{char}\\''\n docstring.add_diagnostic_from_source_range(\n Diagnostic.Kind.ERROR, self.diags.prefix, mess, eloc, patch=Patch(eloc, char)\n )\n return", "def is_void(line):\n\n for i in line:\n if i!=' ' and i!='\\t' and i!='\\n':\n return False\n return True", "def check_hgvs(self):\n import re\n check = 
0\n for row_index, row in self.snp_df.iterrows():\n if row['hgvs'] is not None:\n if not re.match(\"c(.*)\", str(row['hgvs'])):\n check += 1\n print \"Error: invalid HGVS nomenclature, see row\", row_index+4 # prints row in excel doc\n return check", "def skip_line(line):\n return IGNORE_LINES.search(line) is not None", "def test_spires_syntax_detected_f(self):\n # trac #261\n converter = search_engine_query_parser.SpiresToInvenioSyntaxConverter()\n spi_search = converter.is_applicable(\"f t p\")\n self.assertEqual(spi_search, True)", "def _test_line(\n self, line, manager_data=None\n ): # pylint: disable=too-many-branches # pragma: no cover\n\n if PyFunceble.CONFIGURATION[\"db_type\"] == \"json\" and manager_data is not None:\n autocontinue = AutoContinue(self.file, parent_process=False)\n inactive_db = InactiveDB(self.file)\n mining = Mining(self.file)\n else:\n # We use the previously initiated autocontinue instance.\n autocontinue = self.autocontinue\n\n # We use the previously initiated inactive database instance.\n inactive_db = self.inactive_db\n\n # We use the previously initiated mining instance.\n mining = self.mining\n\n # We remove cariage from the given line.\n line = line.strip()\n\n if not line or line[0] == \"#\":\n # We line is a comment line.\n\n # We return None, there is nothing to test.\n return None\n\n if Regex(line, self.regex_ignore, escape=False, return_data=False).match():\n # The line match our list of elemenet\n # to ignore.\n\n # We return None, there is nothing to test.\n return None\n\n # We format the line, it's the last\n # rush before starting to filter and test.\n subject = self._format_line(line)\n\n if (\n not PyFunceble.CONFIGURATION[\"local\"]\n and PyFunceble.Check(subject).is_reserved_ipv4()\n ):\n # * We are not testing for local components.\n # and\n # * The subject is a reserved IPv4.\n\n # We return None, there is nothing to test.\n return None\n\n if PyFunceble.CONFIGURATION[\"filter\"]:\n # We have to filter.\n\n if Regex(\n subject, PyFunceble.CONFIGURATION[\"filter\"], return_data=False\n ).match():\n # The line match the given filter.\n\n # We get the status of the current line.\n status = self.__process_test(subject)\n else:\n # The line does not match the given filter.\n\n # We return None.\n return None\n else:\n # We do not have to filter.\n\n # We get the status of the current line.\n status = self.__process_test(subject)\n\n # We add the line into the auto continue database.\n autocontinue.add(subject, status)\n\n if status.lower() in self.list_of_up_statuses:\n # The status is in the list of UP status.\n\n # We mine if necessary.\n mining.mine(subject, self.file_type)\n\n if subject in inactive_db:\n # The subject is in the inactive database.\n\n # We generate the suspicous file.\n Generate(\n subject, \"file_domain\", PyFunceble.STATUS[\"official\"][\"up\"]\n ).analytic_file(\"suspicious\")\n\n # And we remove the current subject from\n # the inactive database.\n inactive_db.remove(subject)\n else:\n # The status is not in the list of UP status.\n\n # We add the current subject into the\n # inactive database.\n inactive_db.add(subject, status)\n\n if (\n self.complements_test_started\n and PyFunceble.CONFIGURATION[\"db_type\"] == \"json\"\n ):\n # We started the test of the complements.\n\n if \"complements\" in autocontinue.database:\n # The complement index is present.\n\n while subject in autocontinue.database[\"complements\"]:\n # We loop untill the line is not present into the\n # database.\n\n # We remove the currently tested 
element.\n autocontinue.database[\"complements\"].remove(subject)\n\n # We save the current state.\n autocontinue.save()\n\n if manager_data is None:\n # We are not in a multiprocess environment.\n\n # We update the counters\n autocontinue.update_counters()\n\n # We process the autosaving if it is necessary.\n self.autosave.process(test_completed=False)\n elif PyFunceble.CONFIGURATION[\"db_type\"] == \"json\":\n # We are in a multiprocess environment.\n\n # We save everything we initiated into the server process\n manager_data.append(\n {\n \"autocontinue\": autocontinue.database,\n \"inactive_db\": inactive_db.database,\n \"mining\": mining.database,\n }\n )\n\n # We return None.\n return None", "def _project_is_apache():\n\n license_files = [\"LICENSE\"]\n for filename in license_files:\n try:\n with open(filename, \"r\") as file:\n for line in file:\n if re.search('Apache License', line):\n return True\n except IOError:\n pass\n return False", "def is_quadline(self, col: int) -> bool:\n row = self.depth(col)\n for i in range(-1, 2):\n for j in range(-1, 2):\n if not (i == 0 and j == 0) and self.check_quadline(row, col,\n i, j):\n return True\n return False" ]
[ "0.64510995", "0.62487596", "0.6135062", "0.5924878", "0.58464956", "0.57694995", "0.56707954", "0.5648749", "0.55708396", "0.5511604", "0.54753846", "0.54356587", "0.5420101", "0.5379314", "0.5345371", "0.5341573", "0.5319152", "0.53119344", "0.5308702", "0.5283885", "0.52726245", "0.52717066", "0.52258736", "0.52254134", "0.52007246", "0.5200467", "0.519913", "0.5192515", "0.51878333", "0.5186637", "0.51483166", "0.5140445", "0.51270765", "0.51188743", "0.51123923", "0.5068721", "0.50667447", "0.5065339", "0.50580055", "0.50554836", "0.50508404", "0.50506", "0.50480086", "0.5039115", "0.503284", "0.50256187", "0.5022261", "0.5017172", "0.49984297", "0.49953303", "0.49951532", "0.49833512", "0.49811143", "0.4979874", "0.49741894", "0.49625397", "0.49488926", "0.49434355", "0.49300128", "0.49299622", "0.49073452", "0.49051252", "0.48825943", "0.488098", "0.4878206", "0.48741123", "0.4862812", "0.48521036", "0.4848948", "0.48360267", "0.4831323", "0.4827522", "0.48201138", "0.48172706", "0.48165554", "0.48111555", "0.4803966", "0.48008883", "0.4795606", "0.47950137", "0.47855124", "0.4783916", "0.47817728", "0.47789901", "0.4774262", "0.47699556", "0.4767916", "0.47644922", "0.47544792", "0.4753239", "0.4752249", "0.4750197", "0.47457582", "0.47429875", "0.47410884", "0.47405905", "0.47265428", "0.47201627", "0.4719286", "0.47190243" ]
0.5551145
9
Test case for get_liveness. Get job service liveness.
def test_get_liveness(self):
    response = self.client.open(
        '/api/v1//liveness',
        method='GET',
        content_type='application/json')
    self.assert200(response,
                   "Response body is : " + response.data.decode('utf-8'))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def liveness():\n return '', 200", "def test_get_refresh_job_status(self):\n pass", "def liveness_probe():\n return \"I am still alive!\"", "def test_estimate_liveness_batch(self):\n detection = self.detector.detectOne(VLImage.load(filename=SPOOF), detect68Landmarks=True)\n estimations = self.livenessEstimator.estimateBatch([self.detection, detection])\n assert isinstance(estimations, list)\n assert len(estimations) == 2\n for estimation in estimations:\n self.assertLivenessEstimation(estimation)", "def lantern_check():\n if not app.config.get(\"ENABLE_LANTERN\", False):\n print \"[{x}] Not checking Lantern jobs - interface disabled\".format(x=dates.now())\n return\n print \"[{x}] Checking Lantern jobs\".format(x=dates.now())\n LanternApi.check_jobs()", "def test_health_get(self):\n pass", "def main(args = sys.argv):\n\n parser = parser_setup()\n poptions = parser.parse_args()\n\n if poptions.quiet:\n logging.basicConfig(level=logging.WARNING, format=log_format)\n elif poptions.debug:\n logging.basicConfig(level=logging.DEBUG, format=log_format)\n else:\n # Set up the default logging levels\n logging.basicConfig(level=logging.INFO, format=log_format)\n # Make this a little less noisy by default\n requests_log = logging.getLogger(\"requests.packages.urllib3.connectionpool\")\n requests_log.setLevel(logging.WARN)\n\n if not poptions.base_api_url and \"LIMS_API_URL\" in os.environ:\n api_url = os.environ[\"LIMS_API_URL\"]\n log.debug(\"Using LIMS API endpoint: %s from environment\" % api_url)\n elif poptions.base_api_url:\n api_url = poptions.base_api_url\n log.debug(\"Using LIMS API endpoint: %s from options\" % api_url)\n else:\n sys.stderr.write(\"Could not find LIMS API URL.\\n\")\n sys.exit(1)\n\n\n if not poptions.token and \"LIMS_API_TOKEN\" in os.environ:\n token = os.environ[\"LIMS_API_TOKEN\"]\n elif poptions.token:\n token = poptions.token\n else:\n sys.stderr.write(\"Could not find LIMS API TOKEN.\\n\")\n sys.exit(1)\n\n monitor = ClusterMonitor(api_url, token, cluster_type=poptions.cluster)\n\n monitor.run()", "def test_get_job(self):\n response = self.client.open(\n '/v1/job/{id}'.format(id='id_example'),\n method='GET',\n content_type='application/json')\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))", "def test_async_estimate_liveness(self):\n detection = self.detector.detectOne(VLImage.load(filename=SPOOF))\n task = self.livenessEstimator.estimate(detection, asyncEstimate=True)\n self.assertAsyncEstimation(task, LivenessV1)\n task = self.livenessEstimator.estimateBatch([detection] * 2, asyncEstimate=True)\n self.assertAsyncBatchEstimation(task, LivenessV1)", "def test_status(self):\n\n url = '/%s/jobs/?status=RUNNING' % self.api\n response = self.client.generic('GET', url)\n self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)\n\n result = json.loads(response.content)\n self.assertEqual(len(result['results']), 1)\n self.assertEqual(result['results'][0]['job_type']['id'], self.job1.job_type.id)", "def test_job_fail(client):\n response = client.get('/status/random')\n assert response.status_code == 400", "def test_livenessv1_as_dict(self):\n livenessDict = self.livenessEstimator.estimate(self.detection).asDict()\n assert (\n jsonValidator(schema=LIVENESSV1_SCHEMA).validate(livenessDict) is None\n ), f\"{livenessDict} does not match with schema {LIVENESSV1_SCHEMA}\"", "def test_query_train_jobs_with_wrong_offse(self, client):\n params = dict(offse=0, limit=10)\n url = get_url(BASE_URL, params)\n response = 
client.get(url)\n result = response.get_json()\n assert result.get('total') == SUMMARY_DIR_NUM\n assert len(result.get('train_jobs')) == min(10, SUMMARY_DIR_NUM)", "def test_lbheartbeat(self):\n pass", "def test_estimate_liveness_batch_without_landmarks68(self):\n detection = self.detector.detectOne(VLImage.load(filename=SPOOF), detect68Landmarks=False)\n estimations = self.livenessEstimator.estimateBatch([self.detection, detection])\n assert isinstance(estimations, list)\n assert len(estimations) == 2\n for estimation in estimations:\n self.assertLivenessEstimation(estimation)", "def test_get_status(self):\n pass", "def test_get_status(self):\n pass", "def test_estimate_liveness_batch_with_threshold(self):\n qualityThreshold = 0.9\n detection = self.detector.detectOne(VLImage.load(filename=SPOOF))\n estimations = self.livenessEstimator.estimateBatch(\n [self.detection, detection],\n qualityThreshold=qualityThreshold,\n )\n assert isinstance(estimations, list)\n assert len(estimations) == 2\n self.assertLivenessEstimation(estimations[0], LivenessPrediction.Real)\n self.assertLivenessEstimation(estimations[1], LivenessPrediction.Spoof)", "def query_job_progress():\n pass", "def test_query_train_jobs(self, client):\n params = dict(offset=0, limit=10)\n url = get_url(BASE_URL, params)\n response = client.get(url)\n result = response.get_json()\n assert result.get('total') == SUMMARY_DIR_NUM\n assert len(result.get('train_jobs')) == min(10, SUMMARY_DIR_NUM)", "def test_lis_test(desc, inputs, exp_results, condition):\n pywbemlistener_test(desc, inputs, exp_results, condition)", "def test_successful_on_get(self):\n\n url = '/%s/jobs/' % self.api\n\n response = self.client.get(url)\n self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)", "def test_lti20_get_no_score_success(self):\r\n self.setup_system_xmodule_mocks_for_lti20_request_test()\r\n mock_request = self.get_signed_lti20_mock_request(\"\", method=u'GET')\r\n # Now call the handler\r\n response = self.xmodule.lti_2_0_result_rest_handler(mock_request, \"user/abcd\")\r\n # Now assert\r\n self.assertEqual(response.status_code, 200)\r\n self.assertEqual(response.json, {\"@context\": \"http://purl.imsglobal.org/ctx/lis/v2/Result\",\r\n \"@type\": \"Result\"})", "def test_linestatus_for_multiple_input(self, client):\n response = client.get(url_for(\"status\") + \"?line=lsp\")\n\n assert response.status_code == 404", "def check(job, logger, **kwargs):\n resources = Resource.objects.filter(\n attributes__field__name=\"health_check_config\",\n lifecycle='ACTIVE'\n ).distinct()\n set_progress(\n f\"Will run health checks for {resources.count()} resource(s): \"\n f\"{[resource.name for resource in resources]}\")\n\n check_results = []\n\n for resource in resources:\n logger.info(f\"Will run health checks for resource '{resource.name}'.\")\n config_dict = get_config_value(resource)\n failing_health_checks = 0\n\n # Run all the health checks configured for this resource.\n for health_check in config_dict.get('health_checks', {}):\n max_retries = health_check.get('max_retries', 3)\n retry_interval_seconds = health_check.get('retry_interval_seconds', 1)\n\n name = health_check.get('name')\n job.set_progress(f\"Beginning health check '{name}'.\")\n url = health_check.get('url')\n accepted_statuses = health_check.get('accepted_status_codes')\n timeout_seconds = health_check.get('timeout_seconds', 3)\n\n retry_attempts = 0\n while retry_attempts <= max_retries:\n try:\n if retry_attempts > 1:\n logger.info(f\"On retry attempt 
{retry_attempts}.\")\n status_code = requests.get(url, timeout=timeout_seconds).status_code\n\n if accepted_statuses and status_code not in accepted_statuses:\n # Failure.\n msg = (\n f\"HTTP Request returned {status_code}, \"\n f\"which is not in the accepted statuses: {accepted_statuses}\"\n f\"for health check '{name}'.\"\n )\n logger.debug(msg)\n retry_attempts += 1\n else:\n # Pass - We got a valid status. We can stop now.\n logger.info(f\"Health check '{name}' completed with success.\")\n break\n\n except Exception as e:\n # Bad, could be ConnectionError, which will count as a failure.\n logger.debug(e)\n retry_attempts += 1\n\n # Wait for the specified retry interval before trying again\n time.sleep(retry_interval_seconds)\n\n if retry_attempts == max_retries:\n job.set_progress(f\"Max retries exceeded for health check '{name}'.\")\n failing_health_checks += 1\n\n # Summarize this resource's health check results.\n data_dict = {\n 'time': datetime.datetime.now(),\n 'resource_id': resource.id,\n 'resource_name': resource.name,\n 'failing_health_checks': failing_health_checks,\n }\n\n check_results.append(data_dict)\n\n context = {\n \"health_check_results\": check_results,\n }\n\n # Return the dict to be processed by the \"Then\" action\n return 'SUCCESS', '', '', {'context': context}", "def test_get_hyperflex_health_list(self):\n pass", "def healthcheck(parameters): \n\n print(\"In healthcheck module\")", "def __await_helms_installation(self, job_id, expected_services_count):\n end_waiting = datetime.now().timestamp() + self.TIMEOUT_MIN * 60 * 1000\n curr_status = self.helm_results.get(job_id)\n while datetime.now().timestamp() <= end_waiting:\n curr_status = self.helm_results.get(job_id, {\"services\": []})\n if expected_services_count != len(curr_status[\"services\"]):\n time.sleep(1.)\n else:\n self.helm_results.pop(job_id)\n return curr_status\n self.helm_results.pop(job_id)\n return curr_status", "def test_lti20_get_with_score_success(self):\r\n self.setup_system_xmodule_mocks_for_lti20_request_test()\r\n SCORE = 0.55 # pylint: disable=invalid-name\r\n COMMENT = u\"ಠ益ಠ\" # pylint: disable=invalid-name\r\n self.xmodule.module_score = SCORE\r\n self.xmodule.score_comment = COMMENT\r\n mock_request = self.get_signed_lti20_mock_request(\"\", method=u'GET')\r\n # Now call the handler\r\n response = self.xmodule.lti_2_0_result_rest_handler(mock_request, \"user/abcd\")\r\n # Now assert\r\n self.assertEqual(response.status_code, 200)\r\n self.assertEqual(response.json, {\"@context\": \"http://purl.imsglobal.org/ctx/lis/v2/Result\",\r\n \"@type\": \"Result\",\r\n \"resultScore\": SCORE,\r\n \"comment\": COMMENT})", "def ping():\n \"\"\"Get the estimator object for this instance, loading it if it's not already loaded.\"\"\"\n checker = os.listdir('/opt/ml')\n health = checker is not None # health check here\n status = 200 if health else 404\n return flask.Response(response='\\n', status=status, mimetype='application/json')", "def run_lap():\n pass", "def test_successful(self):\n\n url = '/%s/job-types/running/' % self.api\n response = self.client.generic('GET', url)\n self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)\n\n result = json.loads(response.content)\n self.assertEqual(len(result['results']), 1)\n self.assertEqual(result['results'][0]['job_type']['name'], self.job.job_type.name)\n self.assertEqual(result['results'][0]['count'], 1)\n self.assertIsNotNone(result['results'][0]['longest_running'])", "def test_job_result_with_conf(self):\n test_app = 
self._create_app()\n conf = \"stress.test.longpijob.duration = 1\"\n class_path = \"spark.jobserver.LongPiJob\"\n job = self._create_job(test_app, class_path,\n conf=conf,\n ctx=self._get_functional_context())\n time.sleep(3)\n created_job = self.client.jobs.get(job.jobId)\n self.assertEqual(job.jobId, created_job.jobId)\n status = created_job.status\n self.assertTrue(status == \"RUNNING\" or status == \"FINISHED\")\n self._wait_till_job_is_done(created_job)\n job = self.client.jobs.get(job.jobId)\n self.assertEqual(\"FINISHED\", job.status)\n sys.stderr.write(\"duration %s\" % job.duration)\n self.assertTrue(\"1.\" in job.duration)", "def test_get_hyperflex_cluster_list(self):\n pass", "def test_fake_health_get(self):\n pass", "def __get_status_api(self):\r\n try:\r\n return Call_shelly_api(url=self.__api_address + \"/status\")\r\n except ShellyException as err:\r\n _LOGGER.warning(err)", "def describe_training_job(TrainingJobName=None):\n pass", "def health_check():\n printed_something = False\n\n job_checks = {}\n job_names = []\n for job in config.enabled_jobs:\n spec = nomad.parse(get_job(job.template))\n printed_something |= bool(nomad.check_events_and_logs(job.name))\n for service, checks in nomad.get_health_checks_from_spec(spec):\n if not checks:\n log.warn(f'service {service} has no health checks')\n continue\n job_checks[service] = checks\n job_names.append(job.name)\n printed_something |= nomad.wait_for_service_health_checks(consul, job_names, job_checks, nowait=True)\n\n if printed_something:\n log.error('Problems detected; see logs above.')\n sys.exit(1)\n else:\n log.info('No problems detected.')", "def status(args):\n print('Yields HPC fleet \"{}\" status'.format(args))", "def test_ipam_services_list(self):\n pass", "def test():\n request = pb2.TestRequest.FromString(flask.request.get_data())\n logger.debug(\"Flask service received: %s\", request)\n\n if not request.service_hops:\n response = pb2.TestResponse(\n id=request.id,\n status=[pb2.CommonResponseStatus(\n status=pb2.SUCCESS,\n )],\n )\n else:\n status = ([pb2.CommonResponseStatus(status=pb2.SUCCESS)] +\n list(service.call_next(request).status))\n response = pb2.TestResponse(id=request.id, status=status)\n\n tracer = execution_context.get_opencensus_tracer()\n tracer.add_attribute_to_current_span(\"reqId\", request.id)\n return response.SerializeToString()", "def test_get_run(self):\n pass", "def test_running_job(self):\n running_job = json.loads(BASE_JSON % ('null', 'null', 0, 'null'))[0]\n self.assertEquals(self.query_api.get_job_status(running_job), RUNNING)", "def list_service(request):\n builder = http.ResponseBuilder()\n master_addr = request.GET.get('master',None)\n if not master_addr:\n return builder.error('master is required').build_json()\n\n client = wrapper.Galaxy(master_addr,settings.GALAXY_CLIENT_BIN)\n status,jobs = client.list_jobs()\n LOG.info(status)\n if not status:\n return builder.error('fail to list jobs').build_json()\n ret = []\n for job in jobs:\n ret.append(job.__dict__)\n return builder.ok(data=ret).build_json()", "def test_ai_training_stopped(self, mock_get, mock_get_categories):\n\n # We mock ai_list\n mock_get.return_value = {\n 'ai_list': [\n factory.build(\n dict,\n FACTORY_CLASS=AiFactory,\n ai_status='ai_training_stopped'\n )\n ]\n }\n response = self.client.get(reverse('studio:summary'))\n self.assertContains(response, 'Stopped')", "def test_lineuptime_for_multiple_input(self, client):\n response = client.get(url_for(\"uptime\") + \"?line=lzj\")\n\n assert response.status_code == 
404", "def statuses(ctx, job, page):\n\n def get_experiment_statuses():\n try:\n response = PolyaxonClient().experiment.get_statuses(\n user, project_name, _experiment, page=page)\n except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:\n Printer.print_error('Could get status for experiment `{}`.'.format(_experiment))\n Printer.print_error('Error message `{}`.'.format(e))\n sys.exit(1)\n\n meta = get_meta_response(response)\n if meta:\n Printer.print_header('Statuses for experiment `{}`.'.format(_experiment))\n Printer.print_header('Navigation:')\n dict_tabulate(meta)\n else:\n Printer.print_header('No statuses found for experiment `{}`.'.format(_experiment))\n\n objects = list_dicts_to_tabulate(\n [Printer.add_status_color(o.to_light_dict(humanize_values=True), status_key='status')\n for o in response['results']])\n if objects:\n Printer.print_header(\"Statuses:\")\n objects.pop('experiment', None)\n dict_tabulate(objects, is_list_dict=True)\n\n def get_experiment_job_statuses():\n try:\n response = PolyaxonClient().experiment_job.get_statuses(user,\n project_name,\n _experiment,\n _job,\n page=page)\n except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:\n Printer.print_error('Could not get status for job `{}`.'.format(job))\n Printer.print_error('Error message `{}`.'.format(e))\n sys.exit(1)\n\n meta = get_meta_response(response)\n if meta:\n Printer.print_header('Statuses for Job `{}`.'.format(_job))\n Printer.print_header('Navigation:')\n dict_tabulate(meta)\n else:\n Printer.print_header('No statuses found for job `{}`.'.format(_job))\n\n objects = list_dicts_to_tabulate(\n [Printer.add_status_color(o.to_light_dict(humanize_values=True), status_key='status')\n for o in response['results']])\n if objects:\n Printer.print_header(\"Statuses:\")\n objects.pop('job', None)\n dict_tabulate(objects, is_list_dict=True)\n\n page = page or 1\n\n user, project_name, _experiment = get_project_experiment_or_local(ctx.obj.get('project'),\n ctx.obj.get('experiment'))\n\n if job:\n _job = get_experiment_job_or_local(job)\n get_experiment_job_statuses()\n else:\n get_experiment_statuses()", "def test_service_status(self, api_instance):\n params = api_instance.get_service_status()\n # Only key we care about here is GetServiceStatus\n assert params[\"Action\"] == \"GetServiceStatus\"", "def test_hugepage_service_state(Service):\n\n service = Service('disable-transparent-hugepages')\n\n assert service.is_enabled\n assert service.is_running", "def url_health():\n return \"OK\"", "def test_is_system(self):\n\n url = '/%s/job-type-names/?is_system=false' % self.api\n response = self.client.generic('GET', url)\n self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)\n\n result = json.loads(response.content)\n self.assertEqual(len(result['results']), 3)\n\n url = '/%s/job-type-names/?is_system=true' % self.api\n response = self.client.generic('GET', url)\n self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)\n\n result = json.loads(response.content)\n self.assertEqual(len(result['results']), 1)", "def test_get_scenario(self):\n pass", "def test_get_server_runnable(self):\n global locator, config_paths\n locator.load_config(config_paths[2])\n\n self.assertIsNotNone(locator.get_server_runnable())", "def test_liveness(self):\n with DockerHost('host1',\n additional_docker_options=CLUSTER_STORE_DOCKER_OPTIONS) as host1:\n retry_until_success(host1.assert_is_live, retries=30)", "def examine_job(self):\n if 
self.data is None:\n print(\"Could not download job id\", self.job_id)\n return\n self.duration = self.read_data(\"duration\")\n self.between_commands = self.read_data(\"between_commands\")\n\n print(\"---\")\n print(\"test_id: {}\".format(self.job_id))\n print(\"duration:\")\n Job.print_results(self.duration)\n print(\"between_commands:\")\n Job.print_results(self.between_commands)\n print(\"\")", "def test_success(self, mock_get, circuits_app, tower_job_status, tower_last_updated, expected_results):\n mock_get.return_value = helper.MockedResponse(helper.mocked_get_paged_jobs())\n\n function_params = { \n \"tower_job_status\": tower_job_status,\n \"tower_last_updated\": tower_last_updated\n }\n results = call_ansible_tower_list_jobs_function(circuits_app, function_params)\n assert(len(results['content']) == expected_results)", "def test_ai_training(self, mock_get, mock_get_categories):\n\n # We mock ai_list\n mock_get.return_value = {\n 'ai_list': [\n factory.build(\n dict,\n FACTORY_CLASS=AiFactory,\n ai_status='ai_training'\n )\n ]\n }\n response = self.client.get(reverse('studio:summary'))\n self.assertContains(response, 'In Progress')", "def get(ctx, job):\n\n def get_experiment():\n try:\n response = PolyaxonClient().experiment.get_experiment(user, project_name, _experiment)\n cache.cache(config_manager=ExperimentManager, response=response)\n except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:\n Printer.print_error('Could not load experiment `{}` info.'.format(_experiment))\n Printer.print_error('Error message `{}`.'.format(e))\n sys.exit(1)\n\n get_experiment_details(response)\n\n def get_experiment_job():\n try:\n response = PolyaxonClient().experiment_job.get_job(user,\n project_name,\n _experiment,\n _job)\n cache.cache(config_manager=ExperimentJobManager, response=response)\n except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:\n Printer.print_error('Could not get job `{}`.'.format(_job))\n Printer.print_error('Error message `{}`.'.format(e))\n sys.exit(1)\n\n if response.resources:\n get_resources(response.resources.to_dict(), header=\"Job resources:\")\n\n response = Printer.add_status_color(response.to_light_dict(\n humanize_values=True,\n exclude_attrs=['uuid', 'definition', 'experiment', 'unique_name', 'resources']\n ))\n Printer.print_header(\"Job info:\")\n dict_tabulate(response)\n\n user, project_name, _experiment = get_project_experiment_or_local(ctx.obj.get('project'),\n ctx.obj.get('experiment'))\n\n if job:\n _job = get_experiment_job_or_local(job)\n get_experiment_job()\n else:\n get_experiment()", "def test_is_system(self):\n\n url = '/%s/job-types/?is_system=false' % self.api\n response = self.client.generic('GET', url)\n self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)\n result = json.loads(response.content)\n self.assertEqual(len(result['results']), 5)\n\n url = '/%s/job-types/?is_system=true' % self.api\n response = self.client.generic('GET', url)\n self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)\n\n result = json.loads(response.content)\n self.assertEqual(len(result['results']), 1)", "def test_list_runs(self):\n pass", "def test_estimate_liveness_batch_without_landmarks5(self):\n detection = self.detector.detectOne(VLImage.load(filename=SPOOF), detect5Landmarks=False)\n with pytest.raises(ValueError) as exceptionInfo:\n self.livenessEstimator.estimateBatch([detection])\n assert \"Landmarks5 is required for liveness estimation\" == 
str(exceptionInfo.value)", "def get_status(self):\n url = \"data_request?id=jobstatus&job=%d&plugin=zwave\" % self.id\n return self.vera.get(url)", "def get_net_service_status(self):\n\t\treturn Job(SDK.PrlSrv_GetNetServiceStatus(self.handle)[0])", "def test_lti_rest_listing(self):\r\n request = mock.Mock()\r\n request.method = 'GET'\r\n response = get_course_lti_endpoints(request, self.course.id.to_deprecated_string())\r\n\r\n self.assertEqual(200, response.status_code)\r\n self.assertEqual('application/json', response['Content-Type'])\r\n\r\n expected = {\r\n \"lti_1_1_result_service_xml_endpoint\": self.expected_handler_url('grade_handler'),\r\n \"lti_2_0_result_service_json_endpoint\":\r\n self.expected_handler_url('lti_2_0_result_rest_handler') + \"/user/{anon_user_id}\",\r\n \"display_name\": self.lti_published.display_name\r\n }\r\n self.assertEqual([expected], json.loads(response.content))", "def test_running_job(self):\n\n running_job = json.loads(TREEHERDER_JOB % (\"unknown\", \"running\"))\n self.assertEquals(self.query_api.get_job_status(running_job), RUNNING)", "def list(self, jobguid=\"\", executionparams=None):", "def check_result(self, params, server):\n if server['building']:\n # I assume Server and client are on the same TimeZone\n # the API doesn't tell me where is the server (only /systemInfo)\n job_started = datetime.fromtimestamp(int(server['timestamp']) / 1000)\n time_delta = (params['now'] - job_started)\n\n # New in version 2.7 --> datetime.timedelta.total_seconds\n # we want python >= 2.4 so we will do it ourselves\n seconds_since_start = time_delta.seconds + time_delta.days * 86400\n job_duration = self.seconds2human(seconds_since_start)\n if (seconds_since_start >= params['critical'] * 60):\n msg = '%s has been running for %s, see %sconsole#footer' % (\n params['job'],\n job_duration,\n server['url'])\n status = 'CRITICAL'\n elif (seconds_since_start >= params['warning'] * 60):\n msg = '%s has been running for %s, see %sconsole#footer' % (\n params['job'],\n job_duration,\n server['url'])\n status = 'WARNING'\n else:\n msg = '%s still running after %s, watch it on %sconsole#footer' % (\n params['job'],\n job_duration,\n server['url'])\n status = 'OK'\n else:\n # Easy part, the job has completed ...\n if server['result'] == 'SUCCESS':\n duration = self.seconds2human(server['duration'] / 1000)\n msg = '%s exited normally after %s' % (params['job'], duration)\n status = 'OK'\n\n elif server['result'] == 'UNSTABLE':\n duration = self.seconds2human(server['duration'] / 1000)\n msg = '%s is marked as unstable after %s, see %sconsole#footer' % (\n params['job'], duration, server['url'])\n status = 'WARNING'\n\n elif server['result'] == 'FAILURE':\n msg = '%s exited with an error, see %sconsole#footer' % (\n params['job'], server['url'])\n status = 'CRITICAL'\n\n elif server['result'] == 'ABORTED':\n msg = '%s has been aborted, see %sconsole#footer' % (\n params['job'], server['url'])\n status = 'UNKNOWN'\n else:\n # If you get there, patch welcome\n msg = '%s is in a not known state, Jenkins API issue ? 
see %s' % (\n params['job'], server['url'])\n status = 'UNKNOWN'\n\n return(status, msg)", "def test_get_list(self):\n result = self.runner.invoke(\n cli,\n [\n *CLI_LOG_OPTION,\n \"config\",\n \"get\",\n \"vendor.fetchai.connections.p2p_libp2p.config.entry_peers\",\n ],\n standalone_mode=False,\n )\n assert result.exit_code == 0\n assert result.output == \"[]\\n\"", "async def get_jobs(jobId: int) -> str: \n return mngr.getJob(str(jobId))", "def lantern_jobs():\n if not app.config.get(\"ENABLE_LANTERN\", False):\n print \"[{x}] Not sending Lantern jobs - interface disabled\".format(x=dates.now())\n return\n print \"[{x}] Sending Lantern jobs\".format(x=dates.now())\n LanternApi.make_new_jobs()", "def job_status(self, job_id):\n url = self.base_url + \"/ml-service/phoenix-ml/job/status?id={0}\".format(job_id)\n # url = \"http://10.1.2.110:8199/phoenix-ml/job/status?id=12\"\n headers = {\"ApiKey\": self.api_key}\n response = requests.get(url=url, headers=headers)\n return response.text", "def test_get_hyperflex_cluster_profile_list(self):\n pass", "def test_virtualservice_get(self):\n pass", "def test_ipam_services_read(self):\n pass", "def lgb_trial(args, reporter):\n try_import_lightgbm()\n import lightgbm as lgb\n # list of args which are not model hyperparameters:\n nonparam_args = set(['directory', 'task_id', 'lgb_model', 'dataset_train_filename', 'dataset_val_filename'])\n trial_id = args.task_id # Note may not start at 0 if HPO has been run for other models with same scheduler\n directory = args.directory\n file_prefix = \"trial_\"+str(trial_id)+\"_\" # append to all file names created during this trial. Do NOT change!\n lgb_model = args.lgb_model\n lgb_model.params = lgb_model.params.copy() # ensure no remaining pointers across trials\n for key in args:\n if key not in nonparam_args:\n lgb_model.params[key] = args[key] # use these hyperparam values in this trial\n dataset_train = lgb.Dataset(directory+args.dataset_train_filename)\n dataset_val_filename = args.get('dataset_val_filename', None)\n if dataset_val_filename is not None:\n dataset_val = lgb.Dataset(directory+dataset_val_filename)\n eval_metric = lgb_model.get_eval_metric()\n if lgb_model.problem_type == BINARY:\n train_loss_name = 'binary_logloss'\n elif lgb_model.problem_type == MULTICLASS:\n train_loss_name = 'multi_logloss'\n elif lgb_model.problem_type == REGRESSION:\n train_loss_name = 'l2'\n else:\n raise ValueError(\"unknown problem_type for LGBModel: %s\" % lgb_model.problem_type)\n lgb_model.eval_results = {}\n callbacks = []\n valid_names = ['train_set']\n valid_sets = [dataset_train]\n if dataset_val is not None:\n callbacks += [\n hpo_callback(reporter=reporter, stopping_rounds=150, metrics_to_use=[('valid_set', lgb_model.eval_metric_name)], \n max_diff=None, ignore_dart_warning=True, verbose=False, train_loss_name=train_loss_name, eval_results=lgb_model.eval_results)\n ]\n valid_names = ['valid_set'] + valid_names\n valid_sets = [dataset_val] + valid_sets\n else:\n raise NotImplementedError(\"cannot call gbm hyperparameter_tune without validation dataset\")\n \n num_boost_round = lgb_model.params.pop('num_boost_round', 1000)\n seed_value = lgb_model.params.pop('seed_value', None)\n train_params = {\n 'params': lgb_model.params.copy(),\n 'train_set': dataset_train,\n 'num_boost_round': num_boost_round, \n 'valid_sets': valid_sets,\n 'valid_names': valid_names,\n 'evals_result': lgb_model.eval_results,\n 'callbacks': callbacks,\n 'verbose_eval': -1,\n }\n if type(eval_metric) != str:\n train_params['feval'] = 
eval_metric\n if seed_value is not None:\n train_params['seed'] = seed_value\n random.seed(seed_value)\n np.random.seed(seed_value)\n \n lgb_model.model = lgb.train(**train_params)\n lgb_model.params['num_boost_round'] = num_boost_round # re-set this value after training\n if seed_value is not None:\n lgb_model.params['seed_value'] = seed_value\n lgb_model.best_iteration = lgb_model.model.best_iteration\n # TODO: difficult to ensure these iters always match\n # if lgb_model.eval_results['best_iter'] != lgb_model.best_iteration:\n # raise ValueError('eval_results[best_iter]=%s does not match lgb_model.best_iteration=%s' % (lgb_model.eval_results['best_iter'], lgb_model.best_iteration) )\n # print('eval_results[best_iter]=%s does not match lgb_model.best_iteration=%s' % (lgb_model.eval_results['best_iter'], lgb_model.best_iteration) )\n trial_model_file = lgb_model.save(file_prefix=file_prefix, directory=directory, return_filename=True)\n reporter(epoch=num_boost_round+1, validation_performance=lgb_model.eval_results['best_valperf'],\n train_loss=lgb_model.eval_results['best_trainloss'],\n best_iteration=lgb_model.eval_results['best_iter'],\n directory=directory, file_prefix=file_prefix, trial_model_file=trial_model_file)\n # TODO: add to reporter: time_of_trial without load/save time (isn't this just function of early-stopping point?), memory/inference ??", "async def test_healthy(coresys: CoreSys):\n\n class TestClass:\n \"\"\"Test class.\"\"\"\n\n def __init__(self, coresys: CoreSys):\n \"\"\"Initialize the test class.\"\"\"\n self.coresys = coresys\n\n @Job(conditions=[JobCondition.HEALTHY])\n async def execute(self):\n \"\"\"Execute the class method.\"\"\"\n return True\n\n test = TestClass(coresys)\n assert await test.execute()\n\n coresys.resolution.unhealthy = UnhealthyReason.DOCKER\n assert not await test.execute()", "def test__get_slurm_jobid(self):\n \n # Mock the system calls\n subprocess.check_output = Mock(return_value='')\n # Assert that getting non-existing jobs return an empty job list\n self.assertListEqual([],sq.get_slurm_jobid(\"jobname\"),\n \"Querying for jobid of non-existing job should return an empty list\")\n # Assert that a returned job id is parsed correctly\n for jobids in [[123456789],[123456789,987654321]]:\n subprocess.check_output = Mock(return_value=\"\\n\".join([str(jid) for jid in jobids]))\n self.assertListEqual(jobids,sq.get_slurm_jobid(\"jobname\"),\n \"Querying for jobid of existing job did not return the correct value\")", "def get_status():\n # TODO tie this in with requests that can fetch the status of the pod from the cluster\n\n if request.method == \"GET\":\n \"\"\"\n request looks like:\n {\n \"workflow_name\": \"test-workflow\"\n }\n \"\"\"\n\n req = request.get_json(force=True)\n if workflow_exists(req['workflow_name']):\n # TODO fit into database\n # Get the pod by workflow and read the status\n # status = RUNNING_JOBS[req['workflow_name']].get_pod_status()\n response = {\n \"status\": 'Still running'\n }\n else:\n app.logger.error(\n f\"Received request asking the pod status in {req['workflow_name']} \"\n f\"but this workflow is not present in running jobs\"\n f\"record. 
Nothing to do.\")\n response = {\n \"status\": \"Not running\"\n }\n\n return jsonify(response)", "def describe_labeling_job(LabelingJobName=None):\n pass", "def test_cron_workflow_service_list_cron_workflows2(self):\n pass", "def test_failed_job(self):\n failed_job = json.loads(BASE_JSON % (FAILURE, 1433166610, 1, 1433166609))[0]\n self.assertEquals(self.query_api.get_job_status(failed_job), FAILURE)", "def test_health(self):\n self.assert_request('get', '/_health')", "def test_issue_start_stop_watch(self):\n pass", "def test_get(self):\n task_types = [1, 2]\n\n for task_type in task_types:\n self.john_gamer.tasks.start(task_type)\n\n self.client.force_login(self.john)\n resp = self.client.get(self.URL)\n\n self.assertListEqual(\n resp.json(),\n ['Type: 1, time left: 42s', 'Type: 2, time left: 42s'],\n \"Gamer can't get list of task via API!\"\n )", "def get_status(self):\n\t\treturn call_sdk_function('PrlJob_GetStatus', self.handle)", "def test_api_get(httpretty, new_job):\n url = 'https://salesforce/services/async/34.0/job/THEJOBID'\n httpretty.register_uri('GET', url, status=200, body=b'some xml and stuff')\n response = new_job.request('get', url, expected_response=200)\n assert response == b'some xml and stuff'", "def workload_get_success():\n return util_load_json(\n os.path.join(TEST_DATA_DIRECTORY, \"workload_get_success_response.json\")\n )", "def test_workload_get_command_success(mock_client, workload_get_success, monkeypatch):\n monkeypatch.setattr(\n illumio.pce.PolicyComputeEngine._PCEObjectAPI,\n \"get_by_reference\",\n lambda *a, **k: Workload.from_json(workload_get_success),\n )\n args = {\"href\": \"/orgs/1/workloads/dummy\"}\n resp = workload_get_command(mock_client, args)\n\n assert resp.raw_response == workload_get_success", "def list(self, jobguid=\"\", executionparams=dict()):", "def test_get_node_state_servicelight(self):\n pass", "def check_missing_requests():\n logger.info(\"ETL Check for missing requests\")\n timestamp = int((datetime.datetime.now() - datetime.timedelta(hours=12)).timestamp()) * 1000\n\n job = Job(\n run=\"beagle_etl.jobs.lims_etl_jobs.fetch_new_requests_lims\",\n args={\"timestamp\": timestamp, \"redelivery\": False},\n status=JobStatus.CREATED,\n max_retry=3,\n children=[],\n )\n job.save()\n logger.info(format_log(\"ETL fetch_new_requests_lims job created\", obj=job))", "def troubleshoot_ess(self,\n label: str,\n job: 'JobAdapter',\n level_of_theory: Union[Level, dict, str],\n conformer: Optional[int] = None,\n ):\n if not self.trsh_ess_jobs:\n logger.warning(f'Not troubleshooting failed {label} job {job.job_name}. '\n f'To enable troubleshooting, set the \"trsh_ess_jobs\" argument to \"True\".')\n return None\n\n level_of_theory = Level(repr=level_of_theory)\n logger.info('\\n')\n warning_message = f'Troubleshooting {label} job {job.job_name} which failed'\n if job.job_status[1][\"status\"] and job.job_status[1][\"status\"] != 'done':\n warning_message += f' with status: \"{job.job_status[1][\"status\"]},\"'\n if job.job_status[1][\"keywords\"]:\n warning_message += f'\\nwith keywords: {job.job_status[1][\"keywords\"]}'\n warning_message += f' in {job.job_adapter}. 
'\n if {job.job_status[1][\"error\"]} and job.job_status[1][\"line\"]:\n warning_message += f'The error \"{job.job_status[1][\"error\"]}\" was derived from the following line in the ' \\\n f'log file:\\n\"{job.job_status[1][\"line\"]}\".'\n logger.warning(warning_message)\n if conformer is not None:\n xyz = self.species_dict[label].conformers[conformer]\n else:\n xyz = self.species_dict[label].final_xyz or self.species_dict[label].initial_xyz\n\n if 'Unknown' in job.job_status[1]['keywords'] and 'change_node' not in job.ess_trsh_methods:\n job.ess_trsh_methods.append('change_node')\n job.troubleshoot_server()\n if job.job_name not in self.running_jobs[label]:\n self.running_jobs[label].append(job.job_name) # mark as a running job\n if job.job_adapter == 'gaussian':\n if self.species_dict[label].checkfile is None:\n self.species_dict[label].checkfile = job.checkfile\n # Determine if the species is a hydrogen atom (or its isotope).\n is_h = self.species_dict[label].number_of_atoms == 1 and \\\n self.species_dict[label].mol.atoms[0].element.symbol in ['H', 'D', 'T']\n\n output_errors, ess_trsh_methods, remove_checkfile, level_of_theory, \\\n software, job_type, fine, trsh_keyword, memory, shift, cpu_cores, couldnt_trsh = \\\n trsh_ess_job(label=label,\n level_of_theory=level_of_theory,\n server=job.server,\n job_status=job.job_status[1],\n is_h=is_h,\n job_type=job.job_type,\n num_heavy_atoms=self.species_dict[label].number_of_heavy_atoms,\n software=job.job_adapter,\n fine=job.fine,\n memory_gb=job.job_memory_gb,\n cpu_cores=job.cpu_cores,\n ess_trsh_methods=job.ess_trsh_methods,\n )\n for output_error in output_errors:\n self.output[label]['errors'] += output_error\n if 'Could not troubleshoot' in output_error and 'tsg' in job.job_name:\n self.species_dict[label].ts_guesses[get_i_from_job_name(job.job_name)].errors += f'; {output_error}'\n if remove_checkfile:\n self.species_dict[label].checkfile = None\n job.ess_trsh_methods = ess_trsh_methods\n\n if not couldnt_trsh:\n self.run_job(label=label,\n xyz=xyz,\n level_of_theory=level_of_theory,\n job_adapter=software,\n memory=memory,\n job_type=job_type,\n fine=fine,\n ess_trsh_methods=ess_trsh_methods,\n trsh=trsh_keyword,\n conformer=conformer,\n torsions=job.torsions,\n dihedrals=job.dihedrals,\n directed_scan_type=job.directed_scan_type,\n rotor_index=job.rotor_index,\n cpu_cores=cpu_cores,\n shift=shift,\n )\n elif self.species_dict[label].is_ts and not self.species_dict[label].ts_guesses_exhausted:\n logger.info(f'TS {label} did not converge. 
'\n f'Status is:\\n{self.species_dict[label].ts_checks}\\n'\n f'Searching for a better TS conformer...')\n self.switch_ts(label=label)\n\n self.save_restart_dict()", "def run_list_cli_tests(experiment_id: int) -> None:\n\n subprocess.check_call(\n [\"det\", \"-m\", conf.make_master_url(), \"experiment\", \"list-trials\", str(experiment_id)]\n )\n\n subprocess.check_call(\n [\"det\", \"-m\", conf.make_master_url(), \"experiment\", \"list-checkpoints\", str(experiment_id)]\n )\n subprocess.check_call(\n [\n \"det\",\n \"-m\",\n conf.make_master_url(),\n \"experiment\",\n \"list-checkpoints\",\n \"--best\",\n str(1),\n str(experiment_id),\n ]\n )", "def my_job_func(*, mustfail):\n\n if mustfail:\n raise RuntimeError('Job failed, as requested!')\n\n return {\n 'message': 'Job well done!',\n 'payload': {'coolstuff': 'here'},\n 'readiness': 1.0\n }", "def test_query_train_jobs_with_lower_offset(self, client):\n params = dict(offset=-1, limit=10)\n url = get_url(BASE_URL, params)\n response = client.get(url)\n result = response.get_json()\n assert result.get('error_code') == '50540002'", "def test_cachedjob_get_status(cached_job):\n \n # Setup\n c_job = cached_job\n \n # Execute\n expected_status = StatusEnum(JOB_DETAILS_HTML['status'])\n cached_status = c_job.status\n\n # Verify\n assert expected_status == cached_status", "def test_service_initiated():\n assert \"ready\" in bkt_outcome_unwind.index()", "def test_successful(self):\n\n url = '/%s/job-types/system-failures/' % self.api\n response = self.client.generic('GET', url)\n self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)\n\n result = json.loads(response.content)\n self.assertEqual(len(result['results']), 1)\n self.assertEqual(result['results'][0]['job_type']['name'], self.job.job_type.name)\n self.assertEqual(result['results'][0]['count'], 1)", "def status():\n _request('worklog/status/')", "def test_workon(self):\n\n def foo(x):\n return [dict(name=\"result\", type=\"objective\", value=x * 2)]\n\n experiment = workon(foo, space={\"x\": \"uniform(0, 10)\"}, max_trials=5)\n assert len(experiment.fetch_trials()) == 5\n assert experiment.name == \"loop\"\n assert isinstance(experiment._experiment._storage, Legacy)\n assert isinstance(experiment._experiment._storage._db, EphemeralDB)" ]
[ "0.6060957", "0.60050637", "0.5874817", "0.5739824", "0.56933695", "0.5687158", "0.5626335", "0.56105167", "0.560023", "0.55861163", "0.55428433", "0.5536615", "0.55124384", "0.54971635", "0.5488803", "0.5478777", "0.5478777", "0.54599184", "0.54395", "0.54195917", "0.5348092", "0.5341301", "0.53278685", "0.53211635", "0.5308946", "0.5256032", "0.5236095", "0.5225105", "0.5223274", "0.52098006", "0.52033067", "0.5191079", "0.51880175", "0.51869375", "0.51507616", "0.5136455", "0.51031953", "0.51030403", "0.51004225", "0.50927675", "0.50846905", "0.5083852", "0.50824565", "0.5078986", "0.5075513", "0.50449044", "0.50444204", "0.5039275", "0.5036554", "0.5012672", "0.50120705", "0.50117654", "0.50082135", "0.49886078", "0.49797052", "0.4966212", "0.49631453", "0.49536896", "0.494956", "0.49494514", "0.49404782", "0.49310762", "0.49273303", "0.49252895", "0.49213082", "0.491665", "0.49148753", "0.49145198", "0.49128726", "0.49074316", "0.49068385", "0.4905411", "0.49021193", "0.489633", "0.48960298", "0.48902076", "0.4887071", "0.48833767", "0.48816743", "0.48771393", "0.4866267", "0.48652357", "0.48577642", "0.4857051", "0.485558", "0.4851954", "0.48516017", "0.48495945", "0.48476663", "0.48449582", "0.48380372", "0.4834339", "0.48301178", "0.4825133", "0.48216188", "0.48158354", "0.48128262", "0.48099652", "0.48082006", "0.48076123" ]
0.73525107
0
Test case for get_readiness. Get job service readiness.
def test_get_readiness(self):
    response = self.client.open(
        '/api/v1//readiness',
        method='GET',
        content_type='application/json')
    self.assert200(response,
                   "Response body is : " + response.data.decode('utf-8'))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_get_refresh_job_status(self):\n pass", "def test_readiness_endpoint(self):\n url = f'{BASE_URL}/ready'\n response = requests.get(url)\n response_json = response.json()\n assert response.status_code == 503\n assert response_json['status'] == 503", "def test_status(self):\n\n url = '/%s/jobs/?status=RUNNING' % self.api\n response = self.client.generic('GET', url)\n self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)\n\n result = json.loads(response.content)\n self.assertEqual(len(result['results']), 1)\n self.assertEqual(result['results'][0]['job_type']['id'], self.job1.job_type.id)", "def test_get_job(self):\n response = self.client.open(\n '/v1/job/{id}'.format(id='id_example'),\n method='GET',\n content_type='application/json')\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))", "def test_get_status(self):\n pass", "def test_get_status(self):\n pass", "def test_get_job_queue(self):\n response = self.client.open(\n '/tx-queue/2/scheduler/job',\n method='GET')\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))", "def test_active(self):\n\n url = '/%s/job-types/status/?is_active=true' % self.api\n response = self.client.generic('GET', url)\n self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)\n\n result = json.loads(response.content)\n self.assertEqual(len(result['results']), 3)\n self.assertEqual(result['results'][0]['job_type']['name'], self.job1.job_type.name)\n self.assertEqual(result['results'][0]['job_counts'][0]['count'], 1)", "def test_cachedjob_get_status(cached_job):\n \n # Setup\n c_job = cached_job\n \n # Execute\n expected_status = StatusEnum(JOB_DETAILS_HTML['status'])\n cached_status = c_job.status\n\n # Verify\n assert expected_status == cached_status", "def test_running_job(self):\n running_job = json.loads(BASE_JSON % ('null', 'null', 0, 'null'))[0]\n self.assertEquals(self.query_api.get_job_status(running_job), RUNNING)", "def test_health_get(self):\n pass", "def test_successful_on_get(self):\n\n url = '/%s/jobs/' % self.api\n\n response = self.client.get(url)\n self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)", "def test_is_active(self):\n\n url = '/%s/job-types/?is_active=false' % self.api\n response = self.client.generic('GET', url)\n self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)\n result = json.loads(response.content)\n self.assertEqual(len(result['results']), 2)", "def get_status(self):\n\t\treturn call_sdk_function('PrlJob_GetStatus', self.handle)", "def test_successful(self):\n\n url = '/%s/job-types/running/' % self.api\n response = self.client.generic('GET', url)\n self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)\n\n result = json.loads(response.content)\n self.assertEqual(len(result['results']), 1)\n self.assertEqual(result['results'][0]['job_type']['name'], self.job.job_type.name)\n self.assertEqual(result['results'][0]['count'], 1)\n self.assertIsNotNone(result['results'][0]['longest_running'])", "def test_get_node_status_batterystatus(self):\n pass", "def test_expect_status_property_about_registry_process(client, start_call_fx):\n\n url = reverse_lazy('calls:registry-list')\n\n response = client.post(url, start_call_fx, content_type='application/json')\n\n job_id = response.data.get('job_id')\n\n job = client.get(job_id)\n\n assert job.data.get('status') == 'DONE'", "def test_is_active(self):\n\n url = '/%s/job-type-names/?is_active=false' % 
self.api\n response = self.client.generic('GET', url)\n self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)\n result = json.loads(response.content)\n\n self.assertEqual(len(result['results']), 2)", "def query_job_progress():\n pass", "def test_service_initiated():\n assert \"ready\" in bkt_outcome_unwind.index()", "def test_running_job(self):\n\n running_job = json.loads(TREEHERDER_JOB % (\"unknown\", \"running\"))\n self.assertEquals(self.query_api.get_job_status(running_job), RUNNING)", "def _check_job_status(self) -> str:\n self._assert_job_created()\n\n r = requests.post(\n f'https://{cc.ROUTE_PREFIX}.stratodem.com/jobs/status',\n headers=dict(\n Authorization=f'Bearer {get_api_token()}',\n ),\n json=dict(job_id=self._job_id)\n )\n\n if not r.status_code == 200:\n raise APIQueryFailedException('Failed to determine job status')\n\n r = r.json()\n\n if not r['success']:\n raise APIQueryFailedException(r)\n else:\n return r['message']", "def jobHealthy(self, count):\n job = self.tester.submission_result.job\n for idx in range(count - 1):\n if (job.health == 'healthy'):\n return True\n print(\"health check fail : %d\" % idx )\n time.sleep(1)\n job.refresh()\n self.assertEqual('healthy', job.health)\n return False", "async def test_healthy(coresys: CoreSys):\n\n class TestClass:\n \"\"\"Test class.\"\"\"\n\n def __init__(self, coresys: CoreSys):\n \"\"\"Initialize the test class.\"\"\"\n self.coresys = coresys\n\n @Job(conditions=[JobCondition.HEALTHY])\n async def execute(self):\n \"\"\"Execute the class method.\"\"\"\n return True\n\n test = TestClass(coresys)\n assert await test.execute()\n\n coresys.resolution.unhealthy = UnhealthyReason.DOCKER\n assert not await test.execute()", "def test_lbheartbeat(self):\n pass", "def test_get_refresh_status(api: API, account: Account):\n account._latest_refresh_job_id = \"123_job_id\"\n api.candlepin.get_job.return_value = {\"state\": \"FINISHED\"}\n assert account.get_refresh_status() == \"FINISHED\"\n api.candlepin.get_job.assert_called_once_with(\"123_job_id\")", "def test_job_fail(client):\n response = client.get('/status/random')\n assert response.status_code == 400", "async def get_status():", "def test_pending_job(self):\n pending_job = json.loads(BASE_JSON % ('null', 'null', 0, 1433166609))[0]\n pending_job.pop(\"status\")\n self.assertEquals(self.query_api.get_job_status(pending_job), PENDING)", "def test_get_status(self):\n response = self.client.open(\n '/v1/status',\n method='GET')\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))", "def test_successful(self):\n\n url = '/%s/job-types/pending/' % self.api\n response = self.client.generic('GET', url)\n self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)\n\n result = json.loads(response.content)\n self.assertEqual(len(result['results']), 1)\n self.assertEqual(result['results'][0]['job_type']['name'], self.job.job_type.name)\n self.assertEqual(result['results'][0]['count'], 1)\n self.assertIsNotNone(result['results'][0]['longest_pending'])", "def test_active_and_date_range(self):\n\n url = '/%s/job-types/status/?is_active=true&started=%s&ended=%s' % ( self.api,\n '2016-01-01T00:00:00Z',\n '2016-01-02T00:00:00Z')\n response = self.client.generic('GET', url)\n self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)\n\n result = json.loads(response.content)\n self.assertEqual(len(result['results']), 3)\n self.assertEqual(result['results'][0]['job_type']['name'], self.job1.job_type.name)\n 
self.assertEqual(len(result['results'][0]['job_counts']), 0)", "def test_readiness(self):\n with DockerHost('host1',\n additional_docker_options=CLUSTER_STORE_DOCKER_OPTIONS) as host1:\n retry_until_success(host1.assert_is_ready, retries=30)", "def test_service_status(self, api_instance):\n params = api_instance.get_service_status()\n # Only key we care about here is GetServiceStatus\n assert params[\"Action\"] == \"GetServiceStatus\"", "def get_status(self):\n url = \"data_request?id=jobstatus&job=%d&plugin=zwave\" % self.id\n return self.vera.get(url)", "def request_status(job_id):\n status = _database_operations.get_status(job_id, Session())\n if status is None:\n flask.abort(404)\n else:\n return json.dumps({\n 'status': status.status,\n 'finished': status.finished\n })", "def test_hugepage_service_state(Service):\n\n service = Service('disable-transparent-hugepages')\n\n assert service.is_enabled\n assert service.is_running", "def test_polljobs_callcount(mock_status):\n\n jobs = {\n \"jobone\": {\n \"laststatus\": \"Running\",\n \"scheduler\": \"LSF\",\n \"jobid\": \"123456\"\n },\n \"jobtwo\": {\n \"laststatus\": \"Queued\",\n \"scheduler\": \"LSF\",\n \"jobid\": \"123456\"\n },\n \"jobthree\": {\n \"laststatus\": \"Submit Error\",\n \"scheduler\": \"LSF\",\n \"jobid\": \"123456\"\n },\n \"jobfour\": {\n \"laststatus\": \"Waiting Submission\",\n \"scheduler\": \"LSF\",\n \"jobid\": \"123456\"\n },\n \"jobfive\": {\n \"laststatus\": \"Finished\",\n \"scheduler\": \"LSF\",\n \"jobid\": \"123456\"\n },\n \"jobsix\": {\n \"laststatus\": \"Complete\",\n \"scheduler\": \"LSF\",\n \"jobid\": \"123456\"\n }\n }\n\n mock_status.return_value = \"Running\"\n returnval = _polljobs(jobs, False)\n\n assert mock_status.call_count == 2, \\\n \"Should only be polling running and queued jobs\"\n assert jobs[\"jobtwo\"][\"laststatus\"] == \"Running\"\n assert returnval is True", "def test_execute_get_success():\n response_queue = run_get(\n TestData.RECEPTOR_CONFIG,\n json.dumps(TestData.JOB_TEMPLATE_PAYLOAD_SINGLE_PAGE),\n TestData.JOB_TEMPLATE_RESPONSE,\n )\n response = response_queue.get()\n validate_get_response(\n response,\n 200,\n TestData.JOB_TEMPLATE_COUNT,\n [TestData.JOB_TEMPLATE_1, TestData.JOB_TEMPLATE_2],\n )", "def job_status(self, job_id):\n url = self.base_url + \"/ml-service/phoenix-ml/job/status?id={0}\".format(job_id)\n # url = \"http://10.1.2.110:8199/phoenix-ml/job/status?id=12\"\n headers = {\"ApiKey\": self.api_key}\n response = requests.get(url=url, headers=headers)\n return response.text", "def test_get_job_by_id(self):\n response = self.client.open(\n '/tx-queue/2/scheduler/job/{jobId}'.format(jobId=1),\n method='GET')\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))", "async def get_jobs(): \n return mngr.getAllJobs()", "def get_job_status(job_url, build_number, username, password):\n try:\n url = \"{}{}/api/json\".format(job_url, str(build_number))\n res = requests.get(url, auth=(username, password))\n build_status_json = json.loads(res.text)\n return build_status_json[\"result\"]\n\n except requests.exceptions.RequestException as e:\n print (e)\n sys.exit(2)", "def test_get_node_status(self):\n pass", "def get_status(job_id):\n job = fetch_data.AsyncResult(job_id, app=app)\n return jsonify({'job_id': job_id, 'status': job.status})", "def service_check(self, env):\n import params\n\n self.active_master_host = params.hawqmaster_host\n self.active_master_port = params.hawq_master_address_port\n self.checks_failed = 0\n self.total_checks = 
2\n\n # Checks HAWQ cluster state\n self.check_state()\n\n # Runs check for writing and reading tables on HAWQ\n self.check_hawq()\n\n # Runs check for writing and reading external tables on HDFS using PXF, if PXF is installed\n if params.is_pxf_installed:\n self.total_checks += 1\n self.check_hawq_pxf_hdfs()\n else:\n Logger.info(\"PXF not installed. Skipping HAWQ-PXF checks...\")\n\n if self.checks_failed != 0:\n Logger.error(\"** FAILURE **: Service check failed {0} of {1} checks\".format(self.checks_failed, self.total_checks))\n sys.exit(1)\n\n Logger.info(\"Service check completed successfully\")", "def test_fake_health_get(self):\n pass", "def get_status() -> None:\n assert scraper.get_status() == True", "async def my_job_async(mustfail):\n if mustfail:\n raise RuntimeError('Job failed, as requested!')\n\n return {\n 'message': 'job well done',\n 'payload': {'coolstuff': 'here'},\n 'readiness': 1.0\n }", "def test_get_status(self):\n resp = self.build_api.getStatus().json()\n assert 'status' in resp\n assert 'message' in resp", "async def test_get_huge_artifact():\n writer = SimpleWriter()\n work_queue = asyncio.Queue()\n await work_queue.put(TestData.JOB_GET_PAYLOAD)\n worker = tower_api_worker.TowerApiWorker(TestData.config, writer, work_queue)\n headers = {\"Content-Type\": \"application/json\"}\n with aioresponses() as mocked:\n mocked.get(\n TestData.JOB_MONITOR_URL,\n status=200,\n body=json.dumps(TestData.JOB_1_SUCCESSFUL_HUGE),\n headers=headers,\n )\n with pytest.raises(Exception) as excinfo:\n await worker.start()\n assert \"Artifacts is over 1024 bytes\" in str(excinfo.value)", "def check(job, logger, **kwargs):\n resources = Resource.objects.filter(\n attributes__field__name=\"health_check_config\",\n lifecycle='ACTIVE'\n ).distinct()\n set_progress(\n f\"Will run health checks for {resources.count()} resource(s): \"\n f\"{[resource.name for resource in resources]}\")\n\n check_results = []\n\n for resource in resources:\n logger.info(f\"Will run health checks for resource '{resource.name}'.\")\n config_dict = get_config_value(resource)\n failing_health_checks = 0\n\n # Run all the health checks configured for this resource.\n for health_check in config_dict.get('health_checks', {}):\n max_retries = health_check.get('max_retries', 3)\n retry_interval_seconds = health_check.get('retry_interval_seconds', 1)\n\n name = health_check.get('name')\n job.set_progress(f\"Beginning health check '{name}'.\")\n url = health_check.get('url')\n accepted_statuses = health_check.get('accepted_status_codes')\n timeout_seconds = health_check.get('timeout_seconds', 3)\n\n retry_attempts = 0\n while retry_attempts <= max_retries:\n try:\n if retry_attempts > 1:\n logger.info(f\"On retry attempt {retry_attempts}.\")\n status_code = requests.get(url, timeout=timeout_seconds).status_code\n\n if accepted_statuses and status_code not in accepted_statuses:\n # Failure.\n msg = (\n f\"HTTP Request returned {status_code}, \"\n f\"which is not in the accepted statuses: {accepted_statuses}\"\n f\"for health check '{name}'.\"\n )\n logger.debug(msg)\n retry_attempts += 1\n else:\n # Pass - We got a valid status. 
We can stop now.\n logger.info(f\"Health check '{name}' completed with success.\")\n break\n\n except Exception as e:\n # Bad, could be ConnectionError, which will count as a failure.\n logger.debug(e)\n retry_attempts += 1\n\n # Wait for the specified retry interval before trying again\n time.sleep(retry_interval_seconds)\n\n if retry_attempts == max_retries:\n job.set_progress(f\"Max retries exceeded for health check '{name}'.\")\n failing_health_checks += 1\n\n # Summarize this resource's health check results.\n data_dict = {\n 'time': datetime.datetime.now(),\n 'resource_id': resource.id,\n 'resource_name': resource.name,\n 'failing_health_checks': failing_health_checks,\n }\n\n check_results.append(data_dict)\n\n context = {\n \"health_check_results\": check_results,\n }\n\n # Return the dict to be processed by the \"Then\" action\n return 'SUCCESS', '', '', {'context': context}", "async def test_health_check(client: AsyncClient):\n\n response = await client.get(f\"/health-check\")\n assert response.status_code == 200\n\n data = response.json()\n assert data[\"service\"][\"status\"] == \"healthy\"\n assert data[\"service\"][\"error\"] is None\n assert data[\"database\"][\"status\"] == \"healthy\"\n assert data[\"database\"][\"error\"] is None", "def test_pending_job(self):\n\n pending_job = json.loads(TREEHERDER_JOB % (\"unknown\", \"pending\"))\n self.assertEquals(self.query_api.get_job_status(pending_job), PENDING)", "def test_readiness_env_port(self):\n with DockerHost('host1',\n additional_docker_options=CLUSTER_STORE_DOCKER_OPTIONS, start_calico=False) as host1:\n host1.start_calico_node(env_options=\"-e FELIX_HEALTHPORT=9032 -e FELIX_HEALTHENABLED=true\")\n retry_until_success(host1.assert_is_ready, retries=30)", "def check(args, session: Session = NEW_SESSION) -> None:\n if args.allow_multiple and not args.limit > 1:\n raise SystemExit(\"To use option --allow-multiple, you must set the limit to a value greater than 1.\")\n if args.hostname and args.local:\n raise SystemExit(\"You can't use --hostname and --local at the same time\")\n\n query = select(Job).where(Job.state == JobState.RUNNING).order_by(Job.latest_heartbeat.desc())\n if args.job_type:\n query = query.where(Job.job_type == args.job_type)\n if args.hostname:\n query = query.where(Job.hostname == args.hostname)\n if args.local:\n query = query.where(Job.hostname == get_hostname())\n if args.limit > 0:\n query = query.limit(args.limit)\n\n alive_jobs: list[Job] = [job for job in session.scalars(query) if job.is_alive()]\n\n count_alive_jobs = len(alive_jobs)\n if count_alive_jobs == 0:\n raise SystemExit(\"No alive jobs found.\")\n if count_alive_jobs > 1 and not args.allow_multiple:\n raise SystemExit(f\"Found {count_alive_jobs} alive jobs. 
Expected only one.\")\n if count_alive_jobs == 1:\n print(\"Found one alive job.\")\n else:\n print(f\"Found {count_alive_jobs} alive jobs.\")", "def get(self):\n parser = reqparse.RequestParser()\n parser.add_argument(\"job_id\", type=str, location=\"args\")\n args = parser.parse_args()\n job_id = args[\"job_id\"]\n if job_id is None:\n return errors.all_errors(\n \"CLIENT_MISSING_PARAMETER\", \"job_id (str) parameter is required\"\n )\n\n try:\n qstat_command = config.Config.PBS_QSTAT + \" -f \" + job_id + \" -Fjson\"\n try:\n get_job_info = subprocess.check_output(shlex.split(qstat_command))\n try:\n sanitize_input = get_job_info.decode(\"utf-8\")\n for match in re.findall(\n '\"project\":(\\d+),', sanitize_input, re.MULTILINE\n ):\n # Clear case where project starts with digits to prevent leading zero errors\n print(\n f'Detected \"project\":{match}, > Will be replaced to prevent int leading zero error'\n )\n sanitize_input = sanitize_input.replace(\n f'\"project\":{match},', f'\"project\":\"{match}\",'\n )\n\n job_info = ast.literal_eval(sanitize_input)\n except Exception as err:\n return {\n \"success\": False,\n \"message\": \"Unable to retrieve this job. Job may have terminated. Error: \"\n + str(job_info),\n }, 210\n\n job_key = list(job_info[\"Jobs\"].keys())[0]\n return {\"success\": True, \"message\": job_info[\"Jobs\"][job_key]}, 200\n\n except Exception as err:\n return {\n \"success\": False,\n \"message\": \"Unable to retrieve Job ID (job may have terminated and is no longer in the queue)\",\n }, 210\n\n except Exception as err:\n return {\"success\": False, \"message\": \"Unknown error: \" + str(err)}, 500", "def test_successful(self):\n\n url = '/%s/job-types/status/' % self.api\n response = self.client.generic('GET', url)\n self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)\n\n result = json.loads(response.content)\n self.assertEqual(len(result['results']), 4)\n self.assertEqual(result['results'][0]['job_type']['name'], self.job1.job_type.name)\n self.assertEqual(result['results'][0]['job_counts'][0]['count'], 1)", "def my_job_func(*, mustfail):\n\n if mustfail:\n raise RuntimeError('Job failed, as requested!')\n\n return {\n 'message': 'Job well done!',\n 'payload': {'coolstuff': 'here'},\n 'readiness': 1.0\n }", "def get_net_service_status(self):\n\t\treturn Job(SDK.PrlSrv_GetNetServiceStatus(self.handle)[0])", "def test_version_is_active(self):\n\n url = '/%s/job-types/job-type-for-view-test/?is_active=false' % self.api\n response = self.client.get(url)\n self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)\n\n result = json.loads(response.content)\n self.assertEqual(len(result['results']), 1)", "def test_is_system(self):\n\n url = '/%s/job-types/?is_system=false' % self.api\n response = self.client.generic('GET', url)\n self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)\n result = json.loads(response.content)\n self.assertEqual(len(result['results']), 5)\n\n url = '/%s/job-types/?is_system=true' % self.api\n response = self.client.generic('GET', url)\n self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)\n\n result = json.loads(response.content)\n self.assertEqual(len(result['results']), 1)", "def test_get_jobs_status(\n globals, urls, client, mock_test_responses, context_fixture):\n\n context = context_fixture('Healthy')\n mock_test_responses(task='upload', status=CoreStatus.DONE)\n responses.add(\n responses.GET, urls('task', 'upload'),\n json={\n '_id': 
globals['upload'],\n 'status': CoreStatus.FAIL\n },\n status=200,\n content_type='application/json')\n\n for i in range(2):\n client.upload(file=globals['test_csv_file'], name=str(i))\n\n context.run()\n job_fail = context.get_jobs_status(status=['fail'])\n\n assert job_fail.iloc[0]['status'] == 'fail'\n assert job_fail.iloc[0]['Job'] == '1'", "def get_status():\n # TODO tie this in with requests that can fetch the status of the pod from the cluster\n\n if request.method == \"GET\":\n \"\"\"\n request looks like:\n {\n \"workflow_name\": \"test-workflow\"\n }\n \"\"\"\n\n req = request.get_json(force=True)\n if workflow_exists(req['workflow_name']):\n # TODO fit into database\n # Get the pod by workflow and read the status\n # status = RUNNING_JOBS[req['workflow_name']].get_pod_status()\n response = {\n \"status\": 'Still running'\n }\n else:\n app.logger.error(\n f\"Received request asking the pod status in {req['workflow_name']} \"\n f\"but this workflow is not present in running jobs\"\n f\"record. Nothing to do.\")\n response = {\n \"status\": \"Not running\"\n }\n\n return jsonify(response)", "def test_call_first_time(self, query_repo_url, get_credentials, valid_revision, get):\n self.assertEquals(\n self.query_api._get_all_jobs(\"try\", \"146071751b1e\"),\n json.loads(JOBS_SCHEDULE))\n\n assert get.call_count == 1\n\n # Test that this fills our caches\n self.assertEquals(\n query_jobs.JOBS_CACHE[(\"try\", \"146071751b1e\")],\n json.loads(JOBS_SCHEDULE))", "def test_weird_job(self):\n weird_job = json.loads(BASE_JSON % (20, 1433166610, 1, 1433166609))[0]\n with self.assertRaises(Exception):\n self.query_api.get_job_status(weird_job)", "def check_job_status_by_id(job_id):\n print('=' * 40)\n print('check_status_by_job_id', job_id)\n print('=' * 40)\n\n it_worked = check_job_status(job)\n if it_worked:\n return ok_resp(job)\n\n user_msg = ('PreprocessJob still in process: %s') % (job_id)\n return err_resp(user_msg)", "def get(self):\n status = \"stopped\"\n shared_memory_locks[\"data_worker\"].acquire()\n if self.shared_memory_manager_dict[\"data_worker_running\"]:\n status = \"running\"\n shared_memory_locks[\"data_worker\"].release()\n if self.shared_memory_manager_dict[\"service_reconfiguring\"]:\n status += \",reconfiguring\"\n self.write({\"status\": status})", "def check_status():\n # assume no web dynos on master - there should never be a web dyno on master\n r = req.get(f\"{MASTER_API_URL}/formation/worker\", headers=MASTER_API_HEADERS)\n if r.status_code != req.codes.ok:\n print(\"Couldn't get master worker formation\")\n print(r.status_code, \":\", r.text)\n return 'unknown:1'\n master_worker = r.json()['quantity'] # this is guaranteed to work i think\n r = req.get(f\"{SLAVE_API_URL}/formation/worker\", headers=SLAVE_API_HEADERS)\n if r.status_code != req.codes.ok:\n print(\"Couldn't get slave worker formation\")\n print(r.status_code, \":\", r.text)\n return 'unknown:2'\n slave_worker = r.json()['quantity']\n r = req.get(f\"{SLAVE_API_URL}/formation/web\", headers=SLAVE_API_HEADERS)\n if r.status_code != req.codes.ok:\n print(\"Couldn't get slave web formation\")\n print(r.status_code, \":\", r.text)\n return 'unknown:3'\n slave_web = r.json()['quantity']\n # all done\n if slave_web != 0:\n return 'forbidden-web'\n elif master_worker != 0 and slave_worker != 0:\n return 'both'\n elif master_worker != 0:\n return 'master'\n elif slave_worker != 0:\n return 'slave'\n else:\n return 'none'", "def test_health_check(self):\n result = self.app.get('/v1/health')\n\n # assert the 
status code of the response 200 (OK)\n self.assertEqual(result.status_code, 200)\n self.assertEqual(result.data, b'UP')", "def getStatus():", "def health_check(task_service_id):\n logger.info(f\"Checking task service status for {task_service_id}\")\n task_service = TaskService.objects.get(kf_id=task_service_id)\n task_service.refresh_from_db()\n task_service.health_check()", "def test_heartbeat(self):\n\n with self.client:\n response = self.client.get('/heartbeat')\n data = response.json\n self.assert200(response)\n self.assertIn('status', data)", "def test_health(self):\n self.assert_request('get', '/_health')", "def test_valid_n_jobs(n_jobs: Any) -> None:\n check_n_jobs(n_jobs)", "def check_status(self, job_id, config_id=1):\n response = self.do_request(\n self.base_url +\n \"/oasis/statusAsync/\" +\n str(config_id) + \"/\" +\n str(job_id) + \"/\"\n )\n return response", "def test_stale_job_failure(self):\n self.report_start(self.whoami())\n\n self.panda_type.health_check_job = self.factory.make_job_json(health_check='true')\n self.panda_type.save()\n\n Device.objects.get(hostname='panda01').state_transition_to(Device.OFFLINE)\n Device.objects.get(hostname='panda02').state_transition_to(Device.IDLE)\n Device.objects.get(hostname='panda03').state_transition_to(Device.RUNNING)\n Device.objects.get(hostname='panda04').state_transition_to(Device.RESERVED)\n Device.objects.get(hostname='panda05').state_transition_to(Device.RETIRED)\n Device.objects.get(hostname='panda06').state_transition_to(Device.OFFLINING)\n\n job = self.submit_job()\n job.status = TestJob.RUNNING\n job.save()\n job_id = job.id\n\n job = TestJob.objects.get(id=job_id)\n self.assertEqual(job.requested_device_type.name, self.panda_type.name)\n self.assertEqual(job.status, TestJob.RUNNING)\n\n panda01 = Device.objects.get(hostname='panda01')\n self.assertEqual(panda01.health_status, Device.HEALTH_UNKNOWN)\n # set a side-effect of a broken state\n panda01.current_job = job\n panda01.save()\n panda01 = Device.objects.get(hostname='panda01')\n self.assertEqual(panda01.current_job, job)\n self.assertEqual(panda01.status, Device.OFFLINE)\n self.assertEqual(panda01.health_status, Device.HEALTH_UNKNOWN)\n\n panda02 = Device.objects.get(hostname='panda02')\n self.assertEqual(panda02.current_job, None)\n self.assertEqual(panda02.status, Device.IDLE)\n self.assertEqual(panda02.health_status, Device.HEALTH_UNKNOWN)\n\n panda03 = Device.objects.get(hostname='panda03')\n panda03.health_status = Device.HEALTH_PASS\n panda03.save()\n panda03 = Device.objects.get(hostname='panda03')\n self.assertEqual(panda03.health_status, Device.HEALTH_PASS)\n\n panda04 = Device.objects.get(hostname='panda04')\n panda04.health_status = Device.HEALTH_FAIL\n panda04.save()\n panda04 = Device.objects.get(hostname='panda04')\n self.assertEqual(panda04.health_status, Device.HEALTH_FAIL)\n\n self.assertEqual(\n Device.objects.get(hostname='panda05').health_status,\n Device.HEALTH_UNKNOWN)\n self.assertEqual(\n Device.objects.get(hostname='panda06').health_status,\n Device.HEALTH_UNKNOWN)\n\n jobs = self.scheduler_tick()\n\n self.assertEqual(len(jobs), 1)\n job = TestJob.objects.get(id=job_id)\n self.assertEqual(job.status, TestJob.RUNNING)\n\n for job in jobs:\n job_obj = TestJob.objects.get(pk=job.id) # reload\n job_obj.status = TestJob.COMPLETE\n self.job_finished(job_obj)\n\n self.assertEqual(len(jobs), 1)\n job = TestJob.objects.get(id=job_id)\n self.assertEqual(job.status, TestJob.RUNNING)\n\n panda01 = Device.objects.get(hostname='panda01')\n panda02 = 
Device.objects.get(hostname='panda02')\n panda03 = Device.objects.get(hostname='panda03')\n panda04 = Device.objects.get(hostname='panda04')\n panda05 = Device.objects.get(hostname='panda05')\n panda06 = Device.objects.get(hostname='panda06')\n\n self.assertIsNone(panda02.current_job)\n self.assertEqual(panda02.status, Device.IDLE)\n\n self.assertEqual(panda01.status, Device.OFFLINE)\n self.assertEqual(panda02.status, Device.IDLE)\n self.assertEqual(panda03.status, Device.RUNNING)\n self.assertEqual(panda04.status, Device.RESERVED)\n self.assertEqual(panda05.status, Device.RETIRED)\n self.assertEqual(panda06.status, Device.OFFLINING)\n\n jobs = self.scheduler_tick()\n\n self.assertEqual(len(jobs), 0)\n\n self.assertIsNone(Device.objects.get(hostname='panda02').current_job)\n\n jobs = self.scheduler_tick()\n for job in jobs:\n job_obj = TestJob.objects.get(pk=job.id) # reload\n job_obj.status = TestJob.COMPLETE\n self.job_finished(job_obj)\n\n self.assertEqual(len(jobs), 0)\n\n jobs = self.scheduler_tick()\n\n self.assertEqual(len(jobs), 0)\n\n panda02 = Device.objects.get(hostname='panda02')\n\n # FIXME: this is actually a bug and will need an update when the bug is fixed.\n self.assertEqual(panda01.current_job.id, job_id)\n\n self.assertEqual(panda02.current_job, None)\n self.assertEqual(panda02.status, Device.IDLE)\n self.assertEqual(panda02.health_status, Device.HEALTH_PASS)\n\n panda01 = Device.objects.get(hostname='panda01')\n self.assertEqual(panda01.status, Device.OFFLINE)\n self.assertEqual(panda01.health_status, Device.HEALTH_UNKNOWN)\n\n panda03 = Device.objects.get(hostname='panda03')\n self.assertEqual(panda03.health_status, Device.HEALTH_PASS)\n self.assertEqual(panda03.status, Device.RUNNING)\n\n panda04 = Device.objects.get(hostname='panda04')\n self.assertEqual(panda04.status, Device.RESERVED)\n self.assertEqual(panda04.health_status, Device.HEALTH_FAIL)\n\n panda05 = Device.objects.get(hostname='panda05')\n self.assertEqual(panda05.status, Device.RETIRED)\n self.assertEqual(panda05.health_status, Device.HEALTH_UNKNOWN)\n\n panda06 = Device.objects.get(hostname='panda06')\n self.assertEqual(panda06.status, Device.OFFLINING)\n self.assertEqual(panda06.health_status, Device.HEALTH_UNKNOWN)\n\n self.cleanup(self.whoami())", "def test_zombie_get(self):\n\n # Pretend to be the first agent instance\n zombie_session_id = self._open_session()\n # Start doing a GET\n zombie = BackgroundGet(self)\n\n # Leaving that GET open, imagine the agent now gets killed hard\n self._mock_restart()\n\n # The agent restarts, now I pretend to be the second agent instance\n healthy_session_id = self._open_session(expect_termination=zombie_session_id)\n healthy = BackgroundGet(self)\n\n # In the HYD-2063 case, there are now two HTTP GET handlers subscribed to the\n # TX messages for our host, so when we send a message, it might go to the healthy\n # one, or it might go to the unhealthy one. 
Send ten messages to give a decent\n # chance that if they all go to the right place then there isn't a bug.\n message_count = 10\n for i in range(0, message_count):\n sent_fresh_message = {\n \"fqdn\": self.CLIENT_NAME,\n \"type\": \"DATA\",\n \"plugin\": \"test_messaging\",\n \"session_id\": healthy_session_id,\n \"session_seq\": i,\n \"body\": None,\n }\n self._send_one_amqp(sent_fresh_message)\n # Avoid bunching up the messages so that they have a decent\n # chance of being sent to different handlers if there are\n # multiple handelrs in flight\n time.sleep(0.1)\n\n self.assertListEqual(zombie.messages, [])\n healthy_messages = len(healthy.messages)\n for attempt in range(9):\n messages = BackgroundGet(self).messages\n healthy_messages += len(messages)\n if healthy_messages >= message_count or not messages:\n break # retrieved them all, or a GET timeout\n\n self.assertEqual(healthy_messages, message_count)", "async def test_get_tasks(self):\n await self.populate_test_data() # Populate data in foglamp.scheduled_processes\n\n scheduler = Scheduler(_address, _m_port)\n await scheduler.start()\n\n # declare _scheduler task\n interval_schedule = IntervalSchedule()\n interval_schedule.name = 'get_tasks'\n interval_schedule.process_name = \"sleep5\"\n interval_schedule.repeat = datetime.timedelta(seconds=1)\n interval_schedule.exclusive = False\n\n await scheduler.save_schedule(interval_schedule)\n\n await asyncio.sleep(15)\n\n # Assert running tasks\n tasks = await scheduler.get_tasks(\n where=[\"state\", \"=\", int(Task.State.INTERRUPTED)])\n assert not tasks\n\n tasks = await scheduler.get_tasks(\n where=[\"end_time\", \"=\", 'NULL'])\n assert tasks\n\n tasks = await scheduler.get_tasks(limit=50)\n states = [int(task.state) for task in tasks]\n\n assert len(tasks) > 1\n assert int(Task.State.RUNNING) in states\n assert int(Task.State.COMPLETE) in states\n\n tasks = await scheduler.get_tasks(1)\n assert len(tasks) == 1\n\n tasks = await scheduler.get_tasks(\n where=[\"state\", \"=\", int(Task.State.RUNNING)],\n sort=[[\"state\", \"desc\"]], offset=50)\n assert not tasks\n\n tasks = await scheduler.get_tasks(\n where=[\"state\", \"=\", int(Task.State.RUNNING)],\n sort=[[\"state\", \"desc\"], [\"start_time\", \"asc\"]])\n assert tasks\n\n tasks = await scheduler.get_tasks(or_where_list=[[\"state\", \"=\", int(Task.State.RUNNING)], \\\n [\"state\", \"=\", int(Task.State.RUNNING)]])\n assert tasks\n\n tasks = await scheduler.get_tasks(and_where_list=[[\"state\", \"=\", int(Task.State.RUNNING)], \\\n [\"state\", \"=\", int(Task.State.RUNNING)]])\n assert tasks\n\n await self.stop_scheduler(scheduler)", "def health_check():\n printed_something = False\n\n job_checks = {}\n job_names = []\n for job in config.enabled_jobs:\n spec = nomad.parse(get_job(job.template))\n printed_something |= bool(nomad.check_events_and_logs(job.name))\n for service, checks in nomad.get_health_checks_from_spec(spec):\n if not checks:\n log.warn(f'service {service} has no health checks')\n continue\n job_checks[service] = checks\n job_names.append(job.name)\n printed_something |= nomad.wait_for_service_health_checks(consul, job_names, job_checks, nowait=True)\n\n if printed_something:\n log.error('Problems detected; see logs above.')\n sys.exit(1)\n else:\n log.info('No problems detected.')", "def is_alive():\n return jsonify({'message': 'Service is alive'}), 200", "def examine_job(self):\n if self.data is None:\n print(\"Could not download job id\", self.job_id)\n return\n self.duration = self.read_data(\"duration\")\n 
self.between_commands = self.read_data(\"between_commands\")\n\n print(\"---\")\n print(\"test_id: {}\".format(self.job_id))\n print(\"duration:\")\n Job.print_results(self.duration)\n print(\"between_commands:\")\n Job.print_results(self.between_commands)\n print(\"\")", "def test_api_get(httpretty, new_job):\n url = 'https://salesforce/services/async/34.0/job/THEJOBID'\n httpretty.register_uri('GET', url, status=200, body=b'some xml and stuff')\n response = new_job.request('get', url, expected_response=200)\n assert response == b'some xml and stuff'", "def get_status(job_key):\n job = Job.fetch(job_key, connection=conn)\n\n logs_url = \"{}{}/runner/logs/{}\".format(request.url_root, API_VERSION, job_key)\n status_dict = {\"status\": \"\", \"logs_url\": logs_url}\n return_code = 200\n if job.is_finished:\n status_dict['status'] = \"success\"\n return_code = 200\n elif job.is_failed:\n status_dict['status'] = \"terminal\"\n return_code = 400\n else:\n status_dict['status'] = \"running\"\n status_dict['logs_url'] = \"\"\n return_code = 202\n\n return jsonify(status_dict), return_code", "def get_async_job_status(self, job_id, batch=False):\n path = '%s' % job_id\n return self.make_request(path, 'GET', batch=batch)", "def __await_helms_installation(self, job_id, expected_services_count):\n end_waiting = datetime.now().timestamp() + self.TIMEOUT_MIN * 60 * 1000\n curr_status = self.helm_results.get(job_id)\n while datetime.now().timestamp() <= end_waiting:\n curr_status = self.helm_results.get(job_id, {\"services\": []})\n if expected_services_count != len(curr_status[\"services\"]):\n time.sleep(1.)\n else:\n self.helm_results.pop(job_id)\n return curr_status\n self.helm_results.pop(job_id)\n return curr_status", "def test_check_health(self):\n cache = DummyCache()\n ok, msg = cache.check_health()\n self.assertTrue(ok)", "def endace_get_search_status_command(app, args):\r\n\r\n jobid = args.get(\"jobid\")\r\n if len(re.findall(r'([0-9a-fA-F]+)', jobid)) == 5:\r\n # calling search status function of app instance\r\n result = app.get_search_status(jobid)\r\n\r\n # create entry context to return to Demisto\r\n output = {'Endace.Search.Response(val.JobID == obj.JobID)': result}\r\n table_header = ['Task', 'JobID', 'Status', 'Error', 'JobProgress', 'DataSources', 'TotalBytes']\r\n readable_output = tableToMarkdown('EndaceResult', result, headers=table_header, removeNull=False)\r\n raw_response = result\r\n\r\n return readable_output, output, raw_response\r\n\r\n else:\r\n raise ValueError(\"Wrong JOB ID provided\")", "def test_unknown_job(self):\n unknown_job = json.loads(BASE_JSON % ('null', 'null', 0, 1433166609))[0]\n self.assertEquals(self.query_api.get_job_status(unknown_job), UNKNOWN)", "def _get_jobs():\n return _get_bigquery_service().jobs()", "def health_check(cls):\n cb = cls.CACHE_BACKEND()\n return cb.health_check()", "def test_get_requests(self):\n response = self.client.open('/api/provisioning/port',\n method='GET')\n self.assert200(response, \"Response body is : \" + response.data.decode('utf-8'))", "def test_successful_resources(self):\n\n url = '/%s/jobs/%i/' % (self.api, self.job.id)\n response = self.client.generic('GET', url)\n self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)\n\n result = json.loads(response.content)\n\n self.assertEqual(result['resources']['resources']['cpus'], 1.0)\n self.assertEqual(result['resources']['resources']['mem'], 128.0)\n self.assertEqual(result['resources']['resources']['disk'], 10.0)\n\n url = '/%s/jobs/%i/' % 
(self.api, self.job2.id)\n response = self.client.generic('GET', url)\n self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)\n\n result = json.loads(response.content)\n\n self.assertEqual(result['resources']['resources']['cpus'], 1.0)\n self.assertEqual(result['resources']['resources']['mem'], 1024.0)\n self.assertEqual(result['resources']['resources']['disk'], 1040.0)", "def test_call_second_time(self, query_repo_url, get_credentials, valid_revision, get):\n # Making sure the cache is filled so we don't depend on the order of the tests.\n query_jobs.JOBS_CACHE[(\"try\", \"146071751b1e\")] = json.loads(JOBS_SCHEDULE)\n self.assertEquals(\n self.query_api._get_all_jobs(\"try\", \"146071751b1e\"),\n json.loads(JOBS_SCHEDULE))\n # _get_all_jobs should return its value directly from\n # cache without calling get\n assert get.call_count == 0", "def test_heartbeat(self):\n pass", "def test_polljobs_finished(mock_status):\n\n jobs = {\n \"lbowconf\": {\n \"test-machine-queue-slots\": 2,\n \"test-machine-queue-max\": 4\n },\n \"jobone\": {\n \"resource\": \"test-machine\",\n \"laststatus\": \"Running\",\n \"scheduler\": \"LSF\",\n \"jobid\": \"123456\"\n },\n \"jobtwo\": {\n \"resource\": \"test-machine\",\n \"laststatus\": \"Queued\",\n \"scheduler\": \"LSF\",\n \"jobid\": \"123456\"\n },\n \"jobthree\": {\n \"resource\": \"test-machine\",\n \"laststatus\": \"Submit Error\",\n \"scheduler\": \"LSF\",\n \"jobid\": \"123456\"\n },\n \"jobfour\": {\n \"resource\": \"test-machine\",\n \"laststatus\": \"Waiting Submission\",\n \"scheduler\": \"LSF\",\n \"jobid\": \"123456\"\n },\n \"jobfive\": {\n \"resource\": \"test-machine\",\n \"laststatus\": \"Finished\",\n \"scheduler\": \"LSF\",\n \"jobid\": \"123456\"\n },\n \"jobsix\": {\n \"resource\": \"test-machine\",\n \"laststatus\": \"Complete\",\n \"scheduler\": \"LSF\",\n \"jobid\": \"123456\"\n }\n }\n\n mock_status.return_value = \"Finished\"\n _polljobs(jobs, False)\n\n assert mock_status.call_count == 2, \\\n \"Should only be polling running and queued jobs\"\n assert jobs[\"jobone\"][\"laststatus\"] == \"Finished\"\n assert jobs[\"jobtwo\"][\"laststatus\"] == \"Finished\"\n assert jobs[\"lbowconf\"][\"test-machine-queue-slots\"] == \"0\"", "def get(self, job_id):\n\n if job_id:\n status = {\"state\": self.runner_service.status(job_id)}\n else:\n # TODO: Update the correct status for all jobs; the filtering in jobrunner doesn't work here.\n all_status = self.runner_service.status_all()\n status_dict = {}\n for k, v in all_status.iteritems():\n status_dict[k] = {\"state\": v}\n status = status_dict\n\n self.write_json(status)", "def check_job_status(self, jobid=None):\n\n if jobid is None:\n if hasattr(self, 'current_job'):\n jobid = self.current_job\n else:\n jobid = self.current_job\n\n response = self._request(\n 'GET', CosmoSim.QUERY_URL + '/{}'.format(jobid) + '/phase',\n auth=(self.username, self.password), data={'print': 'b'},\n cache=False)\n\n log.info(\"Job {}: {}\".format(jobid, response.content))\n return response.content", "def test_get_node_state_servicelight(self):\n pass", "async def healthcheck(self):\n for service in self.services:\n await service.healthcheck()" ]
[ "0.72830915", "0.6839322", "0.6632437", "0.65635777", "0.63915956", "0.63915956", "0.6313089", "0.6205987", "0.61734414", "0.61597115", "0.614555", "0.6124894", "0.6102056", "0.6053104", "0.604147", "0.60116535", "0.59794265", "0.59655803", "0.5928916", "0.5915668", "0.5885323", "0.5857235", "0.58556134", "0.58373505", "0.5816909", "0.57968056", "0.5785838", "0.5765063", "0.57615423", "0.5757878", "0.5733725", "0.57069355", "0.569865", "0.5693183", "0.5688069", "0.56750405", "0.5669429", "0.5668635", "0.56678736", "0.56483364", "0.5643667", "0.5632118", "0.5629496", "0.5624704", "0.56232345", "0.56152", "0.5612347", "0.5611951", "0.5606881", "0.5605049", "0.559661", "0.55929786", "0.5586979", "0.5573915", "0.5562614", "0.5558501", "0.55583656", "0.55582106", "0.55530983", "0.55522573", "0.5549105", "0.5532826", "0.5527477", "0.55271035", "0.5515509", "0.5514171", "0.5504623", "0.5494198", "0.54941076", "0.54891807", "0.5481421", "0.5472823", "0.54696876", "0.54645175", "0.54534096", "0.5449778", "0.5446164", "0.54436475", "0.54424524", "0.5430866", "0.5426296", "0.5426012", "0.5425647", "0.54231334", "0.5419315", "0.5416429", "0.5416268", "0.5411952", "0.54045695", "0.53990054", "0.5398978", "0.5393232", "0.5363236", "0.5361569", "0.53586495", "0.5355155", "0.53518826", "0.53512263", "0.5344404", "0.53424305" ]
0.7425097
0
Retrieves the list of all User objects
def get_all_users():
    users = []
    for mv in storage.all("User").values():
        users.append(mv.to_dict())
    return jsonify(users)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_all_users():\n return Users.query.all()", "def get_all_users():", "def get_all_users():\n return User.query.all()", "def get_all_users(self):\n \n sql = \"select * from users\"\n return self._query_all(sql)", "def get_users(self):\n users = []\n page = 1\n while not len(users) % 100:\n users += self._get('/users?{0}'.format(urllib.urlencode({'per_page': 100, 'page': page})))\n if not users:\n break\n page += 1\n return users", "def get(self):\n return get_all_users()", "def get(self):\n return get_all_users()", "def get(self):\n return get_all_users()", "def get(self):\n return get_all_users()", "def listUsers(self):\n return self._client.listUsers()", "def all_users(cls):\n return UsersModel.query.all()", "def get_users(self):\r\n\t\tlogger.debug(\"Fetch users\")\r\n\t\t\r\n\t\treturn login.get_users()", "def get_users():\n\n return User.query.all() # [<User user_id=1 fname=Alice lname=Apple>]", "def user_list(request):\r\n params = request.params\r\n order = params.get('order', None)\r\n limit = params.get('limit', None)\r\n user_list = UserMgr.get_list(order=order, limit=limit)\r\n ret = {\r\n 'count': len(user_list),\r\n 'users': [dict(h) for h in user_list],\r\n }\r\n return _api_response(request, ret)", "def get_all_users():\n return UserModel.query.filter_by(deleted_at=None)", "def user_list(ctx):\n data = ctx.obj.get_all_users()\n output_json_data(data)", "def get_users():\n coll = data_access.get_user_collection()\n users = [User(**u) for u in coll.find()]\n return users", "def user_list():\n users = User.objects.all()\n return {\"users\": users}", "def get_all_users():\n return session.query(User).all()", "def get_all_user():\n user = UserModel.objects()\n return jsonify(user), 200", "def get(self):\n\n offset = 0\n limit = Config.get_page_limit()\n args = request.args\n try:\n offset = request.args['offset']\n except Exception:\n pass\n\n try:\n limit = request.args['limit']\n except Exception:\n pass\n\n return self.get_request_handler(request.headers).get_all_users(offset=offset, limit=limit)", "def get_all_users():\n db = api.db.get_conn()\n return list(db.users.find({}, {\"_id\": 0, \"password_hash\": 0}))", "def get(self):\n return User_Model.query.all()", "def get_users(self, query_args={}):\n endpoint = '/v3/educator/users'\n result = self.request(endpoint, query_args)\n\n users = []\n for data in result.response:\n user = User(data)\n users.append(user)\n\n return users", "def get_users(self):\n fields = ['name', ]\n return self.get_data(\"myUsers\", fields)", "def get_user_list():\n\tudb = UserPageDB()\n\ttry:\n\t\tusers = udb.user_list()\n\t\treturn UserList([_transform_user(u) for u in users])\n\tfinally:\n\t\tudb.close()", "def get_all_users():\n return jsonify(admin.get_all_users(current_app.scoped_session()))", "def list_users(self):\n raise NotImplementedError", "def get_user_list():\n response = []\n for user in mongo.db.users.find():\n user[\"_id\"] = str(user[\"_id\"])\n response.append(user)\n return response", "def list(cls, context, filters=None, limit=3000, marker=1,\n sort_key='id', sort_dir='asc'):\n #import pdb; pdb.set_trace()\n db_users = cls.dbapi.get_user_list(\n context, limit=limit, marker=marker, sort_key=sort_key,\n sort_dir=sort_dir, filters=filters)\n total = db_users.total\n return [User._from_db_object(cls(context), obj) for obj in db_users], total", "def get_all_users(self):\n query = \"SELECT * FROM users\"\n self.cursor.execute(query)\n result = self.cursor.fetchall()\n return result", "def get_all_users(db):\n return 
list(db['user'].find())", "def get_users(self):\n return self.get_all_dbusers()", "def listUsers(self):\n return tuple(User.create({'name':name},self._modelDataManager) for name in self.pm_getUserManager().listUsers())", "def get_users(self, *args, **kwargs):\n\n users_data = api.get_users(\n *args,\n api_key=self.__creds.api_key_v2, \n **kwargs)\n return [en.User(creds=self.__creds, **user_data) for user_data in users_data]", "def get_users():\n return db.fetch_users()", "def get_users(self) -> List[User]:\n with self.client.create_session() as session:\n users = session.query(RDSUser).filter(RDSUser.is_active.is_(True)).all()\n\n return [self._build_user_from_record(user_record=user) for user in users]", "def get_users(self) -> List[Dict[str, Any]]:\n users = self.user_manager.get_users()\n return [\n {\n 'user_id': user.user_id,\n 'username': user.username,\n 'created_at': user.created_at.isoformat(),\n }\n for user in users\n ]", "def list_users(self):\n return self.get_admin(\"users\")", "def get_users(self):\n return self.execute(TABELLE['users']['select']['all'])", "def list_users():\n check_admin()\n results = User.query.order_by(-User.id)\n return render_template('user_list.html', users=results)", "def getUsers(self) -> List[bbUser.bbUser]:\n return list(self.users.values())", "def getAllUsers(self):\n ret = []\n\n users = User.getAll()\n\n for e in users:\n ret.append(e.getAsDict())\n\n return {\"users\" : ret}", "def get_users(self, params=None):\n url = 'users'\n if params:\n url += '?%s' % urllib.urlencode(params)\n resp, body = self.get(url)\n self.expected_success(200, resp.status)\n body = json.loads(body)\n return service_client.ResponseBodyList(resp, body['users'])", "def fetch_all_users():\n url = \"{}/workspace/{}/users\".format(V1_API_URL, WORKSPACE_ID)\n responses = requests.get(url, headers=HEADERS)\n return [\n {\n \"acronym\": user[\"name\"].lower(),\n \"clockify_id\": user[\"id\"],\n \"email\": user[\"email\"].lower(),\n }\n for user in responses.json()\n ]", "def list_users(self, limit=None, marker=None):\n return self._user_manager.list(limit=limit, marker=marker)", "def display_users(cls):\n return cls.user_list", "async def list_users(self, ctx):\n \n path = \"Users\"\n headers = {\n 'accept': 'application/json'\n }\n response = send_request(method=\"get\", path=path, headers=headers)\n users = []\n for page in response:\n users.append(f\"**{page['Name']}**: ``{page['Id']}``\")\n log.debug(users)\n\n embed = embeds.make_embed(ctx=ctx, title=\"List Users\", image_url=\"https://emby.media/resources/logowhite_1881.png\")\n\n await LinePaginator.paginate([line for line in users], ctx, embed, restrict_to_user=ctx.author)", "def get_users(self):\n return get_users(self['__store'].db, self)", "def _load_users(self) -> List[Dict]:\n try:\n api_call = self.web_client.api_call('users.list')\n if api_call.get('ok'):\n return api_call.get('members')\n except Exception:\n LOGGER.exception('Cannot get users')\n raise", "def user_list():\n if session['user_admin'] == False:\n abort(403)\n\n # Retrieve all Users\n sqa_sess = sqa_session()\n users = sqa_sess.query(User).all()\n\n return render_template('admin/user_list.html', users=users)", "def get_users():\n users = storage.all('User')\n users_list = []\n for user in users.values():\n users_list.append(user.to_dict())\n return jsonify(users_list), 200", "def get_all_users(self, path_prefix='/', marker=None, max_items=None):\r\n params = {'PathPrefix' : path_prefix}\r\n if marker:\r\n params['Marker'] = marker\r\n if 
max_items:\r\n params['MaxItems'] = max_items\r\n return self.get_response('ListUsers', params, list_marker='Users')", "def list_users():\n return json_response(\n status=200,\n response_data={\n \"success\": True,\n \"data\": {\n \"users\": [user.serialize() for user in User.all()]\n }\n }\n )", "def list_users(self):\n _url = \"http://\" + self.host_ip + \":35357/v2.0/users\"\n _body = None\n _headers = {'Content-type': 'application/json',\n 'x-auth-token': self.cloud_admin_info['token_project']}\n\n response = self.request(\"GET\", _url, _headers, _body)\n if response is None:\n LOG_OBJ.error(\" no response from Server\")\n return response\n if response.status not in [200, 201, 202, 203, 204]:\n LOG_OBJ.error(\n \"get user list Failed with status %s \" %\n response.status)\n return response.status\n\n output = json.loads(response.data)\n LOG_OBJ.info(\"users List : %s\")\n return output[\"users\"]", "def users_list():\n users = User.query.all()\n serialized_objects = users_schema.dumps(users, sort_keys=True, indent=4)\n\n return Response(\n response=serialized_objects,\n status=http_status.OK,\n mimetype=\"application/json\"\n )", "def get_users():\n return jsonify([\n users.to_dict()\n for users in models.storage.all('User').values()\n ])", "def list(self, *args, **kwargs):\n users = User.objects.filter(is_superuser=False)\n return self.list_by(users, self.serializer_class)", "def list_user():\n\tbegin = 0\n\tlength = 25\n\ttry:\n\t\tif request.json != None:\n\t\t\tbegin = int(request.json.get('begin', 0))\n\t\t\tlength = int(request.json.get('length', 25))\n\texcept:\n\t\tabort(403)\n\tif length > 100 :\n\t\tlength = 100\n\tuserList = User.list(begin, length)\n\tif userList == None:\n\t\tabort(400)\n\treturn jsonify({'users': map(lambda(e): e.output(), userList), 'begin': begin, 'length': len(userList)})", "def all_users():\n\n users = crud.get_users()\n\n return render_template('all_users.html', users=users)", "def list_users(self, uid):\n uid = self._check_uid(uid)\n user_data = self._router_request(\n self._make_request_data(\n 'getUserList',\n data=dict(\n uid=uid\n )\n )\n )\n return user_data['data']", "def show_users(self):\n\n u = User(self.settings)\n users_list = u.find_users()\n\n # transform the results in a \"jsonifiable\"-form\n json_results = []\n for user in users_list:\n json_results.append(user.to_json())\n\n # return\n return json_results", "def _get_users_list(self):\n return self.users['user_id'].tolist()", "def get_users():\n users = User.query.order_by(User.id).all()\n users = {user.id: user.username for user in users}\n\n response = jsonify({\"success\": True, \"users\": users})\n\n return response", "def users_get(): # noqa: E501\n base.check_session()\n ret = []\n for u in users.values():\n ret.append(_cleanuser(u))\n return ret", "def users(self) -> list[User]:\n return self._connection.users", "def get_user_list():\r\n session = tables.get_session()\r\n if session is None:\r\n return {'success': False, 'reason': 'failed'}\r\n try:\r\n user_account = UserAccount()\r\n user_account.find_all_user(session)\r\n except SQLAlchemyError as err:\r\n LOGGER.error('Get user details failed: %s', err)\r\n return {'success': False, 'reason': 'failed'}\r\n finally:\r\n session.close()\r\n return {'success': True}", "def get_users():\n return Response(f\"{User.get_all_users()}\", 200, mimetype='text/plain')", "def get_users(self):\n res = self.conn.cursor().execute('SELECT id,email,username FROM users')\n return res.fetchall()", "def get_users_list_full(self, session):\n\n 
users = session.query(\n User.chat_id,\n User.is_banned,\n User.username,\n User.first_name,\n User.last_name,\n User.time_registered\n ).filter(User.is_admin==False).all()\n return users", "def user_list(request_dict):\n users = User.query.all()\n users_list = list()\n for user in users:\n users_list.append(user)\n\n return JSONTools.user_list_reply(users_list)", "def fetch_all_users():\n users = find_users()\n return to_response(users, \"No users\")", "def all_users(self):\n\n cur = self.db.cursor()\n cur.execute(\n \"\"\"SELECT user_id, username, phone, email, role, date_created \n FROM users\"\"\")\n \n user_from_db = cur.fetchall()\n if cur.rowcount >= 1: \n resp = self.serialize_user(user_from_db) \n return resp\n return None", "def do_user_list(cs, args):\n _, users = cs.users.list()\n fields = ['user_id', 'username', 'email', 'realname', 'comment']\n utils.print_list(users, fields, sortby=args.sortby)", "def show_users():\n users_list = []\n all_users = storage.all('User')\n for obj in all_users.values():\n users_list.append(obj.to_dict())\n return jsonify(users_list)", "def user_list(self):\n self.cur.execute(\"SELECT username FROM users\")\n users = []\n for username in self.cur.fetchall():\n users.append(username[0])\n return users", "def users(self, site = None):\r\n uids = self.user_ids()\r\n if uids:\r\n users = Account._byID(uids, True, return_dict = False)\r\n return [self.ajax_user(u) for u in users]\r\n else:\r\n return ()", "def user_list():\n\n users = User.query.all()\n\n return render_template(\"user_list.html\", users=users)", "def list_users():\n if not check_content_type():\n return jsonify(status=CONTENT_TYPE_ERROR)\n reqdata = request.json\n if not check_token(reqdata[\"token\"]):\n return jsonify(status=TOKEN_ERROR)\n users = db.session.query(User).all()\n resdata = []\n for user in users:\n resdata.append({\"id\" : user.id, \"login\" : user.login, \"password\" : user.hash_password})\n return jsonify(data=resdata, status=OK_STATUS)", "def get_user_list(self, connection):\n http = get_web_service(connection)\n try:\n req = http.request('GET', connection[\"url\"] + '/users/?_format=json')\n data = json.loads(req.data.decode('utf-8'))\n # print(json.dumps(data, indent=4, sort_keys=True))\n return data\n except urllib3.exceptions.HTTPError as e:\n print(\"Connection error\")\n print(e)", "def user_list():\n\n users = User.query.all()\n return render_template(\"/user_list.html\", users=users)", "def get_users():\n users = User.query # no need to order\n users_data = [user.to_dict() for user in users.all()]\n return jsonify(users=users_data)", "def get(self):\n\n users = [marshal(user, user_fields) for user in models.ExerciseUser.select()]\n\n return users", "def get_all_users(session):\n\n return session.query(User).all()", "def get_all_users():\n token = request.headers.get('token')\n\n # Token Validation\n token_valid, response = is_token_valid(token)\n if not token_valid:\n return response\n token_username = response\n\n # Privilege handling\n if token_username != 'admin':\n return jsonify({'message': \"You aren't allowed to access this\"}), 404\n\n return jsonify(list(Users.values())), 200", "def user_list():\n\n users = User.query.all()\n return render_template(\"user_list.html\", users=users)", "def user_list():\n\n users = User.query.all()\n return render_template(\"user_list.html\", users=users)", "def user_list():\n\n users = User.query.all()\n return render_template(\"user_list.html\", users=users)", "def user_list():\n\n users = User.query.all()\n return 
render_template(\"user_list.html\", users=users)", "def user_list():\n\n users = User.query.all()\n return render_template(\"user_list.html\", users=users)", "def user_list():\n\n users = User.query.all()\n return render_template(\"user_list.html\", users=users)", "def view_list_users(self, user):\r\n return user.realm._users.keys()", "def list_users(self, instance, limit=None, marker=None):\n return instance.list_users(limit=limit, marker=marker)", "def queryset(self):\n User = get_user_model()\n return User.objects", "def users(self):\r\n return resource.Users(self)", "def getAllUsers(self):\r\n return [(ind, user) for ind, user in enumerate(self.users)]", "def do_get_all_users(self, *args):\n self.user_data = self.connection_obj.get_all()\n self.__class__.print_func(self, self.user_data)", "def user_list():\n\n users = User.query.all()\n \n return render_template(\"user_list.html\", users=users)", "def users_get(self) -> Dict[str, list]:\n self.__logger.debug('Eva.users_get called')\n return self.__http_client.users_get()", "def get(self, request):\n all_users = UserModel.objects.all()\n all_users_serializer = UserSerializer(all_users, many=True)\n return Response(all_users_serializer.data)" ]
[ "0.83742595", "0.828774", "0.8259821", "0.8236466", "0.8208948", "0.81540406", "0.81540406", "0.81540406", "0.81540406", "0.81478584", "0.8116875", "0.81126046", "0.80643564", "0.8059964", "0.8054353", "0.8028967", "0.8026462", "0.7979344", "0.79498196", "0.7948819", "0.79406506", "0.79326934", "0.7921447", "0.791617", "0.79123974", "0.79088104", "0.790184", "0.7858222", "0.78425026", "0.783902", "0.78151643", "0.7811589", "0.78087384", "0.77970207", "0.77904963", "0.7783935", "0.7765289", "0.7736639", "0.77354395", "0.7721347", "0.770708", "0.77044547", "0.77010804", "0.76836175", "0.76703775", "0.76584864", "0.7642113", "0.7629955", "0.7621009", "0.762012", "0.7619777", "0.7619495", "0.7613716", "0.7612252", "0.7602494", "0.7589505", "0.75857276", "0.7584131", "0.75681806", "0.7562876", "0.7543084", "0.75397485", "0.75340176", "0.7524388", "0.75239235", "0.75160056", "0.7511962", "0.7509436", "0.75010645", "0.7495025", "0.7489207", "0.74877626", "0.7473661", "0.74701196", "0.74652445", "0.74643433", "0.7461528", "0.74589074", "0.74579304", "0.74478203", "0.7443948", "0.74395716", "0.7435299", "0.74299836", "0.7408169", "0.7405792", "0.7405792", "0.7405792", "0.7405792", "0.7405792", "0.7405792", "0.73902994", "0.7386077", "0.73719615", "0.7370767", "0.73680377", "0.73654", "0.7361504", "0.73610437", "0.7359458" ]
0.7877465
27
Retrieves a User object
def get_user(user_id=None):
    user = storage.get("User", user_id)
    if user is None:
        abort(404)
    return jsonify(user.to_dict())
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get(self):\n\n user_id = get_jwt_identity()\n user = user_crud.get(user_id)\n if not user:\n abort(404, message=\"User not Found\")\n\n return user", "def get(self, user_id):\n return User.get(user_id)", "def get_user(self, user, instance=None):\n instance = self._get_resource(_instance.Instance, instance)\n return self._get(_user.User, user)", "def get_user(cls, user_id):\n try:\n return User.objects.get(pk=user_id)\n except User.DoesNotExist:\n return None", "def get_user(self, user_id):\n User = get_user_model()\n try:\n return User.objects.get(pk=user_id)\n except User.DoesNotExist:\n return None", "def get_user(self, *, params: Optional[dict] = None) -> \"resource_types.User\":\n\n return communicator.User(self.__requester).fetch(parameters=params)", "def get_user(self, object_id):\n return self.get_object(\"user\", object_id)", "def get_user(self, user_id):\n try:\n User = get_user_model()\n return User.objects.get(pk=user_id)\n except User.DoesNotExist:\n return None", "def get_user(self, user_id):\n return UserModel._default_manager.get(pk=user_id)", "def get_user(user_id):\n try:\n return UserModel.objects.get(id=user_id)\n except UserModel.DoesNotExist:\n return None", "def get_user(request: Request) -> User:\n\n return _check_and_extract_user(request)", "def getUser(self, resource):\n if isinstance(resource, int):\n resource = 'users/{0}'.format(resource)\n\n res = self.getRequest(resource)\n\n if res:\n user = vsdModels.User(**res)\n return user\n else:\n return None", "def get_user(self):\n if \"user\" not in self._data:\n self._data[\"user\"] = User.objects.get(pk=self.kwargs[\"user_id\"])\n return self._data[\"user\"]", "def get_user(self):\n\n user_session = self.get()\n if not user_session:\n return None\n\n us = ServiceLocator.resolve(ServiceLocator.USERS)\n return us.single(user_session.login)", "def get_object(self):\n return User.objects.get(username=self.request.user.username)", "def retrieve_user(cls, username):\n\t\tuser_detail = dbop.get_user(username)\n\t\treturn cls(**user_detail)", "def get_user(self) -> User:\n return self.__user", "def get_user(pk):\n user = UserService(user=pk).get_user_by_id()\n return CustomResponse(data=user).response()", "def get_user(user: User) -> User:\n if user.is_authenticated:\n return user\n else:\n return get_anonymous_user()", "def get(self, id):\n\t\ttry:\n\t\t\tflask_app.logger.debug('We are getting the user: %d', id)\n\t\t\treturn user_service.get(id)\n\t\texcept AssertionError as e:\n\t\t\tuser_space.abort(400, e.args[0], status = \"Could not get user\", statusCode = \"400\")\n\t\texcept Exception as e:\n\t\t\tuser_space.abort(500, e.args[0], status = \"Could not get user\", statusCode = \"500\")", "def get_user(self):\n if not self.is_valid():\n return None\n # error checking done in: clean_email\n # NOTE: all emails are stored in lower-case\n e = self.clean_email().lower()\n return User.objects.get(email=e)", "def get_user(self, u_id: int) -> Optional[Users]:\n try:\n user = self.session.query(Users).get(u_id)\n\n return user\n except Exception as excpt:\n self.session.rollback()\n print(f'Could not get user: {excpt}')\n\n return None", "def get_user(self, user_id):\n uri = 'users/' + user_id\n return self.make_request(uri)", "def get_user(self):\n try:\n return User.objects.get(id=self.user_id)\n except User.DoesNotExist:\n return AnonymousProfile()", "async def get(cls, user_id):\n try:\n user = await db.one(cls.SELECT_USER, user_id=user_id)\n except exceptions.NoResultFound:\n LOGGER.error(\"Could not find 
user=%s.\", user_id)\n raise DatabaseError\n except SQLAlchemyError as err:\n LOGGER.error(\"Failed to fetch user=%s. Error: %s\", user_id, err)\n raise DatabaseError\n\n return user", "def get(self, no):\n user = get_a_user(no)\n if not user:\n api.abort(404)\n else:\n return user", "def get_user(self, instance, name):\n return instance.get_user(name)", "def get_user(self, user_id=None):\n raise NotImplementedError", "def get_user(self, user_id):\n response = self._get_page_param('user', user_id).json()\n\n if not response:\n raise InvalidUserID\n\n return User(response)", "async def fetch_user(self, id: str):\n user = await self.http.get_user(id)\n return User(state=self.http, data=user)", "def get_user(self, user_id):\n oauth_user = OAuthioUser.objects.filter(user__id=user_id)\n if oauth_user.exists():\n return oauth_user.get().user", "def get_user(self, user_id: int) -> dict:\n user = self.call_method('getUser', user_id=user_id)\n return user", "def get_user_by_id(user_id):\n return User.query.get(user_id)", "def get_user():\n userdict = jsonify2(current_user.db_user, 'User')\n return current_app.bitjws.create_response(userdict)", "def getUser():\n username = post_param('username', '')\n if username == '':\n username = get_param('username', '')\n password = get_param('password', '')\n else:\n password = post_param('password', '')\n \n if username == '':\n return None\n else:\n return User(username, password)", "def me_get(): # noqa: E501\n s = base.check_session()\n return _cleanuser(s['user'])", "async def get_user(self, name=None, id=None) -> User:\n if name:\n return await self.get_user_by_username(name)\n if id:\n return await self.get_user_by_id(id)\n return None", "def get(user_id: int) -> User:\n try:\n user = User.objects.get(id=user_id)\n except User.DoesNotExist:\n logger.error(\n 'Getter(user_id = %d) in BaseUser throws User.DoesNotExist exception.' 
%\n user_id)\n raise NonUserException\n return user", "def user(self):\n u = self.user_info\n return self.user_model.get_by_id(u['user_id']) if u else None", "def get_user(user_id=None):\n users = storage.all('User')\n user = users.get('User' + \".\" + user_id)\n if user is None:\n abort(404)\n else:\n return jsonify(user.to_dict()), 200", "def get_user(user_id):\n user = storage.get(User, user_id)\n if user is None:\n abort(404)\n else:\n return jsonify(user.to_dict())", "def get_user(user_id):\n user = storage.get(\"User\", user_id)\n if user is None:\n abort(404)\n return user.to_dict()", "def get(cls,id):\n result = execute_query(\"\"\"SELECT * FROM Users Where username = ?\n \"\"\",\n [id])\n try:\n user = User(id,result[0][1])\n except Exception as e:\n return None\n \n return user", "def get_user(user_id):\n user = models.storage.get('User', user_id)\n if user is None:\n abort(404)\n return jsonify(user.to_dict())", "def getUser(self, name):\n return User.create(self.pm_getUserManager().getUser(self._unbox(name)),self._modelDataManager)", "def get_user(self):\n\n r = requests.get(\n self._url('/usermanagement/userinfo'),\n headers={'Authorization': self.token},\n proxies=self.proxy)\n r.raise_for_status()\n user = r.json()\n log.info('user {:s} currently logged in'.format(user['login']))\n\n return user", "def get_user(self, user_id):\n return self.my_get_user(self.get_all_dbusers(), user_id)", "def load_user():\n\n return User.query.get(int(id))", "def get_user_by_id(user_id):\n\n return User.query.get(user_id)", "def load_user(user_id):\n return models.UserModel.query.get(int(user_id))", "async def get_user(**kwargs: Any) -> UserModel:\n user = await UserModel.objects.get_or_none(**kwargs)\n\n if not user:\n raise NotFoundError(\"User not found.\")\n\n return user", "def get_user(self, user_id=None, nick=None):\n if user_id in self:\n return self[user_id]\n else:\n return User(self, user_id, nick=nick)", "def load_user(uid):\n return User.query.get(uid)", "def user(self):\n u = self.user_info\n return self.user_model.get_by_id(u['user_id']) if u else None", "def user(self, user_id=None):\r\n return users.User(self, user_id)", "def get_user_by_id(user_id):\n\n return User.query.get(user_id)", "def get_user_by_id(user_id):\n\n return User.query.get(user_id)", "def get_user_by_id(user_id):\n\n return User.query.get(user_id)", "def get_user_by_id(user_id):\n\n return User.query.get(user_id)", "def load_user(user_id):\n return User.query.get(user_id)", "def load_user(user_id):\n return User.query.get(user_id)", "def get(self, user_id):\n user = UserServices(public_id=user_id).get_an_item()\n if not user:\n api.abort(404)\n else:\n return user", "def get(id):\n return User.query.filter_by(id=id).first()", "def getUser(self, id : int) -> bbUser.bbUser:\n id = self.validateID(id)\n return self.users[id]", "def load_user(user_id):\n return app.user_models.query.get(int(user_id))", "def get_user(id):\n user = User.query.get(id)\n return user_schema.jsonify(user)", "def get_user(user_id):\n usr = storage.get(User, user_id)\n if usr:\n return jsonify(usr.to_dict())\n else:\n abort(404)", "def GetAppEngineUser(user_id):\n email_address = GetEmailAddress(user_id)\n if email_address:\n return users.User(email_address)\n else:\n return None", "def get_user(username):\n return Users.query.filter_by(username=username).first()", "def get_user(name):\n try:\n return User.objects.get(name=name)\n except ObjectDoesNotExist:\n raise ObjectDoesNotFound(\"There is no user '{}'.\".format(name))", "def 
get_user(self, username) -> User:\n raise NotImplementedError", "def get_user(session):\n\n # check if we already have a user id in the session, if so attempt to pull\n # the user data from mongo\n user_id = session.get('user_id')\n try:\n user = User.objects.get(pk=user_id)\n except db.DoesNotExist:\n pass\n else:\n return user\n\n # if we don't already have user details stored then check if we have an\n # access token retrieved via the oauth process\n access_token = session.get('access_token')\n if access_token is None:\n return None\n\n # build the request object\n headers = {'Authorization': 'OAuth '+ access_token[0]}\n req = urllib2.Request('https://www.googleapis.com/oauth2/v1/userinfo', None, headers)\n\n # make the request, catching any http errors\n try:\n res = urllib2.urlopen(req)\n except urllib2.URLError:\n return None\n else:\n user_data = json.loads(res.read())\n user = User(user_id=user_data['id'], email=user_data['email'], name=user_data['name'])\n if 'picture' in user_data:\n user.avatar = user_data['picture']\n else:\n user.avatar = 'https://lh3.googleusercontent.com/-XdUIqdMkCWA/AAAAAAAAAAI/AAAAAAAAAAA/4252rscbv5M/photo.jpg'\n user = user.save()\n return user", "def get_user(self, user_id):\n try:\n return Account.objects.get(pk=user_id)\n except Account.DoesNotExist:\n return None", "def get(self, username):\n return User.find_by_username_or_email(username)", "def get_user(self, validated_data):\n user = CustomUser.objects.get(pk=validated_data['user_id'])\n return user", "def get_user(cls, email=None, user_id=None):\n\n params = {'email': email, 'user_id': user_id}\n user_dict = cls._do_call(\n 'GET', cls.api_endpoint + 'users', params=params)\n return user_dict", "def load_user(user_id):\n return User.get_by_id(int(user_id))", "def load_user(user_id):\n return User.get_by_id(int(user_id))", "def load_user(user_id):\n return User.get_by_id(int(user_id))", "def load_user(user_id):\n return User.query.get(int(user_id))", "def load_user(user_id):\n return User.query.get(int(user_id))", "def load_user(user_id):\n return User.query.get(int(user_id))", "def load_user(user_id):\n return User.query.get(int(user_id))", "def load_user(user_id):\n return User.query.get(int(user_id))", "def load_user(user_id):\n return User.query.get(int(user_id))", "def get_user(user_ref):\n UserModel = get_user_model()\n try:\n return UserModel.objects.get(username=user_ref)\n except UserModel.DoesNotExist:\n return UserModel.objects.get(email=user_ref)", "def get_user(self, token: str) -> Optional[User]:", "def get_user(self, token: str) -> Optional[User]:", "def load_user(user_id):\r\n return User.query.get(int(user_id))", "def get_user(self, id: utils.Intable) -> User | None:\n id64 = make_id64(id=id, type=Type.Individual)\n return self._connection.get_user(id64)", "def user(self, user_id):\r\n return users.User(self, user_id)", "def user(self, user_id):\r\n return users.User(self, user_id)", "def get_object(self):\n try:\n self.object = User.objects.get(username= self.request.user)\n print(self.object)\n return self.object\n except:\n return None", "def get_object(self):\n try:\n self.object = User.objects.get(username= self.request.user)\n print(self.object)\n return self.object\n except:\n return None", "def user(email):\r\n return User.objects.get(email=email)", "def get_object(self):\n return get_object_or_404(User, id__iexact=self.request.user.id)", "def get_object(self):\n return get_object_or_404(User, id__iexact=self.request.user.id)", "def load_user(user_id):\n return 
Users.query.get(user_id)", "def get_user_from_id(user_id):\n return Users.query.filter_by(id=user_id).first()", "def load_user(id):\n user = db.session.query(User).filter(User.id == id).first()\n return user" ]
[ "0.81341594", "0.8094171", "0.8086876", "0.7977499", "0.7955156", "0.7948816", "0.79354584", "0.7924837", "0.7887365", "0.78764534", "0.7852467", "0.7792065", "0.7780637", "0.77725995", "0.7761683", "0.7751996", "0.77192086", "0.7679085", "0.76748264", "0.766108", "0.76554185", "0.765015", "0.7623932", "0.76210916", "0.76118785", "0.76078314", "0.7567133", "0.75661314", "0.75601876", "0.7557815", "0.7554832", "0.755323", "0.7547413", "0.7532461", "0.75256014", "0.7518435", "0.75162065", "0.75154513", "0.75143903", "0.7512784", "0.7509097", "0.7496731", "0.7495193", "0.74945056", "0.74825644", "0.7473871", "0.74728304", "0.7467881", "0.7466257", "0.7465854", "0.74614894", "0.74365044", "0.7433592", "0.7429014", "0.74261534", "0.7420386", "0.7420386", "0.7420386", "0.7420386", "0.74180204", "0.74180204", "0.7417004", "0.74158216", "0.7405083", "0.7403433", "0.7401862", "0.7393255", "0.73930764", "0.7390801", "0.73850065", "0.7375296", "0.73740077", "0.73485833", "0.73470575", "0.734228", "0.73332", "0.73289543", "0.73289543", "0.73289543", "0.73282", "0.73282", "0.73282", "0.73282", "0.73282", "0.73282", "0.7326867", "0.7321246", "0.7321246", "0.73183393", "0.7317856", "0.7316575", "0.7316575", "0.7313169", "0.7313169", "0.73107344", "0.7310638", "0.7310638", "0.7300447", "0.72978485", "0.72780114" ]
0.751007
40
Deletes a User object
def delete_user(user_id=None):
    user = storage.get("User", user_id)
    if user is None:
        abort(404)
    else:
        storage.delete(user)
        storage.save()
        return jsonify({}), 200
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def delete_user(self, user):\n self.delete(user)", "def delete_user():", "def delete_user():\n #TODO user delete\n pass", "def delete_user(self):\n\n User.user_list.remove(self)", "def delete_user(self):\n User.user_list.remove(self)", "def delete_user(self):\n User.user_list.remove(self)", "def delete_user(self):\n User.user_list.remove(self)", "def delete_user(self, user):\n # noinspection PyUnresolvedReferences\n self.delete(user)", "def delete_user(self, user):\n self.execute(TABELLE['id_users'][\"delete\"], user[\"id\"])", "def delete(self):\n\n user_id = get_jwt_identity()\n user = user_crud.get(user_id)\n if not user:\n abort(404, message=\"User not Found\")\n all_tokens = auth_crud.get_user_tokens(user_id)\n tokens = [token.to_dict() for token in all_tokens]\n for token in tokens:\n auth_crud.revoke_token(token['id'], user_id)\n user = user_crud.remove(user_id)\n\n return {'msg': 'User Removed'}", "def delete(self, user_id):\r\n return delete_user(request, user_id)", "def delete(user_id: int):\n usr = get_by_id(user_id)\n if not usr:\n raise UserNotFound\n\n db.session.delete(usr)\n db.session.commit()", "def delete_user(self, user_id):\n\n # ask the model to delete the user\n um = User(self.settings)\n status = um.delete(user_id)\n\n # return\n return status", "def delete(user_id):\n assert isinstance(user_id, ObjectId)\n\n User.objects(id=user_id).delete()", "def user_delete(user_id=None):\n obj = storage.get(\"User\", user_id)\n if obj is None:\n abort(404)\n storage.delete(obj)\n storage.save()\n return jsonify({}), 200", "def delete_user(user_id):\n\n user = User.query.get(user_id)\n db.session.delete(user)\n db.session.commit()\n return", "def user_delete(user_id):\n user = storage.get('User', user_id)\n if user is None:\n abort(404)\n user.delete()\n storage.save()\n return jsonify({}), 200", "def delete_user(id):\n pass", "def delete_user(user_id=None):\n obj = storage.get('User', user_id)\n if obj is None:\n abort(404)\n else:\n storage.delete(obj)\n storage.save()\n return jsonify({}), 200", "def user_id_delete(user_id):\n user = storage.get(\"User\", user_id)\n\n if user is None:\n abort(404)\n user.delete()\n del user\n return make_response(jsonify({}), 200)", "def delete_user(user_id):\n user = storage.get(User, user_id)\n if user is None:\n abort(404)\n storage.delete(user)\n storage.save()\n return jsonify({}), 200", "def delete(self):\n data = UserRegister.parser.parse_args()\n user = UserModel.find_by_username(data['username'])\n\n if user:\n user.delete_from_db()\n else :\n return {'message': 'User not found!'} , 204\n\n return {'message': 'User deleted'},202", "def delete_user(self, user):\n name = utils.get_name(user)\n self._user_manager.delete(name)", "def delete_user(user_id):\n user = User.query.get_or_404(user_id)\n db.session.delete(user)\n db.session.commit()\n\n return redirect('/')", "def delete_user(user_id):\n\n user = User.query.get_or_404(user_id)\n db.session.delete(user)\n db.session.commit()\n\n return redirect(\"/users\")", "def delete_user(self, _id):\n return self.make_request(\"DELETE\", \"users/\"+_id, {})", "def delete(self, user_id):\n\n user = User.objects.get_or_404(public_id=user_id)\n return user.delete()", "def delete_user(user_id):\n usr = storage.get(User, user_id)\n if usr:\n usr.delete(), storage.save()\n return {}\n else:\n abort(404)", "def delete(self, user_id):\n return delete_user(user_id)", "def delete_user(user_id):\n user = User.query.get_or_404(user_id)\n db.session.delete(user)\n db.session.commit()\n\n return 
redirect(\"/users\")", "def delete_user(user_id):\n user_obj = storage.get(\"User\", user_id)\n if user_obj:\n storage.delete(user_obj)\n storage.save()\n return jsonify({}), 200\n else:\n abort(404)", "def delete_user(user_id):\n temp = models.storage.get('User', user_id)\n if temp is None:\n abort(404)\n temp.delete()\n models.storage.save()\n return jsonify({})", "async def delete_user(user_id):\n \n user = User.select().where(User.id == user_id).first()\n\n if not user:\n return HTTPException(404, 'User not found')\n else:\n user.delete_instance()\n\n return f\"User {user.username} deleted successfully\"", "def delete_user(self, instance, name):\n return instance.delete_user(name)", "def deleteUser(user):\n delete_user(user)\n return redirect(url_for('login'))", "def delete_user():\r\n raise NotImplementedError()", "def delete_user(self, user_id):\n return self._delete('/users/{0}'.format(user_id))", "def delete(self, id):\n\t\ttry:\n\t\t\tuser_service.delete(id)\n\t\texcept AssertionError as e:\n\t\t\tuser_space.abort(400, e.args[0], status = \"Could not delete user\", statusCode = \"400\")\n\t\texcept Exception as e:\n\t\t\tuser_space.abort(500, e.args[0], status = \"Could not delete user\", statusCode = \"500\")", "def delete_user(cls, user_email):\n\n User.query.filter_by(email=user_email).delete()\n\n db.session.commit()\n\n print \"Successfully deleted user with the email: %s!\" % user_email", "def delete_user(id):\n user = Users.query.filter_by(id=id).first()\n user.delete()\n if not user:\n return send_msg(404, 'Not Found')\n return send_msg(204, \"No data\")", "def del_user(self, username):\n pass", "def delete_user(self, user_name):\n user = self.get_user(user_name)\n return self.client.delete_resource(user.get('href'))", "def delete(cls):\n user = user_schema.load(request.get_json(), partial=(\"email\",))\n\n current_identity = get_jwt_identity()\n db_user = UserModel.find_by_id(current_identity)\n logging.info(\n f\"Delete called by {db_user.id}: {db_user.username} with data: {user['username']}\"\n )\n if db_user.username == user['username']:\n if is_correct_password(db_user.pw_salt, db_user.pw_hash, user['password']):\n db_user.delete_from_db()\n return {\"message\": msgs.DELETED.format(db_user.username)}, 200\n else:\n return {\"error\": msgs.INVALID_PASSWORD}, 401\n return {\"error\": msgs.OWN_RECORD_ONLY}, 401", "def delete_user(self) -> 'outputs.ActingUserResponse':\n return pulumi.get(self, \"delete_user\")", "def del_user(id):\n user = User.query.get(id)\n\n db.session.delete(user)\n db.session.commit()\n\n return userSchema.jsonify(user)", "def delete_user(id):\n user_repo = UserRepository(db)\n base_repo = BaseRepository(db, User)\n u = base_repo.get_by_id(id)\n if not u:\n click.echo(\"User with specified id does not exists.\")\n return ERROR_USER_DOES_NOT_EXIST\n user_repo.delete_user(u)\n click.echo(\"User with id \" + str(id) + \" has been deleted.\")", "def delete_user(self) -> None:\n table_dictionary = {\n 'Apple': {\n 'table': 'AppleReceipts',\n 'user_id': 'User_id'\n },\n 'ESL': {\n 'table': 'ESLReceipts',\n 'user_id': 'User_id'\n },\n 'Transactions': {\n 'table': 'Transactions',\n 'user_id': 'User_id'\n },\n 'Users': {\n 'table': 'Users',\n 'user_id': 'id'\n },\n }\n\n # delete the current user's information from the db.\n for key in table_dictionary:\n query = f\"\"\"\n DELETE\n FROM {table_dictionary[key]['table']}\n WHERE {table_dictionary[key]['user_id']}=?;\n \"\"\"\n self.db.commit(query, values=(self.id,))\n\n # perform a sign out\n 
self.sign_out()\n\n log(f\"User:{self.id} has deleted their account.\")", "def delete(user_id):\n # Get the user requested\n user = User.query.filter(User.user_id == user_id).one_or_none()\n\n if user is not None:\n db.session.delete(user)\n db.session.commit()\n return (\n \"User {user_id} deleted\".format(user_id=user_id), 200\n )\n\n else:\n abort(\n 404,\n \"Person not found for Id: {user_id}\".format(user_id=user_id),\n )", "def delete_user_process(user_id):\n\n db_user = User.query.get_or_404(user_id)\n\n db.session.delete(db_user)\n db.session.commit()\n\n return redirect(\"/users\")", "def delete_user(request):\n user_id = request.POST.get('user_id')\n User.objects.filter(id=user_id).delete()\n response = {'status': 1, 'status_message': 'Success'}\n return HttpResponse(json.dumps(response))", "def delete_user(UserName=None, AuthenticationType=None):\n pass", "def delete(self, user_id):\n user = User.query.get(user_id)\n \n if user is None:\n return abort(422, message=\"User does not exist\")\n \n # check if the user is an admin and is the only one\n admins = User.query.filter_by(admin=True).all()\n if user.id == get_jwt_identity() and len(admins) == 1:\n return abort(422, message=\"User is the only admin, there must be at least one admin in the system\")\n \n user.delete()\n \n return { 'message': \"User '{}' has been deleted\".format(user.id) }", "def deleteUser(self,name):\n raise BorkedDeleteUser", "def delete_user(user_id):\n netAdminToolDB = app.config['DATABASE']\n user = netAdminToolDB.get_user(user_id)\n\n if user == None:\n return jsonify({'error': 'User_id not found'}), 404\n\n netAdminToolDB.delete_user(user_id)\n return jsonify({'result': True})", "def del_user_id(user_id):\r\n obj = storage.get(User, user_id)\r\n if obj is None:\r\n abort(404)\r\n obj.delete()\r\n storage.save()\r\n return jsonify({}), 200", "def delete_user():\n del globalopts.appdata[request.user]\n del globalopts.users[request.user]\n return \"\", 200", "def delete_user(self, user_id):\n if self.database is None:\n raise Exception(\"No database.\")\n if user_id is None or len(user_id) == 0:\n raise Exception(\"Bad parameter.\")\n return self.database.delete_user(user_id)", "def delete(self, user_id):\n user = User.query.get(user_id)\n\n if user is None:\n return mk_response(\"User does not exist\", 422)\n\n # check if the user is an admin and is the only one\n admins = User.query.filter_by(admin=True).all()\n if user.id == get_jwt_identity() and len(admins) == 1:\n return mk_response(\"User is the only admin, there must \" +\n \"be at least one admin in the system\", 422)\n\n user.delete()\n\n return mk_response(\"User '{}' has been deleted\".format(user.id))", "def delete_user(payload, user_id):\n user = User.query.get(user_id)\n # exception for non existing id\n if user is None:\n abort(404)\n # set error status\n error = False\n # delete the user\n try:\n user.delete()\n except Exception:\n user.rollback()\n error = True\n print(sys.exc_info())\n finally:\n user.close_session()\n\n if error:\n abort(422)\n\n return jsonify({\n 'success': True,\n 'deleted': user_id\n })", "def delete_user(self, user_name):\r\n params = {'UserName' : user_name}\r\n return self.get_response('DeleteUser', params)", "def delete(self, user):\n q = \"DELETE FROM profiles WHERE user=?\"\n try:\n self._query(q, (user,), fetch='none')\n except Exception as e:\n raise e", "def delete(self, request, *args, **kwargs):\n self.object = self.get_object()\n user = request.user\n success_url = reverse_lazy('muxic:user', 
kwargs={'username': user.username})\n self.object.delete()\n return HttpResponseRedirect(success_url)", "def delete(self, id):\n userDao.delete(id)\n return \"\", 204", "def delete_user(self, user):\n try:\n with dbm.open(self.dbm_path, 'c', 0o600) as db:\n del db[user.name]\n except KeyError as k:\n pass", "def delete(self):\n return self._router_request(\n self._make_request_data(\n 'deleteUserCommand',\n data=dict(\n uid=self.parent,\n id=self.id\n )\n )\n )", "def delete_user(self, user):\n # type: (dict) -> dict\n self.request_url = \"{0}/{1}/{2}\".format(self.API_URL, self.USER_ENDPOINT, user['id'])\n return self.__create_request(payload=user, request_type=self.REQUEST_DELETE, version=\"v1\")", "def delete_user(self):\n db_acces = DbMethods()\n username = self.result_table.currentItem().text()\n response = db_acces.delete_user(username)\n\n if response == True:\n self.populate_table()\n else:\n message = Message(\n self.language[\"error\"], self.language[\"inf_error\"])\n warning_message = message.create_iw_message(\n self.language[\"ok\"], \"warning\")\n warning_message.exec()", "def deleteUser(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "async def delete(self, request, uid):\n return await super(User, self).delete_item(request.app.pool, 'user',\n uid)", "def delete_user(self, user):\n\n if self.sql_read_only:\n return False\n\n if not self.check_prereqs():\n return False\n\n if not self.has_user(user):\n return False\n\n db = self.env.get_db_cnx()\n cursor = db.cursor()\n\n query=self.create_query(self.sql_delete_user_query,{'username_field':self.sql_username_field,'username':user})\n self.log.debug(\"sqlflexibleauthstore: delete_user: %s\" % (query,))\n cursor.execute(query)\n\n db.commit()\n del_user_attribute(self.env,username=user)\n return True", "def orm_delete_user(session: Session, user: User):\n try:\n session.delete(user) # Delete the user\n session.commit() # Commit the change\n LOGGER.success(f\"Deleted user: {user}\")\n except IntegrityError as e:\n LOGGER.error(e.orig)\n raise e.orig\n except SQLAlchemyError as e:\n LOGGER.error(f\"Unexpected error when deleting user: {e}\")\n raise e", "def deleteUser(self):\r\n #Find name and ID column\r\n userData = self.getCurrentUserData()\r\n\r\n #Prompt for confirmation\r\n deleteChoice = QMessageBox.question(self.view, 'Confirm user deletion', \r\n 'Are you sure you want to delete user ' \r\n + userData['Name'] + \" with ID \" + userData['User_ID'] + \r\n \" from database permanently?\", \r\n QMessageBox.Yes | QMessageBox.No)\r\n \r\n if (deleteChoice == QMessageBox.Yes):\r\n DBController().deleteUser(userData['User_ID'] )\r\n self.updateUserTable() #Re-fill table\r", "def delete_user(self, username):\n Log.info(\"Start to delete user.\")\n if self.check_if_user_exists(username):\n self.click((\"xpath\", self.user_checkbox_xpath.format(username)))\n self.click(self.user_del_btn)\n self.click(self.dialog_del_btn)\n self.wait_unit_el_present(self.user_table)\n Log.info(\"Use is deleted.\")\n else:\n Log.info(\"User \" + username + \" is not in the user list, not delete.\")", "def delete_user(request, user):\n\n if models.Group.created_by(user).count() > 0:\n raise UserDeletionError('Cannot delete user who is a group creator.')\n\n user.groups = []\n\n query = _all_user_annotations_query(request, user)\n annotations = es_helpers.scan(client=request.es.conn, query={'query': query})\n for 
annotation in annotations:\n storage.delete_annotation(request, annotation['_id'])\n\n request.db.delete(user)", "def delete(self, id):\n # Get the user from the auth header\n auth_username, auth_password = decode_basic_auth_info(request)\n auth_user = User.query.filter(User.username==auth_username).first()\n if not auth_user.admin:\n return Response(status=403)\n\n user = User.query.get(id)\n if user is None:\n return Response(status=400)\n db.session.delete(user)\n db.session.commit()\n return Response(status=202)", "def delete(self, new_data, user_id):\n print(new_data)\n request_id = get_jwt_identity()\n user = user_crud.get(request_id)\n if not user.is_superuser:\n abort(401,\n message=\"You do not have permission to view this endpoint\")\n all_tokens = auth_crud.get_user_tokens(user_id)\n tokens = [token.to_dict() for token in all_tokens]\n for token in tokens:\n auth_crud.revoke_token(token['id'], user_id)\n user = user_crud.remove(user_id)\n\n return {'msg': 'User Removed'}", "def delete_user(BrokerId=None, Username=None):\n pass", "def delete_user(id_user: int):\n mycursor.execute(f\"\"\"DELETE FROM User\n WHERE id_user = {id_user}\"\"\")\n mydb.commit()\n return f\"L'utilisateur {id_user} a été supprimé\"", "def delete_user(username):\n user = session.query(User).filter(User.username == username).first()\n if user:\n session.delete(user)\n return True", "def delete_user():\n user_id = validate_id(request.args.get(\"id\"))\n config = config_service.get_one({\"user\": str(user_id)})\n config_service.delete(str(config[\"_id\"]))\n if user_service.delete_user(user_id) != user_id:\n response = {\n \"status\": False,\n \"message\": f\"No se pudo eliminar el usuario: {str(user_id)}\",\n }\n resp = make_response(jsonify(response), 404)\n else:\n response = {\n \"status\": True,\n \"message\": f\"Se elimino corretamente el usuario: {str(user_id)}\",\n }\n resp = make_response(jsonify(response), 200)\n resp.headers[\"Content-Type\"] = \"application/json\"\n return resp", "def delete(self, user_id):\n res = self._user.delete_user(user_id)\n\n if res:\n return {\n \"status\": 200,\n \"data\": [{\n \"id\": res[\"id\"],\n \"message\": \"user record has been deleted\"\n }]\n }, 200\n else:\n return {\n \"status\": 404,\n \"error\": \"Not found for id {}\".format(user_id)\n }, 404", "def delete_user_account(connection,user):\r\n with connection:\r\n connection.execute(DELETE_SPECIFIC_USER,(user,))", "def test_delete_user(self):\n\n self.new_user.save_user()\n test_user = User('Sophia', 'Robai', '0722857832', 'ValentineRobai.@gmail.com', 'val',\n 'password')\n test_user.save_user()\n self.new_user.delete_user()\n self.assertEqual(len(User.UserDetails), 1)", "def del_user(request):\r\n mdict = request.matchdict\r\n\r\n # Submit a username.\r\n del_username = mdict.get('username', None)\r\n\r\n if del_username is None:\r\n LOG.error('No username to remove.')\r\n request.response.status_int = 400\r\n return _api_response(request, {\r\n 'error': 'Bad Request: No username to remove.',\r\n })\r\n\r\n u = UserMgr.get(username=del_username)\r\n\r\n if not u:\r\n LOG.error('Username not found.')\r\n request.response.status_int = 404\r\n return _api_response(request, {\r\n 'error': 'User not found.',\r\n })\r\n\r\n try:\r\n # First delete all the tag references for this user's bookmarks.\r\n res = DBSession.query(Bmark.bid).filter(Bmark.username == u.username)\r\n bids = [b[0] for b in res]\r\n\r\n qry = bmarks_tags.delete(bmarks_tags.c.bmark_id.in_(bids))\r\n qry.execute()\r\n\r\n # Delete all of the 
bmarks for this year.\r\n Bmark.query.filter(Bmark.username == u.username).delete()\r\n DBSession.delete(u)\r\n return _api_response(request, {\r\n 'success': True,\r\n 'message': 'Removed user: ' + del_username\r\n })\r\n except Exception, exc:\r\n # There might be cascade issues or something that causes us to fail in\r\n # removing.\r\n LOG.error(exc)\r\n request.response.status_int = 500\r\n return _api_response(request, {\r\n 'error': 'Bad Request: ' + str(exc)\r\n })", "def test_user_delete_POST(self):\r\n\r\n User.signup(username='testuser5',\r\n password=\"PASSWORD\", location=\"US-FL\")\r\n db.session.commit()\r\n\r\n u = User.query.filter_by(username='testuser5').one()\r\n\r\n with app.app_context():\r\n form = UserDeleteForm(password=\"PASSWORD\", confirm=\"PASSWORD\")\r\n\r\n with self.client as c:\r\n with c.session_transaction() as sess:\r\n sess[CURR_USER_KEY] = u.id\r\n response = self.client.post(\r\n f'/users/{u.username}/delete', data=form.data, follow_redirects=True)\r\n self.assertEqual(response.status_code, 200)\r\n self.assertIn(b'User account successfully deleted.', response.data)\r\n u = User.query.filter_by(username='testuser5').all()\r\n self.assertEqual(len(u), 0)", "def delete(request):\n if request.method != 'POST':\n return HttpResponseNotAllowed(['POST'])\n username = request.user.get_username()\n context = RequestContext(request, {\n 'username': username})\n try:\n DataHubManager.remove_user(username=username, remove_db=True)\n django_logout(request)\n return render(request, 'delete-done.html', context)\n except User.DoesNotExist:\n return HttpResponseNotFound('User {0} not found.'.format(username))", "def remove(self, user):\r\n url = '{0}/{1}'.format(self.get_url(), user)\r\n\r\n return http.Request('DELETE', url), parsers.parse_empty", "def delete(self, user_id):\n\n try:\n self.get(user_id)\n url = \"{0}/users/{1}\".format(self.base_url, user_id)\n url = self._add_token_to_url(url)\n self.session.headers.update({\"Content-Type\": \"application/x-www-form-urlencoded\"})\n self.logger.debug(\"Deleting user with ID: <{0}>\".format(user_id))\n response = self.session.delete(url)\n self.logger.debug(\"Received response code {0} with reason {1}\"\n .format(response.status_code, response.reason))\n if response.status_code == 200:\n self.logger.debug(\"User successfully deleted\")\n else:\n raise InvalidResponseCodeException(\"Response code invalid, the expected response code is {0}, \"\n \"the actual response code is {1}\".format(200, response.status_code))\n return None\n except UserNotFoundException as err:\n self.logger.debug(\"User not found, error {0}\".format(err))", "def _delete_user(self, user):\n if User.delete_user(user):\n self.session.output({'deleted': 'user {} and their related accounts'.format(user)})\n return True\n else:\n self.session.output({'invalid_user': 'please enter valid user ID!\\n'}, '[ Fail to delete user ]')\n return False", "def delete_user(user_id):\n try:\n conn = mysql.connect()\n cursor = conn.cursor()\n cursor.execute(\"DELETE FROM users WHERE user_id=%s\", user_id)\n conn.commit()\n cursor.close()\n conn.close()\n resp = jsonify(\"User deleted successfully!\")\n resp.status_code = 200\n return resp\n except Exception as exception:\n return jsonify(str(exception))", "def delete_user(username: str):\n \n if \"username\" not in session or session[\"username\"] != username:\n raise Unauthorized()\n\n user = User.query.get(username)\n db.session.delete(user)\n db.session.commit()\n session.pop(\"username\")\n\n return 
redirect(\"/login\")", "def delete(self, user: 'UserCondensed'):\n self._delete(entity=user)", "def delete_user(self, userId):\n\n try:\n query = \"delete from user where userId = {}\".format(userId)\n print(query)\n cur = self.con.cursor()\n cur.execute(query)\n self.con.commit()\n\n logger.info(\"Deleted\")\n except Exception as e:\n logger.error(\"Error occured at data deletion..\", e)", "def delete_user():\n token = request.args.get('token')\n data = jwt.decode(token, app.config['SECRET_KEY'])\n\n permit = functions.delete_user(data)\n if permit:\n return make_response(jsonify({'Delete': 'User Deleted Successfully'}), 201)\n else:\n return make_response(jsonify({'Delete Failed': 'Credentials not match or the user not exist'}), 201)", "def del_user_by_username(name):\n collection = get_collection(\"user\")\n collection.delete_one({\"name\": name})\n return True", "def delete_user_by_username_r(\n self,\n\n headers: t.Dict[str, str] = None,\n body: JSONEncodable = None,\n fields_data: t.Dict[str, str] = None,\n **kwargs\n ):\n r = self._do_call(\n method='DELETE',\n url=f'{self.API_BASE_URL}/users/',\n headers=headers,\n body=body,\n fields=fields_data,\n **kwargs\n )\n return r", "def delete_user(cls, user_id=None, email=None):\n params = {\n 'email': email,\n 'user_id': user_id\n }\n user_dict = cls._do_call(\n 'DELETE', cls.api_endpoint + 'users', params)\n return user_dict", "def delete_user(username):\n\n if 'username' not in session or username != session['username']:\n flash('Please login first!')\n return redirect('/login')\n\n user = User.query.get(username)\n db.session.delete(user)\n db.session.commit()\n session.pop('username')\n\n return redirect('/login')", "def deleteUser(self, uID):\n\n cursor = self.conn.cursor()\n query = \"DELETE FROM Users CASCADE \" \\\n \"WHERE uID= %s RETURNING cID; \"\n cursor.execute(query, (uID,))\n cID = cursor.fetchone()[0]\n\n query = \"DELETE FROM Credential \" \\\n \"WHERE cID= %s; \"\n cursor.execute(query, (cID,))\n\n self.conn.commit()\n return", "def delete_user(username):\n if 'username' in session and session['username'] == username:\n User.query.filter_by(username=username).delete()\n db.session.commit()\n session.pop('username')\n return redirect('/')" ]
[ "0.8626425", "0.85164607", "0.83695376", "0.8364185", "0.8343344", "0.8343344", "0.8343344", "0.83141226", "0.82571805", "0.8212667", "0.82057565", "0.8198734", "0.8185502", "0.815266", "0.8147197", "0.8142381", "0.81233865", "0.80933213", "0.8089616", "0.7982235", "0.7977738", "0.7973847", "0.79720366", "0.7970398", "0.79548", "0.7950634", "0.79503787", "0.7932785", "0.79280895", "0.7926495", "0.7921556", "0.792145", "0.78742665", "0.78696704", "0.7861694", "0.7859304", "0.7834065", "0.77850705", "0.7770652", "0.7765007", "0.77533954", "0.7732425", "0.7714016", "0.77135485", "0.7712823", "0.76890266", "0.7685661", "0.7671113", "0.76659185", "0.7657983", "0.7651254", "0.76483554", "0.7640357", "0.76232773", "0.76001763", "0.7598978", "0.75820404", "0.7530165", "0.7527841", "0.7511318", "0.75046414", "0.7503189", "0.74983895", "0.74682266", "0.7454911", "0.7452419", "0.7409744", "0.73492175", "0.73479694", "0.73348236", "0.73308337", "0.73289746", "0.73180115", "0.73155767", "0.7289139", "0.72788304", "0.7278108", "0.7276191", "0.72727567", "0.7271438", "0.72664356", "0.7254438", "0.7249599", "0.7228156", "0.7217734", "0.7216774", "0.7206392", "0.7201554", "0.7186533", "0.7159938", "0.7158901", "0.71562344", "0.71493316", "0.71321833", "0.7121397", "0.71002007", "0.7090984", "0.7090839", "0.7090053", "0.70806134" ]
0.7958771
24
Updates a User object
def update_user(user_id):
    user = storage.get("User", user_id)
    if user is None:
        abort(404)
    json_input = request.get_json()
    if json_input is None:
        abort(400, "Not a JSON")
    for key, value in json_input.items():
        if key not in ['id', 'email', 'created_at', 'updated_at']:
            setattr(user, key, value)
    user.save()
    return jsonify(user.to_dict())
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def update_user():", "def update_user(cls, **kwargs):\n return cls._do_call(\n 'PUT', cls.api_endpoint + 'users', params=kwargs)", "def update_user():\n #TODO user update \n pass", "def update(self, user: U) -> None:\n ...", "def put(self, user_id):\r\n return update_user(request, user_id)", "def update_user(self, instance, user, name=None, password=None, host=None):\n return instance.update_user(user, name=name, password=password,\n host=host)", "def update_user(id):\n pass", "def put(self, user_id):\n data = request.json\n return update_user(data, user_id)", "def do_user_update():\n targetUsers = User.query.filter_by(id=request.form['id']).all()\n if not any(targetUsers):\n return user_list(\"Unknown user.\")\n\n targetUser = targetUsers[0]\n\n targetUser.first_name = request.form['first_name']\n targetUser.name = request.form['name']\n targetUser.nick = request.form['nick']\n targetUser.mail = request.form['mail']\n targetUser.role = request.form['role']\n targetUser.state = request.form['state']\n targetUser.gender = request.form['gender']\n targetUser.meter_id = request.form['meter_id']\n targetUser.group_id = request.form['group_id']\n\n db.session.commit()\n return user_list(\"Updated user \" + targetUser.name)", "def update_user(self):\n self.client.force_authenticate(user=self.user)\n self.response = self.client.patch(\n reverse(\n 'edit_account',kwargs={ 'pk': self.user.id}),\n self.updated_data, format='json'\n )\n self.user = CustomUser.objects.get(username=self.user.username)", "def update_user(self, user_id, **kwargs):\n user = self.get(user_id, raise_error=True)\n if 'display_name' in kwargs:\n user.display_name = kwargs['display_name']\n if 'email' in kwargs:\n user.email = kwargs['email']\n if 'verified' in kwargs:\n user.verified = kwargs['verified']\n self.session.add(user)", "def put(self, id):\n data = flask.request.json\n user_dao.update_user(id, data)\n return None, 204", "def update_user(user_id):\n\n user = User.query.get_or_404(user_id)\n user.first_name = request.form[\"edit_first_name\"]\n user.last_name = request.form[\"edit_last_name\"]\n user.image_url = request.form[\"edit_image_url\"]\n\n db.session.add(user)\n db.session.commit()\n return redirect(\"/users\")", "def update_user(self, user):\n # type: (dict) -> dict\n self.request_url = \"{0}/{1}/{2}\".format(self.API_URL, self.USER_ENDPOINT, user['id'])\n return self.__create_request(payload=user, request_type=self.REQUEST_PUT, version=\"v1\")", "def update_user(user_id):\n update_usr = request.get_json()\n if not update_usr:\n abort(400, {'Not a JSON'})\n usr = storage.get(User, user_id)\n if not usr:\n abort(404)\n else:\n for key, value in update_usr.items():\n setattr(usr, key, value)\n storage.save()\n return jsonify(usr.to_dict())", "def update_user(user_id):\n user = User.query.get_or_404(user_id)\n user.first_name = request.form['first_name']\n user.last_name = request.form['last_name']\n user.image_url = request.form['image_url']\n\n db.session.add(user)\n db.session.commit()\n\n return redirect(\"/users\")", "def updateUser(self, payload):\n\t\turl = \"https://habitica.com/api/v3/user\"\n\t\treturn(putUrl(url, self.credentials, payload))", "def update_user(user_id, data):\n logging.debug(\"Uptating user: user_id={}\".format(user_id))\n return ask('appusers/{0}'.format(user_id), data, 'put')", "def update_user():\n user = request.json\n user[\"_id\"] = validate_id(user[\"_id\"])\n if not user_service.update_user(user):\n response = {\n \"status\": False,\n \"message\": f\"No se pudo actualizar 
el usuario: {str(user['_id'])}\",\n }\n resp = make_response(dumps(response), 404)\n else:\n response = {\n \"status\": True,\n \"message\": f\"Se actualizo corretamente el usuario: {str(user['_id'])}\",\n }\n resp = make_response(dumps(response), 200)\n resp.headers[\"Content-Type\"] = \"application/json\"\n return resp", "def update_user(request):\n post = request.POST.dict()\n user = post.get('user_id')\n if user is None:\n response = {'status':-1, 'status_message':'No user_id specified'}\n return HttpResponse(json.dumps(response))\n try:\n user_obj = User.objects.get(id=user)\n except User.DoesNotExist:\n response = {'status':-1, 'status_message':'Invalid user_id: {}'.format(user)}\n return HttpResponse(json.dumps(response))\n user_obj.first_name = post.get('first_name')\n user_obj.last_name = post.get('last_name')\n password = post.get('password')\n if password and password != \"**********\":\n # update the password\n user_obj.set_password(password)\n if post.get('username'):\n user_obj.username = post['username']\n user_obj.email = post.get('email')\n user_obj.is_superuser = json.loads(post.get('is_admin', 'false'))\n user_obj.is_active = json.loads(post.get('is_enabled', 'false'))\n user_obj.save()\n response = {'status': 1, 'status_message': 'Success'}\n return HttpResponse(json.dumps(response))", "def update_user(user_id):\n user = User.query.get_or_404(user_id)\n user.first_name = request.form['first_name']\n user.last_name = request.form['last_name']\n user.image_url = request.form['image_url']\n\n\n db.session.add(user)\n db.session.commit()\n flash(f\"{user.full_name} user has been edited.\")\n\n return redirect(\"/users\")", "def update_usermenity(user_id):\n user = storage.get(User, user_id)\n\n if user is None:\n abort(404)\n\n put_data = request.get_json()\n if not put_data:\n abort(400, 'Not a JSON')\n\n for k, v in put_data.items():\n if k not in ['id', 'email', 'created_at', 'updated_at']:\n setattr(user, k, v)\n else:\n continue\n user.save()\n storage.save()\n return make_response(jsonify(user.to_dict()), 200)", "def update_user(context, params):\n\n user = User.objects.filter(id=params.get('id')).first()\n if not user:\n raise ValueError(\"user not found\")\n user.language = Language.objects.filter(id=params.get('language_id', None)).first()\n user.deputy = User.objects.filter(id=params.get('deputy_id', None)).first()\n # user.edited_by = context.user\n\n user.save()\n\n update_person(context, user, params)\n\n user.save()\n return user", "def update_user(self, user_id, name, passwd):\n\n # find the user\n um = User(self.settings)\n user = um.find_user(user_id)\n\n # update it\n status, user = user.update(name, passwd)\n\n # json representation\n json_user = user.to_json()\n \n # return\n return status, json_user", "def update_user(user_id):\n body = request.get_json(silent=True)\n if body is None:\n abort(400, jsonify(error=\"Not a JSON\"))\n user = models.storage.get('User', user_id)\n if user is None:\n abort(404)\n for key, value in body.items():\n if key not in ('id', 'email', 'created_at', 'updated_at'):\n setattr(user, key, value)\n user.save()\n return jsonify(user.to_dict())", "def put(self, username):\n user = UserModel.get_by_username(username)\n if user:\n data = self.parse.parse_args()\n user.email = data.get('email')\n user.set_password(data.get('password'))\n user.update()\n return user.as_dict()\n return {\"message\": \"user not found\"}", "async def update_user(user_to_update: UserUpdateModel):\n\n user = User.select().where(User.id == 
user_to_update.id).first()\n\n if not user:\n return HTTPException(404, 'User not found')\n else:\n User.update(username=user_to_update.username, email=user_to_update.email).where(User.id == user_to_update.id).execute()\n\n return UserResponseModel(\n id=user.id,\n username=user_to_update.username,\n email=user_to_update.email\n )", "def user_update(user_id, user_info):\n user = lookup_user_by_id(user_id)\n for (key, value) in user_info.iteritems():\n if key == \"first_name\" and value is not None:\n user.first_name = value\n elif key == \"last_name\" and value is not None:\n user.last_name = value\n elif key == \"email\" and value is not None:\n try:\n lookup_user_by_email(value)\n except:\n user.email = value\n elif key == \"password\" and value is not None:\n user.set_password(value)\n elif key == \"active\" and value is not None:\n if value:\n user.activate()\n else:\n user.deactivate()\n send_activation_mail.delay(user_id)\n elif key == \"social\" and value is not None:\n user.meta['social'] = value\n elif key == \"address\" and value is not None:\n user.meta['address'] = value\n elif key == \"crm\" and value is not None:\n user.meta['crm'] = value\n elif key == \"local\" and value is not None:\n user.meta['local'] = value\n return user_to_dict(user)", "def update(self, request, *args, **kwargs):\n return super(UserViewSet, self).update(request, *args, **kwargs)", "def put(self, new_data, user_id):\n\n request_id = get_jwt_identity()\n user = user_crud.get(request_id)\n if not user.is_superuser:\n abort(401,\n message=\"You do not have permission to view this endpoint\")\n user = user_crud.get(user_id)\n user = user_crud.update(user, new_data)\n if not user['added']:\n abort(409, message=\"User with this email exist\")\n user = user['db_obj']\n return user", "def test_040_update_user(self):\n\n testflow.step(\"Updating user %s\", TEST_USER2)\n assert USER_CLI.run(\n 'edit',\n TEST_USER2,\n attribute='firstName=userX2',\n )[0]", "def update_user(self, user):\n query = TABELLE['id_users']['update']\n return self.execute(query,\n (user['admin'], user['tester'], user['loot_user'], user['loot_admin'], user['banned'],\n user['id']))", "def put(self, user_id):\n\n user_data, error = user_schema.load(api.payload['data'])\n\n user = User.objects.get_or_404(public_id=user_id)\n user.update(updated_at=datetime.utcnow, **user_data)\n \n return user_schema.dump(user)", "def update_user_obj(user_id=None):\n dic = {}\n list_key = ['id', 'email', 'created_at', 'updated_at']\n obj = storage.get(\"User\", user_id)\n if obj is None:\n abort(404)\n dic = request.get_json(silent=True)\n if dic is None:\n abort(400, \"Not a JSON\")\n for key, value in dic.items():\n if key not in list_key:\n setattr(obj, key, value)\n obj.save()\n return jsonify(obj.to_dict()), 200", "def fusion_api_edit_user(self, body, uri, api=None, headers=None):\n return self.user.update(body, uri, api, headers)", "def update_user(self, user, name=None, password=None, host=None):\n return self._user_manager.update(user, name=name, password=password,\n host=host)", "def update_user(cls, userid, user_email, user_phone):\n\n user_to_edit = User.query.filter_by(user_id=userid).one()\n\n user_to_edit.email = user_email\n user_to_edit.mobile_phone = user_phone\n\n db.session.commit()\n return user_to_edit", "def updateUser(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def update_user(user_id):\n new_dict = 
request.get_json(silent=True)\n if type(new_dict) is dict:\n user_obj = storage.get(\"User\", user_id)\n if user_obj is None:\n abort(404)\n for k, v in new_dict.items():\n if k not in [\"id\", \"email\", \"created_at\", \"updated_at\"]:\n setattr(user_obj, k, v)\n user_obj.save()\n return jsonify(user_obj.to_dict()), 200\n else:\n response = jsonify({\"error\": \"Not a JSON\"}), 400\n return response", "def update_user(username):\n try:\n member = Member.objects.get(username=username)\n except Member.DoesNotExist:\n pass\n else:\n member.save()", "def update_user():\n json = request.json\n name = json[\"name\"]\n email = json[\"email\"]\n pwd = json[\"pwd\"]\n user_id = json[\"user_id\"]\n if name and email and pwd and user_id and request.method == \"POST\":\n # save edits\n sql = \"UPDATE users SET user_name=%s, user_email=%s, \" \\\n \"user_password=%s WHERE user_id=%s\"\n data = (name, email, pwd, user_id)\n try:\n conn = mysql.connect()\n cursor = conn.cursor()\n cursor.execute(sql, data)\n conn.commit()\n resp = jsonify(\"User updated successfully!\")\n resp.status_code = 200\n cursor.close()\n conn.close()\n return resp\n except Exception as exception:\n return jsonify(str(exception))\n else:\n return jsonify(\"Please provide id, name, email and pwd\")", "def put(self, new_data):\n\n user_id = get_jwt_identity()\n user = user_crud.get(user_id)\n if not user:\n abort(404, message=\"User not Found\")\n user = user_crud.update(user, new_data)\n if not user['added']:\n abort(409, message=\"User with this email exits\")\n user = user['db_obj']\n return user", "def put(self, id):\n return userDao.update(id, api.payload)", "def update_user(self) -> db.User:\n log.debug(\"Fetching updated user data from the database\")\n self.user = self.session.query(db.User).filter(db.User.user_id == self.chat.id).one_or_none()\n return self.user", "def test_update_user(self):\n token = self.authenticate_user(self.auth_user_data).data[\"token\"]\n response = self.client.put(self.user_url,\n self.user_data,\n HTTP_AUTHORIZATION=f'token {token}',\n format='json')\n self.assertEqual(response.status_code, status.HTTP_200_OK)", "def testUpdateUser(self):\n UserAPI().create([(u'test', u'secret', u'name', u'name@example.com')])\n user = getUser(u'test')\n passwordHash = user.passwordHash\n self.store.commit()\n info = TUserUpdate(u'test', u'password', u'new-name',\n u'new-name@example.com')\n with login(u'fluiddb', self.admin.objectID, self.transact) as session:\n yield self.facade.updateUser(session, info)\n\n self.store.rollback()\n self.assertEqual(u'test', user.username)\n self.assertNotEqual(passwordHash, user.passwordHash)\n self.assertEqual(u'new-name', user.fullname)\n self.assertEqual(u'new-name@example.com', user.email)", "def update_user_data(self, new_user: User):\n self.user_data.update_user_data(new_user)", "async def update(self):\n self.data = await self.api.user.get()", "def update_user(user_id, email, preferred_category_id, preferred_country_id):\n user = User.query.get(user_id)\n\n user.email = email\n user.preferred_category_id = preferred_category_id\n user.preferred_country_id = preferred_country_id\n\n db.session.commit()\n return user", "def update_db_with_user_edits(user_id):\n user = User.query.get_or_404(user_id)\n user.first_name = request.form['first_name']\n user.last_name = request.form['last_name']\n user.img_url = request.form['img_url']\n\n db.session.add(user)\n db.session.commit()\n\n return redirect('/users')", "def put(self, request):\n profile = 
Profile.get_by_id(request.user.id)\n if not profile:\n return HttpResponse(status=403)\n user = CustomUser.objects.get(id=request.user.id)\n update_data = json.loads(request.body.decode('utf-8'))\n user.update(first_name=update_data.get('first_name'),\n last_name=update_data.get('last_name'))\n profile.update(\n birthday=update_data.get('birthday'),\n gender=update_data.get('gender'),\n hobbies=update_data.get('hobbies'),\n facebook=update_data.get('facebook'))\n data = profile.to_dict()\n return JsonResponse(data, status=200)", "def save(self, context=None):\n updates = self.obj_get_changes()\n self.dbapi.update_user(context, self.id, updates)\n self.obj_reset_changes()", "def put(self, username):\n user = query_user_by_name(username)\n if user is None:\n return 'User does not exit', 404\n if invalid_user(username):\n return 'Unauthorized User', 401\n body = request.get_json()\n user.parse_body(body)\n db.session.add(user)\n db.session.commit()\n return user.serialize(), 200", "def update(self, uid, body):\n user_record = UserRecord.get_user(uid, auth=web_sdk.auth)\n user = User.query.filter_by(uid=user_record.uid).first()\n\n if not user_record or not user:\n raise abort(404, description=\"NotFound\", response=\"not_found\")\n\n user_record.serialize(body)\n user_record.update_user()\n\n if \"complete_register\" in body:\n user_record.make_claims(\n {\"complete_register\": body[\"complete_register\"]}\n )\n\n if \"specialities\" in body:\n if not isinstance(body[\"specialities\"], list):\n raise HandlerException(\n 400, \"Bad request: specialities should be array\"\n )\n user.update_specialities(body[\"specialities\"])\n\n if \"methods\" in body:\n if not isinstance(body[\"methods\"], list):\n raise HandlerException(\n 400, \"Bad request: methods should be array\"\n )\n user.update_methods(body[\"methods\"])\n\n if \"plans\" in body:\n if not isinstance(body[\"plans\"], list):\n raise HandlerException(\n 400, \"Bad request: plans should be array\"\n )\n user.update_plans(body[\"plans\"])\n\n user.serialize(body)\n user.save()\n\n return {\"uid\": user_record.uid, \"a\": user_record, \"b\": user}", "def update(self, oid, name=None, email=None, default_project=None, \n domain=None, password=None, enabled=None, description=None):\n data = {\"user\": {}}\n \n if name is not None:\n data['user']['name'] = name\n if email is not None:\n data['user']['email'] = email \n if default_project is not None:\n data['user']['default_project_id'] = default_project\n if domain is not None:\n data['user']['domain_id'] = domain\n if password is not None:\n data['user']['password'] = password\n if enabled is not None:\n data['user']['enabled'] = enabled\n if description is not None:\n data['user']['description'] = description \n \n path = '/users/%s' % oid\n res = self.client.call(path, 'PATCH', data=json.dumps(data), \n token=self.manager.identity.token)\n self.logger.debug('Update openstack user: %s' % truncate(res))\n return res[0]['user']", "def update_user(request_form, user_id, password_hash=None):\n values = {'login': request_form.get('login').strip(),\n 'password': password_hash if password_hash else request_form.get('password').strip()}\n db_session.query(Users).filter_by(id=user_id).update(values)\n db_session.commit()\n return 'Updated user #%s: %s.' 
% (user_id, values['login']), 'success'", "def update(user_id, user):\n # Get the user requested from the db into session\n update_user = User.query.filter(\n User.user_id == user_id\n ).one_or_none()\n\n # Try to find an existing user with the same name as the update\n first_name = user.get(\"first_name\")\n last_name = user.get(\"last_name\")\n\n existing_user = (\n User.query.filter(User.first_name == first_name)\n .filter(User.last_name == last_name)\n .one_or_none()\n )\n\n if update_user is None:\n abort(\n 404,\n \"User not found for Id: {user_id}\".format(user_id=user_id),\n )\n\n elif (\n existing_user is not None and existing_user.user_id != user_id\n ):\n abort(\n 409,\n \"User {first_name} {last_name} exists already\".format(\n first_name=first_name, last_name=last_name\n ),\n )\n\n # Otherwise go ahead and update!\n else:\n\n # turn the passed in user into a db object\n schema = UserSchema()\n update = schema.load(user, session=db.session)\n\n # Set the id to the user we want to update\n update.user_id = update_user.user_id\n\n # merge the new object into the old and commit it to the db\n db.session.merge(update)\n db.session.commit()\n\n # return updated user in the response\n data = schema.dump(update_user)\n\n return data, 200", "def update(self, user_id, first_name=None, last_name=None, email=None, title=None,\n dept=None, notes=None, admin_role=None, app_role=None, email_notification=None):\n\n url = \"{0}/users/{1}\".format(self.base_url, user_id)\n url = self._add_token_to_url(url)\n payload = self.get(user_id)\n\n # get rid of fields that aren't required for PUT\n pop_fields = ['complete_json',\n 'entity_type',\n 'id',\n 'image',\n 'is_deleted',\n 'tags',\n 'username']\n for field in pop_fields:\n payload.pop(field)\n\n # replace fields with updated ones from kwargs\n if first_name:\n payload[\"first_name\"] = first_name\n if last_name:\n payload[\"last_name\"] = last_name\n if email:\n payload[\"email\"] = email\n if title:\n payload[\"title\"] = title\n if dept:\n payload[\"dept\"] = dept\n if notes:\n payload[\"notes\"] = notes\n if app_role:\n payload[\"user_type\"] = app_role\n if email_notification is not None:\n payload[\"subscribed_to_emails\"] = email_notification\n\n # Logic for setting admin status is slightly more complicated:\n if admin_role is None:\n pass\n elif admin_role == \"app_admin\":\n payload[\"admin\"] = True\n payload[\"roles\"] = \"\"\n elif admin_role == \"data_admin\":\n payload[\"admin\"] = False\n payload[\"roles\"] = \"data_admin\"\n else:\n payload[\"admin\"] = False\n payload[\"roles\"] = \"\"\n\n self.logger.debug(\"Sending the user information {0} to {1}\".format(json.dumps(payload), url))\n self.session.headers.update({\"Content-Type\": \"application/json\"}) # Set special header for this post\n response = self.session.put(url, data=json.dumps(payload), verify=False)\n self.logger.debug(\"Received response code {0} with reason {1}...\".format(response.status_code, response.reason))\n self.session.headers.pop(\"Content-Type\") # Remove header, as it affects other tests\n return response.json()['response']", "def update_user(user_id):\n netAdminToolDB = app.config['DATABASE']\n user = netAdminToolDB.get_user(user_id)\n if user == None:\n return jsonify({'error': 'User_id not found'}), 404\n\n input = request.get_json()\n\n if input == None:\n return jsonfiy({'error': 'Invalid PUT request'}), 400\n\n # Send input directly to update_user function, which checks each key\n netAdminToolDB.update_user(user_id, **input)\n user = 
netAdminToolDB.get_user(user_id)\n userDict = dict(user)\n uri = url_for('get_user', user_id=user.id, _external=True)\n userDict['uri'] = uri\n\n return jsonify({'user': userDict}), 200", "def _update_user(cursor, user_id, user):\n # Create a tuple with user fields\n user_data = (user[User.PROPERTIES.FOLLOWERS],\n user[User.PROPERTIES.FOLLOWING],\n user[User.PROPERTIES.DESIGNS],\n user[User.PROPERTIES.COLLECTIONS],\n user[User.PROPERTIES.MAKES],\n user[User.PROPERTIES.LIKES],\n user[User.PROPERTIES.SKILL_LEVEL],\n user_id)\n\n cursor.execute(dbq.UPDATE_USER, user_data)\n logger.debug(\"user_id {} updated\".format(user_id))", "def update_user(username):\n name = request.get_json().get(\"name\", None)\n role = request.get_json().get(\"role\", None)\n email = request.get_json().get(\"email\", None)\n return jsonify(\n admin.update_user(current_app.scoped_session(), username, role, email, name)\n )", "def request_user_update():\n target_user = User.query.filter_by(id=request.args['id']).first()\n if target_user is None:\n return user_list(\"Unknown user.\")\n\n return Response(render_template('admin/user/create-update.html',\n csrf_token=(\n get_raw_jwt() or {}).get(\"csrf\"),\n target=\"/admin/user/update\",\n genders=list(GenderType),\n states=list(StateType),\n groups=Group.query.all(),\n roles=list(RoleType),\n id=target_user.id,\n gender=target_user.gender,\n first_name=target_user.first_name,\n name=target_user.name,\n nick=target_user.nick,\n mail=target_user.mail,\n meter_id=target_user.meter_id,\n group_id=target_user.group_id,\n role=target_user.role,\n state=target_user.state),\n mimetype='text/html')", "def update_user_profile(user_info):\n user_id = user_info[\"USER_ID\"]\n user_collection.find_one_and_update(\n {\"_id\": user_id},\n {\n \"$set\": {\n \"username\": user_info[\"username\"],\n \"email\": user_info[\"email\"],\n \"avatar\": user_info[\"avatar\"],\n \"githubURL\": user_info[\"githubURL\"],\n \"linkedinURL\": user_info[\"linkedinURL\"],\n \"stackoverflowURL\": user_info[\"stackoverflowURL\"],\n \"skills\": user_info[\"skills\"],\n }\n },\n upsert=False,\n )", "def update(self, request, *args, **kwargs):\n instance = self.get_object()\n instance.username = request.data.get(\"username\")\n instance.first_name = request.data.get('first_name')\n instance.last_name = request.data.get('last_name')\n instance.email = request.data.get('email')\n\n instance.save()\n\n serializer = self.get_serializer(instance)\n # serializer.is_valid(raise_exception=True)\n # self.perform_update(serializer)\n\n return Response({\"data\": serializer.data, \"status\": status.HTTP_200_OK})", "def update_user(self, username, password, fullname, description, email):\n params = {\n \"f\" : \"json\",\n \"username\" : username\n }\n if password is not None:\n params['password'] = password\n if fullname is not None:\n params['fullname'] = fullname\n if description is not None:\n params['description'] = description\n if email is not None:\n params['email'] = email\n uURL = self._url + \"/users/update\"\n return self._con.post(path=uURL, postdata=params)", "def patch(self, user_id):\n\n data = request.get_json()\n\n res = self._user.update_user(user_id, data)\n\n if res:\n return {\n \"status\": 200,\n \"data\": [{\n \"id\": res[\"id\"],\n \"message\": \"user record has been updated\"\n }]\n }, 200\n else:\n return {\n \"status\": 404,\n \"error\": \"Not found for id {}\".format(user_id)\n }, 404", "def update(self, uid, body, user_auth):\n user_record = UserRecord.get_user(uid, auth=admin_sdk.auth)\n 
user = Admin.query.filter_by(uid=user_record.uid).first()\n\n if not user_record or not user:\n raise HandlerException(404, \"Not found user\")\n\n if user_auth[\"uid\"] == uid:\n raise HandlerException(\n 401, \"Logged user can't modify own profile in this endpoint\"\n )\n\n if not user_auth[\"b\"].has_access(user.privileges, True):\n raise HandlerException(\n 401,\n \"Logged user doesn't have sufficient permissions \\\n to create a user with equal or higher privileges\",\n )\n\n user_record.serialize(body)\n user_record.update_user()\n\n if \"privileges\" in body:\n user_record.make_claims(\n {\"admin\": True, \"access_level\": body[\"privileges\"]}\n )\n\n user.serialize(body)\n user.save()\n\n return {\"uid\": user_record.uid, \"a\": user_record, \"b\": user}", "def update_user_info(user, save=True):\n p = bayou.Person.from_default_services(user.username)\n\n user.email = p.email if p.email else user.email\n user.first_name = p.first_name if p.first_name else user.first_name\n user.last_name = p.surname if p.surname else user.last_name\n\n if save:\n user.save()\n\n return user", "def user_update(sender, instance, created, **kwargs):\n payload = DiscordUserSerializer(instance).data\n ws.send_notification(ws.types.USER_UPDATE, payload)", "def mod_user(self, username, data):\n headers = {\"user-agent\": self.u_agent}\n req_url = self.normalize_admin_url(u\"users/{}\".format(username))\n res = requests.put(\n req_url,\n headers=headers,\n auth=self.auth,\n data=json.dumps(data),\n verify=False,\n )\n if res.status_code == 200:\n return Response(0, u\"User {} has been modified\".format(username))\n else:\n return Response(res.status_code, res)", "def update_user(self, username):\n parser_update.add_argument('email', type=validate_email,\n required=False, nullable=False,\n help=\"Email must be formatted correctly\")\n\n parser_update.add_argument('phoneNumber', type=validate_phonenumber,\n required=False, nullable=False,\n help=\"Enter a valid phone number\")\n\n parser_update.add_argument('firstname', type=validate_characters,\n required=False, nullable=False,\n help=\"First name must be formatted correctly\")\n\n parser_update.add_argument('lastname', type=validate_characters,\n required=False, nullable=False,\n help=\"Last name must be formatted correctly\")\n\n parser_update.add_argument('othernames', type=validate_characters,\n required=False, nullable=False,\n help=\"Other name must be formatted correctly\")\n\n user = self.get_user(username)\n if user is None:\n return None\n\n args = parser_update.parse_args()\n new_data = {\n 'email': request.json.get('email', user['email']).lower(),\n 'firstname': request.json.get('firstname', user['firstname']).capitalize(),\n 'lastname': request.json.get('lastname', user['lastname']).capitalize(),\n 'othernames': request.json.get('othernames', user['othernames']).capitalize(),\n 'phoneNumber': request.json.get('phoneNumber', user['phonenumber']),\n }\n\n getEmail = self.get_user(new_data['email'])\n verification_status = True\n\n if user['email'] != new_data['email']:\n if getEmail is not None:\n return 'email exists'\n verification_status = False\n\n query = \"\"\"UPDATE users SET firstname=%s,lastname=%s,othernames=%s,\\\n email=%s,phonenumber=%s,emailverified=%s WHERE username=%s\"\"\"\n values = new_data['firstname'], new_data['lastname'], new_data['othernames'], new_data['email'], new_data['phoneNumber'], verification_status, username\n\n conn = self.db\n cursor = conn.cursor()\n cursor.execute(query, values)\n conn.commit()\n return 
new_data", "def patch_user(user_id):\n success = True\n try:\n usr = db.session.query(User).get(user_id)\n for item in request.json:\n if item == 'username':\n usr.username = request.json['username']\n elif item == 'email':\n usr.username = request.json['email']\n db.session.commit()\n except:\n success = False\n return jsonify(success=success)", "def update_user(id):\n with app.app_context():\n user = User.query.get(id)\n if user is None:\n return \"User not found\", 404\n skills = validate_skills(request.get_json().get(\"skills\"))\n if not skills:\n return \"Invalid skills\", 400\n\n for skill in skills:\n skill_db = Skill.query.filter_by(name=skill).first()\n if skill_db is None:\n skill_db = Skill(name=skill)\n db.session.add(skill_db)\n \n user.skills = [\n skill for skill in Skill.query.filter(Skill.name.in_(skills)).all()\n ]\n \n users_response = UsersResponse(\n users=[\n {\n \"id\": user.id,\n \"name\": user.name,\n \"skills\": [skill.name for skill in user.skills]\n }\n ]\n )\n db.session.commit()\n return users_response.json(), 200", "def update_user_and_pw(cls, userid, user_email, user_password, user_phone):\n\n user_to_edit = User.query.filter_by(user_id=userid).one()\n\n user_to_edit.email = user_email\n user_to_edit.password = user_password\n user_to_edit.mobile_phone = user_phone\n\n db.session.commit()\n return user_to_edit", "def wmUpdateUser(self):\n user_role = uiCommon.GetSessionUserRole()\n if user_role != \"Administrator\":\n raise Exception(\"Only Administrators can edit user accounts.\")\n\n args = uiCommon.getAjaxArgs()\n\n u = catouser.User()\n u.FromID(args[\"ID\"])\n\n if u.ID:\n # these changes are done BEFORE we manipulate the user properties for update.\n\n new_pw = uiCommon.unpackJSON(args.get(\"Password\"))\n random_pw = args.get(\"NewRandomPassword\")\n\n # if a password was provided, or the random flag was set...exclusively\n if new_pw:\n # if the user requesting the change *IS* the user being changed...\n # set force_change to False\n force = True\n if u.ID == uiCommon.GetSessionUserID():\n force = False\n\n u.ChangePassword(new_password=new_pw, force_change=force)\n uiCommon.WriteObjectChangeLog(catocommon.CatoObjectTypes.User, u.ID, u.FullName, \"Password changed.\")\n elif random_pw:\n u.ChangePassword(generate=random_pw)\n uiCommon.WriteObjectChangeLog(catocommon.CatoObjectTypes.User, u.ID, u.FullName, \"Password reset.\")\n\n # now we can change the properties\n u.LoginID = args.get(\"LoginID\")\n u.FullName = args.get(\"FullName\")\n u.Status = args.get(\"Status\")\n u.AuthenticationType = args.get(\"AuthenticationType\")\n u.ForceChange = args.get(\"ForceChange\")\n u.Email = args.get(\"Email\")\n u.Role = args.get(\"Role\")\n u.FailedLoginAttempts = args.get(\"FailedLoginAttempts\")\n u.Expires = args.get(\"Expires\")\n\n u._Groups = args.get(\"Groups\")\n\n if u.DBUpdate():\n uiCommon.WriteObjectChangeLog(catocommon.CatoObjectTypes.User, u.ID, u.ID, \"User updated.\")\n\n return json.dumps({\"result\": \"success\"})", "def update_user_profile_info(user_id, user_fname, user_lname, email):\n \n user=User.query.filter(User.user_id == user_id).first()\n\n if email != None:\n user.update_email(email)\n if user_fname != None:\n user.update_first_name(user_fname)\n if user_lname != None:\n user.update_last_name\n \n db.session.commit()", "def test_update_account_user(self):\n self._require_login()\n\n response = self.client.put('/v1/users/' +str(self.user.id)+'/',\n {\"username\": 'toni@malucao', \"password\": 'cidadeeee'},\n format='json')\n\n 
self.assertEqual(response.status_code, 200,\n 'Expected Response Code 200, received {0} instead.'.format(response.status_code))", "def api_update_user(username):\n user = self.get_user(user=username)\n\n errs = self.user_manager.update_user_as_admin(user, request.json)\n\n if errs:\n return {'errors': errs}\n\n return {'user': user.serialize()}", "def update_user_profile(email, **kwargs): # PUT\n user = coll(\"users\").find_one({\"_id\": email})\n if not user:\n return {\"message\": \"User not found\"}, 404\n\n coll(\"users\").update_one({\"_id\": email}, {\"$set\": kwargs})\n\n return {\"message\": \"User profile successfully updated\"}, 200", "def update(self, instance, validate_data):\n # this is we want to make sure the password is set using the\n # set_password function instead of just setting it to\n # whichever values is provided\n\n # instance is the model instance that is linked to model\n # serizlizer that is user object\n # validate_data is fields = (\"email\", \"password\", \"name\")\n # that have been through the validation and ready to update\n # first is remove the password from the validated data\n # do that using the dictionary pop function\n # use None here is because with the pop function\n # must provide a default value\n password = validate_data.pop('password', None)\n # super() will call the ModelSerializer update function\n user = super().update(instance, validate_data)\n\n if password:\n user.set_password(password)\n user.save()\n\n return user", "def update_field(self, uid, body, user_auth):\n user_record = UserRecord.get_user(uid, auth=admin_sdk.auth)\n user = Admin.query.filter_by(uid=user_record.uid).first()\n\n if not user_record or not user:\n raise HandlerException(404, \"Not found user\")\n\n if user_auth[\"uid\"] == uid:\n raise HandlerException(\n 401, \"Logged user can't modify own profile in this endpoint\"\n )\n\n if not user_auth[\"b\"].has_access(user.privileges, True):\n raise HandlerException(\n 401,\n \"Logged user doesn't have sufficient permissions \\\n to update a user with equal or higher privileges\",\n )\n\n user_record.serialize(body)\n user_record.update_user()\n\n if \"privileges\" in body:\n user_record.make_claims(\n {\"admin\": True, \"access_level\": body[\"privileges\"]}\n )\n\n user.serialize(body)\n user.save()\n\n return {\"uid\": user_record.uid, \"a\": user_record, \"b\": user}", "def update(\n self, name: str = None, company: str = None, bio: str = None, avatar: str = None\n ):\n query = gql(\n \"\"\"\n mutation UserUpdate($user: UserUpdateInput!) 
{\n userUpdate(user: $user)\n }\n \"\"\"\n )\n params = {\"name\": name, \"company\": company, \"bio\": bio, \"avatar\": avatar}\n\n params = {\"user\": {k: v for k, v in params.items() if v is not None}}\n\n if not params[\"user\"]:\n return SpeckleException(\n message=\"You must provide at least one field to update your user profile\"\n )\n\n return self.make_request(\n query=query, params=params, return_type=\"userUpdate\", parse_response=False\n )", "def test_update_user(self):\n\n update_dict = dict(\n username='test_another_username',\n role='test_new_role',\n department='test_new_department'\n )\n\n # Update non-existing user\n updated = self.user_api.update_user(MAGEN_USER['user_uuid'], update_dict)\n self.assertTrue(updated.success)\n self.assertEqual(updated.count, 0)\n\n # Insert user in Database\n inserted = self.user_api.insert_user(MAGEN_USER)\n self.assertTrue(inserted.success)\n\n # Update existing user\n updated = self.user_api.update_user(MAGEN_USER['user_uuid'], update_dict)\n self.assertTrue(updated.success)\n self.assertEqual(updated.count, 1)\n # Verify that data was updated\n selected = self.user_api.get_user(MAGEN_USER['user_uuid'])\n self.assertTrue(selected.success)\n self.assertEqual(selected.documents['username'], update_dict['username'])\n self.assertEqual(selected.documents['role'], update_dict['role'])\n self.assertEqual(selected.documents['department'], update_dict['department'])", "def update( self, trans, id, payload, **kwd ):\n current_user = trans.user\n user_to_update = self.user_manager.by_id( self.decode_id( id ) )\n\n # only allow updating other users if they're admin\n editing_someone_else = current_user != user_to_update\n is_admin = trans.api_inherit_admin or self.user_manager.is_admin( current_user )\n if editing_someone_else and not is_admin:\n raise exceptions.InsufficientPermissionsException( 'you are not allowed to update that user', id=id )\n\n self.user_deserializer.deserialize( user_to_update, payload, user=current_user, trans=trans )\n return self.user_serializer.serialize_to_view( user_to_update, view='detailed' )", "def put(self):\n form = UpdateForm.from_json(request.get_json())\n\n if not g.user:\n return jsonify({\"login_staus\": False, \"message\": \"Please login\"})\n\n if form.validate_on_submit():\n current_user = User.query.filter_by(id=g.user.id).first()\n if form.email.data:\n current_user.email = form.email.data\n if form.new_password.data:\n current_user.password = form.new_password.data\n db.session.commit()\n return jsonify({\"update_status\": True})\n return jsonify({\"update_status\": False, \"message\": form.errors})", "def update_field(self, uid, body):\n user_record = UserRecord.get_user(uid, auth=web_sdk.auth)\n user = User.query.filter_by(uid=user_record.uid).first()\n\n if not user_record or not user:\n raise HandlerException(404, \"Not found user\")\n\n user_record.serialize(body)\n user_record.update_user()\n\n if \"complete_register\" in body:\n user_record.make_claims(\n {\"complete_register\": body[\"complete_register\"]}\n )\n\n if \"specialities\" in body:\n if not isinstance(body[\"specialities\"], list):\n raise HandlerException(\n 400, \"Bad request: specialities should be array\"\n )\n user.update_specialities(body[\"specialities\"])\n\n if \"methods\" in body:\n if not isinstance(body[\"methods\"], list):\n raise HandlerException(\n 400, \"Bad request: methods should be array\"\n )\n user.update_methods(body[\"methods\"])\n\n if \"plans\" in body:\n if not isinstance(body[\"plans\"], list):\n raise 
HandlerException(\n 400, \"Bad request: plans should be array\"\n )\n user.update_plans(body[\"plans\"])\n\n user.serialize(body)\n user.save()\n\n return {\"uid\": user_record.uid, \"a\": user_record, \"b\": user}", "def update(self, instance, validated_data):\n # instance is model instance that is linked to our model serializer that's going to be our user object.\n # The validated data is going to be these fields that have been through the validation and ready to update.\n # pop the old password from dictionary and replace with None\n password = validated_data.pop('password', None)\n user = super().update(instance, validated_data)\n\n if password:\n user.set_password(password) # set new password\n user.save()\n return user", "def _save_user(self, user):\n self.firebase.patch(f'/{self.USERS_KEY}', {str(user.id): user.username})", "def update_user_data(payload, user_id):\n user = User.query.get(user_id)\n # exception for non existing id\n if user is None:\n abort(404)\n # set error status\n error = False\n # get posted data from json request\n body = request.get_json()\n # update user data\n keys = body.keys()\n try:\n if 'e_mail' in keys:\n user.e_mail = body['e_mail']\n if 'address' in keys:\n user.address = body['address']\n if 'auth0_id' in keys:\n user.auth0_id = body['auth0_id']\n if 'role' in keys:\n user.role = body['role']\n user.update()\n formatted_user = user.format()\n except Exception:\n user.rollback()\n error = True\n print(sys.exc_info())\n finally:\n user.close_session()\n\n if error:\n abort(422)\n\n return jsonify({\n 'success': True,\n 'user': formatted_user\n })", "async def update_user(new_data: UpdateUser, user_id: str = Path(..., description=\"ID value of the desired user\"),\n db_handler: DBHandler = Depends(database_dependency)):\n try:\n updated_record = await db_handler.update_user(user_id=user_id, new_data=new_data)\n updated_record = init_BaseUser(updated_record)\n except DBHandlerException as e:\n return JSONResponse(status_code=400)\n\n return updated_record", "def updateUser(self, uID, ufirstname, ulastname, udescription, urole, uclassification):\n cursor = self.conn.cursor()\n query= \"UPDATE Users \"\\\n \"SET ufirstname= %s, ulastname= %s, udescription= %s, urole= %s, uclassification= %s \"\\\n \"WHERE uID= %s; \"\n cursor.execute(query,(ufirstname, ulastname, udescription, urole, uclassification,uID,))\n self.conn.commit()\n return uID", "def test_update_user(self):\n pass", "def update_user(user_id):\n try:\n # Get the value which needs to be updated\n try:\n body = ast.literal_eval(json.dumps(request.get_json()))\n except:\n # Bad request as the request body is not available\n # Add message for debugging purpose\n return \"\", 400\n\n # Updating the user\n records_updated = collection.update_one({\"id\": int(user_id)}, body)\n\n # Check if resource is updated\n if records_updated.modified_count > 0:\n # Prepare the response as resource is updated successfully\n return \"\", 200\n else:\n # Bad request as the resource is not available to update\n # Add message for debugging purpose\n return \"\", 404\n except:\n # Error while trying to update the resource\n # Add message for debugging purpose\n return \"\", 500", "def update_user(self, userid, username, email, password, cursor):\n sql=\"UPDATE users SET username=%s, email=%s, password=%s WHERE userid=%s\"\n cursor.execute(sql,(username,email,password,userid))\n return cursor", "def update_profile(request, id):\n username = request.POST.get(\"username\")\n first_name, last_name = 
request.POST.get(\"fullname\").split()\n email = request.POST.get(\"email\")\n phone = request.POST.get(\"phone\")\n address = request.POST.get(\"address\")\n\n userObj = User.objects.get(id=id)\n userObj.first_name = first_name\n userObj.last_name= last_name\n userObj.username= username\n userObj.email = email\n userObj.phone = phone\n userObj.address = address\n userObj.save()\n messages.success(request, \"Your profile is successfully update.\", fail_silently=False)\n return redirect(\"user_profile\", id)", "def account_update(request):\r\n params = request.params\r\n json_body = request.json_body\r\n user_acct = request.user\r\n\r\n if 'name' in params and params['name'] is not None:\r\n name = params.get('name')\r\n user_acct.name = name\r\n\r\n if 'name' in json_body and json_body['name'] is not None:\r\n name = json_body.get('name')\r\n user_acct.name = name\r\n\r\n if 'email' in params and params['email'] is not None:\r\n email = params.get('email')\r\n user_acct.email = email.lower()\r\n\r\n if 'email' in json_body and json_body['email'] is not None:\r\n email = json_body.get('email')\r\n user_acct.email = email.lower()\r\n\r\n return _api_response(request, user_acct.safe_data())", "def update_user(cls, user_dict: dict, current_user_id=None, is_self_update=False):\n user: UserModel = cls.from_dict(user_dict)\n user_id = user.id\n valid_user: UserModel = UserModel.find_by_id(user_id)\n if not valid_user:\n return None\n # Verify if self update is from the same user, if not return None.\n if is_self_update:\n if valid_user.id != current_user_id:\n return None\n\n # Update a valid user.\n valid_user.username = user.username\n valid_user.name = user.name\n valid_user.surname = user.surname\n # Update password only if it doesn't match and is self update.\n if valid_user.password != user.password and is_self_update:\n valid_user.password = app_utils.encrypt_password(user.password)\n\n # only update roles or rights if is not self update. 
(Only admin can update roles.)\n if not is_self_update:\n # Update role relational field.\n roles_ids = user_dict.get(\"roles\")\n right_ids = user_dict.get(\"rights\")\n if not roles_ids:\n raise exceptions.RoleNotFound(\"User must have at least one role assigned.\")\n\n # delete all roles\n valid_user._delete_roles()\n valid_user._delete_rights()\n # add new ones.\n valid_user.add_roles(roles_ids=roles_ids, current_user_id=current_user_id)\n valid_user.add_rights(right_ids=right_ids, current_user_id=current_user_id)\n\n return valid_user", "def test_admin_update_user(self):\n resp = self.admin_create_user()\n reply = self.admin_login()\n token = reply['token']\n user = dict(\n name='Summer Lover',\n username='lover',\n password='Andela8',\n role='attendant'\n )\n resp = self.client.put(\n '/api/v1/users/2',\n content_type='application/json',\n data=json.dumps(user),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'user updated!')\n self.assertEqual(resp.status_code, 200)", "def update(self, instance, validated_data):\n user = super().update(instance, validated_data)\n return user", "def update_user(self, user_id, username, realname, passhash):\n if user_id is None:\n self.log_error(MongoDatabase.update_user.__name__ + \"Unexpected empty object: user_id\")\n return False\n\n try:\n user_id_obj = ObjectId(user_id)\n values = {}\n if username is not None:\n values['username'] = username\n if realname is not None:\n values['realname'] = realname\n if passhash is not None:\n values['hash'] = passhash\n if len(values) > 0:\n self.users_collection.update_one({\"_id\": user_id_obj}, {\"$set\": values}, upsert=False)\n return True\n except:\n traceback.print_exc(file=sys.stdout)\n self.log_error(sys.exc_info()[0])\n return False" ]
[ "0.8470099", "0.8341868", "0.817528", "0.81275237", "0.7892335", "0.7821547", "0.7795651", "0.77814746", "0.7775861", "0.7737184", "0.76707584", "0.76067144", "0.75910306", "0.7590645", "0.7536968", "0.74962765", "0.7480669", "0.74541986", "0.74528563", "0.7435805", "0.7414097", "0.7401251", "0.73902667", "0.73805463", "0.7377737", "0.7326023", "0.73241705", "0.73132753", "0.727167", "0.7268099", "0.7255985", "0.72559327", "0.72553444", "0.7253936", "0.7252027", "0.7243184", "0.72365385", "0.7221202", "0.71967286", "0.7192934", "0.7184806", "0.71663827", "0.71546465", "0.71523976", "0.7135067", "0.7132284", "0.71273345", "0.71267503", "0.71141773", "0.71134615", "0.7110166", "0.71005046", "0.7095805", "0.7095451", "0.7081531", "0.7071168", "0.7067006", "0.7060641", "0.7056499", "0.70553946", "0.70330405", "0.7026147", "0.7024799", "0.70143455", "0.70138305", "0.70100504", "0.70038766", "0.69982", "0.69929427", "0.6992621", "0.69657874", "0.69560194", "0.69443625", "0.6944311", "0.6938392", "0.693817", "0.6934886", "0.69192326", "0.6895745", "0.68633723", "0.6855553", "0.6852509", "0.6819317", "0.6804646", "0.6794845", "0.67805403", "0.67791563", "0.6766844", "0.6765668", "0.6743762", "0.6737908", "0.6730241", "0.6727106", "0.6724353", "0.67243284", "0.67223495", "0.67173", "0.67168325", "0.67057407", "0.6687435" ]
0.7520065
15
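The update-user negatives above all follow the same shape: look the record up by id, return 404 when it is missing, copy over only the fields the caller supplied, commit, and return the serialized result. Below is a minimal Flask/SQLAlchemy sketch of that shape, assuming a hypothetical User model with username and email columns; none of the names or the connection string come from a specific entry.

from flask import Flask, request, jsonify, abort
from flask_sqlalchemy import SQLAlchemy

app = Flask(__name__)
app.config["SQLALCHEMY_DATABASE_URI"] = "sqlite:///users.db"  # illustrative connection string
db = SQLAlchemy(app)

class User(db.Model):  # hypothetical model mirroring fields used in the samples above
    id = db.Column(db.Integer, primary_key=True)
    username = db.Column(db.String(80))
    email = db.Column(db.String(120))

@app.route("/users/<int:user_id>", methods=["PUT"])
def update_user(user_id):
    user = User.query.get(user_id)
    if user is None:
        abort(404)  # unknown id, as most samples above do
    body = request.get_json() or {}
    # apply only the fields the caller actually sent
    for field in ("username", "email"):
        if field in body:
            setattr(user, field, body[field])
    db.session.commit()
    return jsonify({"id": user.id, "username": user.username, "email": user.email}), 200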
=============================================================== save_obj(obj, saved_name ) =============================================================== this function is used to save any python object to your hard disk
def save_obj(obj, saved_name ): with open( saved_name + '.pkl', 'wb') as f: pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def save_obj(obj, name):\r\n with open('../pickle/' + name + '.pkl', 'wb') as fout:\r\n pickle.dump(obj, fout, pickle.HIGHEST_PROTOCOL)\r\n # end with\r", "def save_obj(obj, name):\n \n with open(name + '.pkl', 'wb') as objec:\n pickle.dump(obj, objec)", "def save_obj(obj, name):\n with open('../../data/' + name + '.pkl', 'wb') as f:\n pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL)", "def save_obj(obj, name):\n with open('../../data/' + name + '.pkl', 'wb') as f:\n pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL)", "def _save_obj(obj, name):\n with open('/bigdisk/pickles/' + name, 'w') as f:\n pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL)", "def saveObj(obj, name):\n\n os.system(\"touch \" + name + \".pkl\")\n with open(name + '.pkl', 'wb') as f:\n pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL)", "def saveobject(obj, filename):\n # import cPickle as pickle\n with open(filename, 'wb') as output:\n pickle.dump(obj, output, pickle.HIGHEST_PROTOCOL)", "def save_object(obj, filename):\n with open(filename, 'wb') as output_file: # Overwrites any existing file.\n pickle.dump(obj, output_file, pickle.HIGHEST_PROTOCOL)", "def save_object(obj, file_name):\n file_name = osp.abspath(file_name)\n with open(file_name, 'wb') as f:\n pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL)", "def save_object(obj, filename):\n with open(filename, 'wb') as output:\n pickle.dump(obj, output, protocol=2)", "def save_object(obj, filename):\r\n with open(filename, 'wb') as output:\r\n pickle.dump(obj, output)", "def save(obj, filename):\n import pickle\n with open(filename, 'w') as f:\n pickle.dump(obj, f, protocol=pickle.HIGHEST_PROTOCOL)", "def pickleSave(object, filename):\n #Todo: Handle exceptions from pickle\n filehandler = open(\"obj/\" + filename + \".obj\", 'wb')\n pickle.dump(object, filehandler)", "def save_object(self, name: str, obj: object):\r\n with open_(self._path_for_pickle(name), \"wb\") as f:\r\n dill.dump(obj, f)", "def save_obj(obj, path ):\n with open(path, 'wb') as f:\n pickle.dump(obj, f)", "def save_object(obj, filename: str):\n with open(filename, 'wb') as save_file:\n pickle.dump(obj, save_file)", "def save_object(o, fn):\n return dump_object(o, fn)", "def save_to_disk(name, object):\n shortname = _dumpify(_compress_name(name) + '.pkl')\n print 'save_to_disk(%s)' % shortname\n pkl_file = open(shortname , 'wb')\n pickle.dump(object, pkl_file, -1) # Pickle the list using the highest protocol available.\n pkl_file.close()", "def save_obj(obj, path: str):\n with open(path, 'wb') as h:\n pickle.dump(obj, h)", "def save_object(self, name: str, object):\n file_path = self.__get_file_path(name)\n self.__serialize_object(file_path, object)", "def save_object(obj, fpath):\r\n with open(fpath, 'wb') as o:\r\n pickle.dump(obj, o)", "def save_object(path,object):\r\n with open(path,\"wb\") as f:\r\n pickle.dump(object,f,pickle.HIGHEST_PROTOCOL)", "def save_pickle(obj, filename):\n with open(filename, 'wb') as file:\n pickle.dump(obj, file)", "def save(self, obj):", "def save(self, obj, filename):\n if not self.enabled:\n return\n\n # get unique filepath and filename\n index = 0\n while True:\n filepath = join(self.path, filename+\"_\"+str(index))\n if os.path.isfile(filepath):\n index = index + 1\n continue\n break\n\n # save object\n os.makedirs(os.path.dirname(filepath), exist_ok=True)\n with open(filepath, \"wb\") as f:\n try:\n pickle.dump(obj, f)\n except Exception as e:\n log.exception(e)\n log.warning(f\"save failed for {filename} {type(obj)}\")", "def save(obj, obj_name):\n try:\n _save(obj, 
os.path.join(KALE_DATA_DIRECTORY, obj_name))\n except KaleMarshalException as e:\n log.error(e)\n log.debug(\"Original Traceback\", exc_info=e.__traceback__)\n utils.graceful_exit(1)", "def picklesave(obj, path):\n with open(path, 'wb') as file:\n pickle.dump(obj, file)", "def save_obj_joblib(obj, obj_path,obj_name,supersede=False):\n\n obj_path=os.path.join(obj_path,obj_name)\n\n if os.path.isfile(obj_path):\n if supersede:\n try:\n os.remove(obj_path)\n joblib.dump(obj, obj_path)\n print(\"save_obj_joblib: \"+os.path.basename(obj_path)+\" is replaced and saved!\")\n except OSError:\n print(\"save_obj_joblib: Object couldn't be saved\")\n else:\n raise OSError(\"save_obj_joblib: There exists a object with the same name already.\")\n else:\n if os.path.isdir(os.path.dirname(obj_path)):\n pass\n else:\n os.mkdir(os.path.dirname(obj_path))\n joblib.dump(obj, obj_path)\n print(\"save_obj_joblib: \"+os.path.basename(obj_path)+\" is saved!\")", "def save(self, obj):\n raise NotImplementedError", "def pickle_save(file_path, obj):\n with open(file_path, 'wb') as f:\n pickle.dump(obj, f)", "def write_to_file(name, obj):\n\n print 'writing structures to pickle'\n print '----------------------------'\n\n path = os.getcwd() + '/pickles/' + name + '.pkl'\n file = open(path, 'wb')\n pickle.dump(obj, file)\n file.close()", "def save(self, dObj, sFilename):\n\n f = open(sFilename, \"w\")\n p = pickle.Pickler(f)\n p.dump(dObj)\n f.close()", "def save(self, dObj, sFilename):\n\n f = open(sFilename, \"w\")\n p = pickle.Pickler(f)\n p.dump(dObj)\n f.close()", "def save(self, dObj, sFilename):\n\n f = open(sFilename, \"w\")\n p = pickle.Pickler(f)\n p.dump(dObj)\n f.close()", "def pickle_object(obj, ofname: \"Path|str\"):\n ofname = Path(ofname)\n maybe_make_output_dir(ofname)\n with ofname.open(\"wb\") as f:\n pickle.dump(obj, f)", "def save(self, dObj, sFilename):\n\n f = open(sFilename, \"w\")\n p = pickle.Pickler(f)\n p.dump(dObj)\n f.close()", "def write_pickle(obj, file_name):\n with open(file_name, 'wb') as f:\n pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL)", "def save(self):\n\t\tif os.path.exists(\"%s.obj\" % (self.name)):\n\t\t\tos.system(\"rm -f %s.obj\" % (self.name))\n\t\telse: pass\n\t\tfile = open(\"%s.obj\" % (self.name), \"wb\")\n\t\tif callable(self.obj):\n\t\t\tif \"dill\" in sys.modules:\n\t\t\t\ttry:\n\t\t\t\t\tpickle.dump(self.obj, file)\n\t\t\t\texcept:\n\t\t\t\t\twarnings.warn(\"\"\"\\\nCould not pickle function. The following attribute will not be saved with \\\nthis output: %s\"\"\" % (self.name), UserWarning)\n\t\t\t\t\tpickle.dump(None, file)\n\t\t\telse:\n\t\t\t\twarnings.warn(\"\"\"\\\nEncoding functions along with VICE outputs requires the package dill \\\n(installable via pip). 
The following attribute will not be saved with this \\\noutput: %s\"\"\" % (self.name), UserWarning)\n\t\t\t\ttry:\n\t\t\t\t\tpickle.dump(self._default, file)\n\t\t\t\texcept:\n\t\t\t\t\tpickle.dump(None, file)\n\t\telse:\n\t\t\ttry:\n\t\t\t\tpickle.dump(self.obj, file)\n\t\t\texcept:\n\t\t\t\twarnings.warn(\"\"\"Could not save object %s with this VICE \\\noutput.\"\"\" % (self.name), UserWarning)\n\t\t\t\tpickle.dump(None, file)\n\t\tfile.close()", "def save_object(self, filename, data):\n with open(filename, 'wb') as outp: # Overwrites any existing file.\n pickle.dump(data, outp, pickle.HIGHEST_PROTOCOL)", "def serialize_obj(obj, filename):\n\n f = open(filename, 'wb')\n pickle.dump(obj, f)\n f.close()", "def save_pickle(obj, path):\n may_make_dir(osp.dirname(osp.abspath(path)))\n with open(path, 'wb') as f:\n pickle.dump(obj, f, protocol=2)", "def save():", "def savePickle(object, name):\n epoch = time.time()\n filename = name + str(epoch) + \".pkl\" # Save name\n fullPath = path.join(PICKLE_DIR, filename) # Save path\n\n # Get permissions and save the file\n with open(fullPath, \"w\") as outfile:\n pickle.dump(object, outfile)", "def save(object, filename, protocol = 0):\n file = gzip.GzipFile(filename, 'wb')\n file.write(pickle.dumps(object, protocol))\n file.close()", "def write_pickle(obj, path):\n with open(path, 'wb') as file:\n pickle.dump(obj, file)", "def save_pickle(obj, path):\n may_make_dir(osp.dirname(path))\n with open(path, 'w') as f:\n pickle.dump(obj, f)", "def save_object(obj, filename):\n\n result = pickle.dumps(obj)\n with gzip.GzipFile(filename, 'wb') as dest: dest.write(result)\n dest.close()", "def save_pickle(obj,path,mkdirs=True): \n if mkdirs:\n ensure_dir(path)\n with open(path,'wb') as file:\n pickle.dump(obj,file,protocol=pickle.HIGHEST_PROTOCOL)", "def save(object, filename, protocol = 0):\n file = gzip.GzipFile(filename, 'wb')\n file.write(pickle.dumps(object, protocol))\n file.close()", "def saveIntoFile(obj, path=\".\", file_name=\"saved_object.pickle\",\n folder_name=None):\n\n path_with_name = \"%s/%s\" % (path, file_name)\n if folder_name:\n os.makedirs(path_with_name + folder_name)\n path_with_name = \"%s/%s/%s\" % (path, folder_name, file_name)\n try:\n with open(path_with_name, \"wb\") as output:\n pickle.dump(obj, output, pickle.HIGHEST_PROTOCOL)\n print(\"Object has been saved into %s/%s\" % (path, file_name))\n except IOError:\n raise InvalidFilesPath(\"Path: %s\\tfile name: %s\" % (path, file_name))", "def save_dictionary(filename, obj):\n filename = os.path.join(FILE_DIR, 'assets/obj/' + filename)\n with open(filename, 'wb') as output:\n pickle.dump(obj, output, pickle.HIGHEST_PROTOCOL)", "def store(obj, filename, suffix = ''):\n # It is a numpy array\n if type(obj) == np.ndarray:\n path,f = writefile(filename, obj_id='numpy_objs', suffix=suffix)\n json.dump(obj, fp=f, cls=NumpyEncoder,\n separators=(',', ':'), sort_keys=True, indent=4)\n print '> saved with JSON to {}'.format(path)\n else:\n path, f = writefile(filename, obj_id='other_objs', suffix=suffix)\n pickle.dump(obj, file=f)\n print '> saved with dill (pickled) to {}'.format(path)\n return path", "def pickle_write(file_path, obj):\n\n with open(file_path, 'wb') as file:\n pickle.dump(obj, file)", "def save(item,name):\n\n file = open(name,'wb')\n dump(item,file)\n file.close()", "def save_viz_object(viz_object: OrqVizObject, filename: str):\n\n with open(filename, \"wb\") as f:\n pickle.dump(viz_object, f)", "def save_pickle(obj, filename, use_dill=False, protocol=4, 
create_folder=True):\n if create_folder:\n _create_folder_if_not_exist(filename)\n\n # Save\n with open(filename, 'wb') as file:\n if not use_dill:\n pickle.dump(obj, file, protocol=protocol)\n else:\n dill.dump(obj, file)", "def serialize(self): \n with open(self.path+self.name, \"wb\") as pfile:\n pickle.dump(self.pyObj, pfile)", "def save(cls, ob):\n return cls._save_to_avos(cls.__name__, ob)", "def save():\n pass", "def save(obj, f):\n if PartialState().distributed_type == DistributedType.TPU:\n xm.save(obj, f)\n elif PartialState().local_process_index == 0:\n torch.save(obj, f)", "def pickle_object(Object, file):\n filehandler = open(file, 'wb')\n pickle.dump(Object, filehandler)\n filehandler.close()", "def save(self, obj,\n path:str,\n epoch:int=None,\n scores:float or dict=None):\n parent_path = os.path.normpath(os.path.join(path, os.path.pardir))\n if not os.path.exists(parent_path):\n os.makedirs(parent_path)\n\n if epoch is not None:\n obj['epoch'] = epoch\n if scores is not None:\n if isinstance(scores, float):\n obj['score'] = scores\n elif isinstance(scores, dict):\n for key, value in scores.items():\n obj[key] = value\n torch.save(obj, path)\n\n if self.use_wandb:\n self.log_model_artifact(path=path,\n epoch=epoch,\n scores=scores)\n else:\n self.log_message(f\"Saved model in {path}. Using `wandb` to upload model into Weight & Biases.\")", "def write_pk(obj, filename):\n os.makedirs(os.path.dirname(filename), exist_ok=True)\n with open(filename, 'wb') as fd:\n pickle.dump(obj, fd)", "def save(self, filename):\n pass", "def pickle_to_file(obj, path):\n pickle.dump(obj, open(path, 'wb'))", "def save(self,fn=None):\n\n if (fn==None):\n fn = self._name\n\n print(\"\")\n print(\"Saving Simulation Object into session/\"+fn+\".pkl\")\n return save_session_pickle(self,fn)\n print(\"----\")", "def dump(self, obj, bin=1):\n f = self.open(\"wb\")\n import pickle\n\n try:\n error.checked_call(pickle.dump, obj, f, bin)\n finally:\n f.close()", "def serialize(obj):\n return pickle.dumps(obj)", "def pkl_dump(obj, name, path = 'obj'):\n if '.p' not in name:\n name = name + '.pkl'\n path = os.path.join(path, name)\n pickle.dump(obj, open(path, 'wb'))", "def write_pickle(obj, filepath):\n logging.info('Writing pickle file to {}'.format(filepath))\n with open(filepath, 'wb') as outfile:\n pickle.dump(obj, outfile)", "def dump(self, obj):\r\n return self.localpath.dump(obj)", "def dump_pickle(obj, filename):\n with open(filename, 'wb') as handle:\n joblib.dump(obj, handle, protocol=pickle.HIGHEST_PROTOCOL)", "def write_pickle_object_to_file(self, inpobj):\n with gzip.open('%s.tmp' % self.pickle_file, 'wb') as pkl_file:\n pickle.dump(inpobj, pkl_file, pickle.HIGHEST_PROTOCOL)\n run_command('mv %s.tmp %s' % (self.pickle_file, self.pickle_file))\n return True", "def save_dict(obj, name, path):\n with open(os.path.join(path, name) + '.pkl', 'wb') as f:\n to_be_told = f.tell()\n for pair in obj.items():\n pickle.dump(pair, f, pickle.HIGHEST_PROTOCOL)", "def dump_in_pickle(py_obj, filepath):\n\t\twith open(filepath, 'wb') as pfile:\n\t\t\tpickle.dump(py_obj, pfile)", "def write_pickle(self, obj, mode='wb', **kwargs):\n with self.open(mode) as f:\n return pickle.dump(obj, f, **kwargs)", "def save_nn(nn_obj, path):\n if isinstance(nn_obj, dict):\n prn.saveNN(nn_obj, path)\n elif isinstance(nn_obj, Net_tr):\n tr.save(nn_obj, path)\n else:\n print(\"canceled\")", "def save(self):\n pickle.dump(self, open(self.path, \"wb\"))", "def save(self, handler, name):", "def save(self, fname):\n pass", "def 
cache_save(item: str, obj: object) -> None:\n\titem = str(item)\n\tcache = \"Cached/\" + item\n\n\tcache_create()\n\n\tpickle.dump(obj, open(cache, \"wb\"))\n\tuux.show_debug(\"Cached object to \" + cache)", "def dump(object, filename):\n import pickle\n\n filename = filename if filename.endswith('.pic') else (filename + '.pic')\n\n with open(filename, 'wb') as f:\n pickle.dump(object, f, protocol=pickle.HIGHEST_PROTOCOL)", "def save(cls, name, string=False):\n if not string:\n pickle.dump(cls.__data[name], open(cls.dirpath + name + '.p', 'wb'))\n else:\n fd = open(cls.dirpath + name + '.txt', 'wb')\n fd.write(str(cls.__data[name]))\n fd.close()\n del cls.__data[name]", "def save_object(self, object, **kwargs):\n object.save()", "def save_list(obj, name, path):\n\n with open(os.path.join(path, name) + '.pkl', 'wb') as f:\n to_be_told = f.tell()\n for pair in obj:\n pickle.dump(pair, f, pickle.HIGHEST_PROTOCOL)\n return to_be_told", "def dumpPickle(obj, filepath):\n\tf = open(filepath, 'wb')\n\tpickle.dump(obj, f, protocol=2)\n\tf.close()", "def save(cls):\n playerdata = getAttributes(cls)\n Data.object_dump(playerdata, \"savedata.dat\")\n del playerdata", "def write_object(self, kumoobj):\n\n path_format = '{base_dir}/{service}/{file_name}/{start}-{end}/{obj_name}'\n outfile = os.path.realpath(path_format.format(self.base_dir, kumoobj.service, kumoobj.file_name, kumoobj.start,\n kumoobj.end, kumoobj.type))\n\n try:\n with open(outfile, 'wb') as f:\n f.write(kumoobj.content)\n except IOError:\n self.logger.exception('Failed to write {} object'.format(kumoobj.type))", "def save_file_data(name, obj, input_path='/inputs'):\n filename = '{}/{}.pkl'.format(input_path, name)\n if not os.path.exists(os.path.dirname(filename)):\n try:\n os.makedirs(os.path.dirname(filename))\n except OSError as exc: # Guard against race condition\n if exc.errno != errno.EEXIST:\n raise\n with open(filename.format(name), 'wb+') as output:\n pickle.dump(obj, output)", "def save_content(obj):\n filename, _, _, content = obj\n storage = get_storage(config['Instance']['content_dir'])\n return storage.save(filename, ContentFile(content))", "def dump_pickle_data(obj, filename):\n path = \"../tmp/{}.pckl\".format(filename)\n f = open(path, 'wb')\n pickle.dump(obj, f)\n f.close()", "def dump_pkl(obj, path):\r\n f = open(path, 'wb')\r\n try:\r\n cPickle.dump(obj, f, protocol=cPickle.HIGHEST_PROTOCOL)\r\n finally:\r\n f.close()", "def save_to_temp(obj, file_name, xml_file, node, hash_id, routine=\"temp_file\"):\n root = xml_file.getroot()\n try:\n output = root.find(\"saved_files\").attrib[\"path\"]\n except KeyError:\n output = None\n if output is not None:\n dir_name = output\n else:\n dir_name = os.path.dirname(file_name)\n parent_name = os.path.basename(dir_name)\n print(\"Saving \" + routine + \" to file\")\n print(\"Output folder: \" + dir_name)\n model_name = root.attrib['name']\n save_folder = \"/saved_files/\" + routine +\"-\"+model_name+ \"_\" +str(hash_id)+ \".bp\"\n\n filename = dir_name+save_folder\n\n relative_path=dir_name+save_folder\n if not os.path.isdir(dir_name+\"/saved_images\"):\n os.mkdir(dir_name+\"/saved_images\")\n for i in plt.get_fignums():\n if i not in saved_already:\n saved_already.append(i)\n Plotting.figure(i)\n save_image_folder = '/saved_images/'+routine+\"-\"+model_name+'-figure%d.png' % i\n Plotting.savefig(dir_name+save_image_folder)\n try:\n Utility.save(obj, filename)\n except IOError:\n os.mkdir(dir_name+\"/saved_files/\")\n Utility.save(obj, filename)\n node.set('path', 
relative_path)\n root = xml_file.getroot()\n indent(root)\n xml_file.write(file_name)", "def _pickle_sub_obj(\n self,\n sub_obj,\n path\n ):\n with path.open(mode='wb') as f:\n pickle.dump(sub_obj, f, 2)\n return path", "def serialize(obj):\n serial = repr(obj)\n try:\n if eval(serial) == obj:\n return serial\n except:\n pass\n try:\n serial = pickle.dumps(obj)\n return 'pickle.loads(%s)' % repr(serial)\n except:\n raise Exception #unable to serialize", "def save_to_array(arr_name, arr_object):\n return np.save(arr_name, arr_object)", "def save(self):\n data = (\n self.Joints,\n self.Links,\n self.joint_syms,\n self.global_syms,\n self.name,\n self.sym_prefix,\n )\n cloudpickle.dump(data, open(self.save_filename, \"wb\"))", "def save_model(self, filename):\r\n pass", "def save_named_instance(self, name: str, instance):\r\n self.save_object(name, instance)", "def SaveObjectWrapper(args):\n SaveObjectAsHDF5(*args)" ]
[ "0.83799785", "0.83523476", "0.834026", "0.834026", "0.8322952", "0.8320533", "0.81125903", "0.805116", "0.80221593", "0.79732233", "0.7954794", "0.79475254", "0.790895", "0.78999376", "0.7857288", "0.7842152", "0.7814447", "0.7774819", "0.76592255", "0.7642796", "0.76062477", "0.75891984", "0.7582979", "0.75254", "0.7468239", "0.7434214", "0.73950934", "0.72940403", "0.72515553", "0.71275175", "0.7123347", "0.7110147", "0.7110147", "0.7110147", "0.71061176", "0.7055494", "0.7047834", "0.7028982", "0.6972146", "0.6966462", "0.69513494", "0.69505745", "0.6916845", "0.6864548", "0.6828101", "0.68173355", "0.67948425", "0.67446136", "0.67431235", "0.6706339", "0.6705431", "0.6678832", "0.6624393", "0.66205007", "0.6605308", "0.65932924", "0.6584509", "0.6568491", "0.6545518", "0.65423805", "0.65301955", "0.6529156", "0.650722", "0.65024126", "0.65000474", "0.6482701", "0.6453743", "0.6419871", "0.64093375", "0.6398095", "0.6397742", "0.63974285", "0.6395233", "0.63927025", "0.6374023", "0.6367178", "0.63467884", "0.6345083", "0.63313234", "0.63298935", "0.631433", "0.63078856", "0.62827384", "0.6265501", "0.6259959", "0.62572825", "0.6246182", "0.6226477", "0.62224436", "0.62201977", "0.6218267", "0.620935", "0.6203937", "0.61913645", "0.61723655", "0.6168995", "0.6161106", "0.6147336", "0.6143248", "0.61416" ]
0.88148
0
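Taken together, the save_obj document above and the load_obj document in the next record form a pickle round trip. A self-contained sketch follows; the example payload and file name are arbitrary illustrations, not part of either record.

import pickle

def save_obj(obj, saved_name):
    # write any picklable Python object to <saved_name>.pkl on disk
    with open(saved_name + '.pkl', 'wb') as f:
        pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL)

def load_obj(saved_name):
    # read the object back from <saved_name>.pkl
    with open(saved_name + '.pkl', 'rb') as f:
        return pickle.load(f)

# usage: save a dict, then load it back and check the round trip
save_obj({'lr': 0.01, 'epochs': 10}, 'params')
assert load_obj('params') == {'lr': 0.01, 'epochs': 10}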
=============================================================== load_obj(saved_name) =============================================================== this function is used to load a previously saved python object back from your hard disk
def load_obj(saved_name): with open( saved_name + '.pkl', 'rb') as f: return pickle.load(f)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def save_obj(obj, saved_name ):\n with open( saved_name + '.pkl', 'wb') as f:\n pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL)", "def save_obj(obj, name):\n with open('../../data/' + name + '.pkl', 'wb') as f:\n pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL)", "def save_obj(obj, name):\n with open('../../data/' + name + '.pkl', 'wb') as f:\n pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL)", "def save_obj(obj, name):\r\n with open('../pickle/' + name + '.pkl', 'wb') as fout:\r\n pickle.dump(obj, fout, pickle.HIGHEST_PROTOCOL)\r\n # end with\r", "def save_obj(obj, name):\n \n with open(name + '.pkl', 'wb') as objec:\n pickle.dump(obj, objec)", "def _save_obj(obj, name):\n with open('/bigdisk/pickles/' + name, 'w') as f:\n pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL)", "def saveObj(obj, name):\n\n os.system(\"touch \" + name + \".pkl\")\n with open(name + '.pkl', 'wb') as f:\n pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL)", "def load_obj(name):\r\n with open('../pickle/' + name + '.pkl', 'rb') as fout:\r\n return pickle.load(fout)\r\n # end with\r", "def load_obj(name):\n with open('../../data/' + name + '.pkl', 'rb') as f:\n return pickle.load(f)", "def load_obj(name):\n with open('../../data/' + name + '.pkl', 'rb') as f:\n return pickle.load(f)", "def loadObj(name):\n\n with open(name + '.pkl', 'rb') as f:\n return pickle.load(f)", "def save_object(obj, file_name):\n file_name = osp.abspath(file_name)\n with open(file_name, 'wb') as f:\n pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL)", "def _load_obj(name):\n with open('/bigdisk/pickles/' + name, 'r') as f:\n return pickle.load(f)", "def save_obj(obj, path ):\n with open(path, 'wb') as f:\n pickle.dump(obj, f)", "def save_object(self, name: str, obj: object):\r\n with open_(self._path_for_pickle(name), \"wb\") as f:\r\n dill.dump(obj, f)", "def saveobject(obj, filename):\n # import cPickle as pickle\n with open(filename, 'wb') as output:\n pickle.dump(obj, output, pickle.HIGHEST_PROTOCOL)", "def save_object(obj, filename):\n with open(filename, 'wb') as output:\n pickle.dump(obj, output, protocol=2)", "def save_object(obj, filename):\r\n with open(filename, 'wb') as output:\r\n pickle.dump(obj, output)", "def pickleSave(object, filename):\n #Todo: Handle exceptions from pickle\n filehandler = open(\"obj/\" + filename + \".obj\", 'wb')\n pickle.dump(object, filehandler)", "def save_to_disk(name, object):\n shortname = _dumpify(_compress_name(name) + '.pkl')\n print 'save_to_disk(%s)' % shortname\n pkl_file = open(shortname , 'wb')\n pickle.dump(object, pkl_file, -1) # Pickle the list using the highest protocol available.\n pkl_file.close()", "def save_object(obj, filename):\n with open(filename, 'wb') as output_file: # Overwrites any existing file.\n pickle.dump(obj, output_file, pickle.HIGHEST_PROTOCOL)", "def save_obj(obj, path: str):\n with open(path, 'wb') as h:\n pickle.dump(obj, h)", "def save_object(path,object):\r\n with open(path,\"wb\") as f:\r\n pickle.dump(object,f,pickle.HIGHEST_PROTOCOL)", "def pickle_object(obj, ofname: \"Path|str\"):\n ofname = Path(ofname)\n maybe_make_output_dir(ofname)\n with ofname.open(\"wb\") as f:\n pickle.dump(obj, f)", "def save(obj, filename):\n import pickle\n with open(filename, 'w') as f:\n pickle.dump(obj, f, protocol=pickle.HIGHEST_PROTOCOL)", "def save_object(obj, filename: str):\n with open(filename, 'wb') as save_file:\n pickle.dump(obj, save_file)", "def save_object(obj, fpath):\r\n with open(fpath, 'wb') as o:\r\n pickle.dump(obj, o)", "def save_object(self, name: str, object):\n file_path = 
self.__get_file_path(name)\n self.__serialize_object(file_path, object)", "def save_pickle(obj, filename):\n with open(filename, 'wb') as file:\n pickle.dump(obj, file)", "def save_object(o, fn):\n return dump_object(o, fn)", "def pickleLoad(filename):\n #Todo: Handle exceptions from pickle\n filehandler = open(\"obj/\" + filename + \".obj\", 'rb')\n object = pickle.load(filehandler)\n return object", "def picklesave(obj, path):\n with open(path, 'wb') as file:\n pickle.dump(obj, file)", "def save(obj, obj_name):\n try:\n _save(obj, os.path.join(KALE_DATA_DIRECTORY, obj_name))\n except KaleMarshalException as e:\n log.error(e)\n log.debug(\"Original Traceback\", exc_info=e.__traceback__)\n utils.graceful_exit(1)", "def load_object(filename):\n with open(filename, 'rb') as input_file: # Overwrites any existing file.\n obj = pickle.load(input_file)\n return obj", "def serialize(self): \n with open(self.path+self.name, \"wb\") as pfile:\n pickle.dump(self.pyObj, pfile)", "def save_obj_joblib(obj, obj_path,obj_name,supersede=False):\n\n obj_path=os.path.join(obj_path,obj_name)\n\n if os.path.isfile(obj_path):\n if supersede:\n try:\n os.remove(obj_path)\n joblib.dump(obj, obj_path)\n print(\"save_obj_joblib: \"+os.path.basename(obj_path)+\" is replaced and saved!\")\n except OSError:\n print(\"save_obj_joblib: Object couldn't be saved\")\n else:\n raise OSError(\"save_obj_joblib: There exists a object with the same name already.\")\n else:\n if os.path.isdir(os.path.dirname(obj_path)):\n pass\n else:\n os.mkdir(os.path.dirname(obj_path))\n joblib.dump(obj, obj_path)\n print(\"save_obj_joblib: \"+os.path.basename(obj_path)+\" is saved!\")", "def load_obj(path):\n with open(path, 'rb') as f:\n return pickle.load(f)", "def pickle_save(file_path, obj):\n with open(file_path, 'wb') as f:\n pickle.dump(obj, f)", "def load_object(filename):\r\n with open(filename, 'rb') as input:\r\n obj = pickle.load(input)\r\n return obj", "def write_to_file(name, obj):\n\n print 'writing structures to pickle'\n print '----------------------------'\n\n path = os.getcwd() + '/pickles/' + name + '.pkl'\n file = open(path, 'wb')\n pickle.dump(obj, file)\n file.close()", "def save(self):\n\t\tif os.path.exists(\"%s.obj\" % (self.name)):\n\t\t\tos.system(\"rm -f %s.obj\" % (self.name))\n\t\telse: pass\n\t\tfile = open(\"%s.obj\" % (self.name), \"wb\")\n\t\tif callable(self.obj):\n\t\t\tif \"dill\" in sys.modules:\n\t\t\t\ttry:\n\t\t\t\t\tpickle.dump(self.obj, file)\n\t\t\t\texcept:\n\t\t\t\t\twarnings.warn(\"\"\"\\\nCould not pickle function. The following attribute will not be saved with \\\nthis output: %s\"\"\" % (self.name), UserWarning)\n\t\t\t\t\tpickle.dump(None, file)\n\t\t\telse:\n\t\t\t\twarnings.warn(\"\"\"\\\nEncoding functions along with VICE outputs requires the package dill \\\n(installable via pip). 
The following attribute will not be saved with this \\\noutput: %s\"\"\" % (self.name), UserWarning)\n\t\t\t\ttry:\n\t\t\t\t\tpickle.dump(self._default, file)\n\t\t\t\texcept:\n\t\t\t\t\tpickle.dump(None, file)\n\t\telse:\n\t\t\ttry:\n\t\t\t\tpickle.dump(self.obj, file)\n\t\t\texcept:\n\t\t\t\twarnings.warn(\"\"\"Could not save object %s with this VICE \\\noutput.\"\"\" % (self.name), UserWarning)\n\t\t\t\tpickle.dump(None, file)\n\t\tfile.close()", "def load_object(path):\r\n with open(path,\"rb\") as f:\r\n object = pickle.load(f) \r\n return object", "def load_obj(path: str):\n with open(path, 'rb') as h:\n return pickle.load(h)", "def dump_in_pickle(py_obj, filepath):\n\t\twith open(filepath, 'wb') as pfile:\n\t\t\tpickle.dump(py_obj, pfile)", "def load_object(fpath):\r\n with open(fpath, 'rb') as i:\r\n return pickle.load(i)", "def save_pickle(obj, path):\n may_make_dir(osp.dirname(osp.abspath(path)))\n with open(path, 'wb') as f:\n pickle.dump(obj, f, protocol=2)", "def save_dictionary(filename, obj):\n filename = os.path.join(FILE_DIR, 'assets/obj/' + filename)\n with open(filename, 'wb') as output:\n pickle.dump(obj, output, pickle.HIGHEST_PROTOCOL)", "def serialize_obj(obj, filename):\n\n f = open(filename, 'wb')\n pickle.dump(obj, f)\n f.close()", "def loadFromFile(file_name=\"saved_object.pickle\"):\n\n try:\n with open(file_name, \"rb\") as inputToLoad:\n loaded_object = pickle.load(inputToLoad)\n return loaded_object\n except IOError:\n raise InvalidFilesPath\n except ImportError as e:\n raise InvalidFile(\n \"Structure of project has been changed since saving this object: %s\" % str(e))\n except TypeError:\n return pickle.load(file_name)", "def save(self, obj, filename):\n if not self.enabled:\n return\n\n # get unique filepath and filename\n index = 0\n while True:\n filepath = join(self.path, filename+\"_\"+str(index))\n if os.path.isfile(filepath):\n index = index + 1\n continue\n break\n\n # save object\n os.makedirs(os.path.dirname(filepath), exist_ok=True)\n with open(filepath, \"wb\") as f:\n try:\n pickle.dump(obj, f)\n except Exception as e:\n log.exception(e)\n log.warning(f\"save failed for {filename} {type(obj)}\")", "def load_from_disk(name):\n shortname = _dumpify(_compress_name(name) + '.pkl')\n print 'load_from_disk(%s)' % shortname\n pkl_file = open(shortname, 'rb')\n object = pickle.load(pkl_file)\n pkl_file.close()\n return object", "def save_object(self, filename, data):\n with open(filename, 'wb') as outp: # Overwrites any existing file.\n pickle.dump(data, outp, pickle.HIGHEST_PROTOCOL)", "def load_object(self, name: str):\r\n with open_(self._path_for_pickle(name), \"rb\") as f:\r\n return dill.load(f)", "def pickle_object(Object, file):\n filehandler = open(file, 'wb')\n pickle.dump(Object, filehandler)\n filehandler.close()", "def save_pickle(obj, path):\n may_make_dir(osp.dirname(path))\n with open(path, 'w') as f:\n pickle.dump(obj, f)", "def load_object(filepath):\n with open(filepath, 'rb') as f:\n obj = pickle.load(f)\n return obj", "def dump_pickle_data(obj, filename):\n path = \"../tmp/{}.pckl\".format(filename)\n f = open(path, 'wb')\n pickle.dump(obj, f)\n f.close()", "def save(self, dObj, sFilename):\n\n f = open(sFilename, \"w\")\n p = pickle.Pickler(f)\n p.dump(dObj)\n f.close()", "def save(self, dObj, sFilename):\n\n f = open(sFilename, \"w\")\n p = pickle.Pickler(f)\n p.dump(dObj)\n f.close()", "def save(self, dObj, sFilename):\n\n f = open(sFilename, \"w\")\n p = pickle.Pickler(f)\n p.dump(dObj)\n f.close()", "def pkl_dump(obj, name, 
path = 'obj'):\n if '.p' not in name:\n name = name + '.pkl'\n path = os.path.join(path, name)\n pickle.dump(obj, open(path, 'wb'))", "def obj2pickle(obj, file):\n # Ensure parent directory and necesary file structure exists\n pardir = os.path.dirname(file)\n if pardir.strip() != \"\": # ensure pardir is not an empty string\n if not os.path.exists(pardir):\n os.makedirs(pardir)\n\n with open(file, mode=\"wb\") as fileObj:\n pickle.dump(obj, fileObj, protocol=2) # compatible with py2.7 and 3.x", "def save(self, dObj, sFilename):\n\n f = open(sFilename, \"w\")\n p = pickle.Pickler(f)\n p.dump(dObj)\n f.close()", "def savePickle(object, name):\n epoch = time.time()\n filename = name + str(epoch) + \".pkl\" # Save name\n fullPath = path.join(PICKLE_DIR, filename) # Save path\n\n # Get permissions and save the file\n with open(fullPath, \"w\") as outfile:\n pickle.dump(object, outfile)", "def save_pickle(obj,path,mkdirs=True): \n if mkdirs:\n ensure_dir(path)\n with open(path,'wb') as file:\n pickle.dump(obj,file,protocol=pickle.HIGHEST_PROTOCOL)", "def _pickle_sub_obj(\n self,\n sub_obj,\n path\n ):\n with path.open(mode='wb') as f:\n pickle.dump(sub_obj, f, 2)\n return path", "def pkl_load(name, path = 'obj'):\n if '.p' not in name:\n name = name + '.pkl'\n path = os.path.join(path, name)\n try:\n obj = pickle.load(open(path, 'rb'))\n except FileNotFoundError:\n obj = None\n return obj", "def load(self,fname=None):\r\n if fname == None:\r\n fname = \"school_%s.save\" % self.name_full\r\n fname = fname.replace(\"'\", \"\")\r\n fname = fname.replace(\". \", \"_\")\r\n fname = fname.replace(\" \", \"_\")\r\n fname = fname.replace(\"'\", \"\")\r\n thisdir = os.getcwd()\r\n tempdir = \"TEMP\"\r\n\r\n if os.path.isdir(os.path.join(thisdir, \"TEMP\")):\r\n tempdir = os.path.join(thisdir, \"TEMP\")\r\n elif os.path.isdir(os.path.join(thisdir, \"..\", \"TEMP\")):\r\n tempdir = os.path.join(thisdir, \"..\", \"TEMP\")\r\n\r\n if os.path.isdir(tempdir):\r\n os.chdir(tempdir)\r\n\r\n if os.path.isfile(fname):\r\n infile = open(fname, \"rb\")\r\n #self = pickle.load(infile)\r\n if self.VERBOSE > 0:\r\n print \"\\tOpened file '%s' OK...\" % fname\r\n temp_school = School()\r\n temp_school = pickle.load(infile)\r\n #self = pickle.load(infile)\r\n #temp_school = pickle.load(fname)\r\n #temp_school.load(infile)\r\n infile.close()\r\n\r\n for prop in temp_school.__dict__.keys():\r\n if self.VERBOSE > 0:\r\n print(\"loading '%s'...\" % prop), \r\n self.__dict__[prop] = temp_school.__dict__[prop]\r\n if self.VERBOSE > 0:\r\n print \"\\t...OK\"\r\n\r\n else:\r\n print \"ERROR: CAN'T FIND SAVE FILE '%s'!\" % fname\r\n os.chdir(thisdir)\r\n return -1\r\n\r\n #rebuild the Font object we couldn't pickle\r\n if self.sign_font_name != None:\r\n if os.path.isdir(os.path.join(\"..\", \"fonts\")):\r\n if self.sign_font_size != None:\r\n if GRAPHICSMODE == \"PIL\":\r\n from PIL import ImageFont\r\n self.sign_font = ImageFont.truetype(os.path.join(\"..\", \"fonts\", self.sign_font_name), self.sign_font_size, encoding='unic')\r\n elif GRAPHICSMODE == \"PyGame\":\r\n self.sign_font = pygame.font.Font(os.path.join(\"..\", \"fonts\", self.sign_font_name), self.sign_font_size)\r\n elif os.path.isdir(\"fonts\"):\r\n if self.sign_font_size != None:\r\n if GRAPHICSMODE == \"PIL\":\r\n from PIL import ImageFont\r\n self.sign_font = ImageFont.truetype(os.path.join(\"fonts\", self.sign_font_name), self.sign_font_size, encoding='unic')\r\n elif GRAPHICSMODE == \"PyGame\":\r\n self.sign_font = pygame.font.Font(os.path.join(\"fonts\", 
self.sign_font_name), self.sign_font_size)\r\n\r\n #rebuild the self.badge image that we couldn't pickle\r\n if self.badge != None:\r\n badge_fname = self.badge\r\n if os.path.isfile(badge_fname):\r\n if GRAPHICSMODE == \"PyGame\":\r\n pygame.image.load(self.badge,badge_fname)\r\n elif GRAPHICSMODE == \"PIL\":\r\n self.badge = PilImage.open(badge_fname)\r\n\r\n #rebuild the self.sign image that we couldn't pickle\r\n if self.sign != None:\r\n sign_fname = self.sign\r\n if os.path.isfile(sign_fname):\r\n if GRAPHICSMODE == \"PyGame\":\r\n pygame.image.load(self.sign,sign_fname)\r\n elif GRAPHICSMODE == \"PIL\":\r\n self.badge = PilImage.open(sign_fname)\r\n\r\n os.chdir(thisdir)\r\n return None", "def _restore_sub_obj(\n self,\n attr_name: pathlib.Path\n ):\n return pickle.load(attr_name.open(mode=\"rb\"))", "def write_pickle(obj, file_name):\n with open(file_name, 'wb') as f:\n pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL)", "def save(object, filename, protocol = 0):\n file = gzip.GzipFile(filename, 'wb')\n file.write(pickle.dumps(object, protocol))\n file.close()", "def dumpPickle(obj, filepath):\n\tf = open(filepath, 'wb')\n\tpickle.dump(obj, f, protocol=2)\n\tf.close()", "def save(self, obj):", "def dump_pkl(obj, path):\r\n f = open(path, 'wb')\r\n try:\r\n cPickle.dump(obj, f, protocol=cPickle.HIGHEST_PROTOCOL)\r\n finally:\r\n f.close()", "def save_pickle(obj, filename, use_dill=False, protocol=4, create_folder=True):\n if create_folder:\n _create_folder_if_not_exist(filename)\n\n # Save\n with open(filename, 'wb') as file:\n if not use_dill:\n pickle.dump(obj, file, protocol=protocol)\n else:\n dill.dump(obj, file)", "def save(self,fname=None):\r\n if fname == None:\r\n fname = \"school_%s.save\" % self.name_full\r\n fname = fname.replace(\"'\", \"\")\r\n fname = fname.replace(\". 
\", \"_\")\r\n fname = fname.replace(\" \", \"_\")\r\n fname = fname.replace(\"'\", \"\")\r\n thisdir = os.getcwd()\r\n tempdir = \"TEMP\"\r\n\r\n if os.path.isdir(os.path.join(thisdir, \"TEMP\")):\r\n tempdir = os.path.join(thisdir, \"TEMP\")\r\n #elif os.path.isdir(os.path.join(thisdir, \"..\", \"TEMP\")):\r\n # tempdir = os.path.join(thisdir, \"..\", \"TEMP\")\r\n\r\n if os.path.isdir(tempdir):\r\n os.chdir(tempdir)\r\n\r\n\r\n #if we try pickling as is we get following error message:\r\n #raise TypeError, \"can't pickle %s objects\" % base.__name__\r\n #TypeError: can't pickle Font objects\r\n\r\n #set sign_font to NONE to get around this\r\n #we have both self.sign_font_name and self.sign_font_size set,\r\n # so we should be able to reconstuct the FOnt object fron those on re-loading \r\n\r\n self.sign_font = None\r\n #school.sign_font_name = signFont[1]\r\n #school.sign_font_size = BIGFONT_SIZE\r\n\r\n #we can't pickle a pygame Surface\r\n #and pygame_sdl2 doesn's have the\r\n #pygame.image.tostring() method.\r\n #\r\n #We'll just have to save an image file, and\r\n #use the filename as the badge contents in\r\n #our save file\r\n #self.badge = pygame.image.tostring(self.badge)\r\n badge_fname = \"school_%s_badge.png\" % self.name_full\r\n badge_fname = string.replace(badge_fname, \" \", \"_\")\r\n badge_fname = string.replace(badge_fname, \"'\", \"\")\r\n self.badge_fname = badge_fname\r\n if GRAPHICSMODE == \"PyGame\":\r\n if type(self.badge) == StringType:\r\n pass # must have already saved it?\r\n else:\r\n print \"self.badge:\", self.badge\r\n print \"badge_fname:\", badge_fname\r\n pygame.image.save(self.badge,badge_fname)\r\n elif GRAPHICSMODE == \"PIL\":\r\n #not really needed, since we can pickle PIL objects,\r\n #but doing it to stay consistent with Pygame\r\n self.badge.save(badge_fname, \"PNG\")\r\n self.badge = badge_fname\r\n\r\n outfile = open(fname, \"wb\")\r\n #pickle.dump(self, outfile)\r\n try:\r\n pickle.dump(self, outfile)\r\n except:\r\n print \"!!! CANNOT FUCKING PICKLE !!!\"\r\n for q in self.__dict__.keys():\r\n print \"\\tremoving '%s'\"% q\r\n self.__dict__[q] = None\r\n try:\r\n pickle.dump(self, outfile)\r\n print \"\\t !!! IT WORKED !!!\"\r\n print \"'%s' WAS THE PROBLEM!\" % q\r\n print\r\n break\r\n except:\r\n print \"\\tNOPE.. 
STILL FAILED...\"\r\n \r\n pickle.dump(self, outfile)\r\n \r\n outfile.close()\r\n\r\n if self.VERBOSE > 0:\r\n print \"wrote file '%s' OK\" % fname\r\n\r\n os.chdir(thisdir)\r\n return fname", "def saveIntoFile(obj, path=\".\", file_name=\"saved_object.pickle\",\n folder_name=None):\n\n path_with_name = \"%s/%s\" % (path, file_name)\n if folder_name:\n os.makedirs(path_with_name + folder_name)\n path_with_name = \"%s/%s/%s\" % (path, folder_name, file_name)\n try:\n with open(path_with_name, \"wb\") as output:\n pickle.dump(obj, output, pickle.HIGHEST_PROTOCOL)\n print(\"Object has been saved into %s/%s\" % (path, file_name))\n except IOError:\n raise InvalidFilesPath(\"Path: %s\\tfile name: %s\" % (path, file_name))", "def load_object(self, filename):\n with open(filename, 'rb') as inp: # Overwrites any existing file.\n data = pickle.load(inp)\n return data", "def can(obj, file, protocol=2):\n if type(file) is str: f=open(file,'wb')\n else: f=file\n\n cPickle.dump(obj, f, protocol=protocol)\n\n if type(file) is str: f.close()", "def save_file_data(name, obj, input_path='/inputs'):\n filename = '{}/{}.pkl'.format(input_path, name)\n if not os.path.exists(os.path.dirname(filename)):\n try:\n os.makedirs(os.path.dirname(filename))\n except OSError as exc: # Guard against race condition\n if exc.errno != errno.EEXIST:\n raise\n with open(filename.format(name), 'wb+') as output:\n pickle.dump(obj, output)", "def pickle(file_path, obj, protocol=2):\n if not file_path.endswith('.pkl'):\n file_path += '.pkl'\n with open(file_path, 'wb') as opdwf:\n pk.dump(obj, opdwf, protocol=protocol)", "def test_save_load(self):\n tmpfile = os.path.join(os.path.dirname(__file__), \"tmp.pkl\")\n self.p.save(tmpfile)\n _ = Parameters.load(tmpfile)\n os.remove(tmpfile)\n with self.assertRaises(OSError):\n self.p.load(\"__________\")\n\n with open(tmpfile, \"wb\") as f:\n pickle.dump({}, f)\n with self.assertRaises(AttributeError):\n self.p.load(tmpfile)\n os.remove(tmpfile)", "def write_pickle(obj, path):\n with open(path, 'wb') as file:\n pickle.dump(obj, file)", "def save(object, filename, protocol = 0):\n file = gzip.GzipFile(filename, 'wb')\n file.write(pickle.dumps(object, protocol))\n file.close()", "def load_viz_object(filename: str) -> OrqVizObject:\n\n with open(filename, \"rb\") as f:\n loaded_object = pickle.load(f)\n\n return loaded_object", "def load(self, which):\n\t\tpath = os.path.join(self.storagedir, which)\n\t\tprint(\"Loading from\", path)\n\t\twith open(path, \"rb\") as handle:\n\t\t\tsetattr(self, which, _pickle.load(handle))", "def load(cls,filename):\n obj = None\n f = open(filename,'r')\n try:\n obj = pickle.load(f)\n obj.filename = filename\n finally:\n f.close()\n return obj", "def save_dict(obj, name, path):\n with open(os.path.join(path, name) + '.pkl', 'wb') as f:\n to_be_told = f.tell()\n for pair in obj.items():\n pickle.dump(pair, f, pickle.HIGHEST_PROTOCOL)", "def serialize(obj):\n serial = repr(obj)\n try:\n if eval(serial) == obj:\n return serial\n except:\n pass\n try:\n serial = pickle.dumps(obj)\n return 'pickle.loads(%s)' % repr(serial)\n except:\n raise Exception #unable to serialize", "def dump_pickle(obj, filename):\n with open(filename, 'wb') as handle:\n joblib.dump(obj, handle, protocol=pickle.HIGHEST_PROTOCOL)", "def pickle_to_file(obj, path):\n pickle.dump(obj, open(path, 'wb'))", "def save_object(obj, filename):\n\n result = pickle.dumps(obj)\n with gzip.GzipFile(filename, 'wb') as dest: dest.write(result)\n dest.close()", "def read_write(self,obj):\n 
rf.write(self.pickle_file,obj)\n self.assertEqual(rf.read(self.pickle_file), obj)", "def save_viz_object(viz_object: OrqVizObject, filename: str):\n\n with open(filename, \"wb\") as f:\n pickle.dump(viz_object, f)", "def get_object(fname, fmethod='rb'):\n with open(model_dir/fname, fmethod) as f:\n return pickle.load(f) if '.pkl' in fname else f.read()", "def save(self, obj):\n raise NotImplementedError", "def loadGame(self, save_name: str, save_location: str) -> None:\n self.save_location = (\n f\"{save_location}/{save_name}\"\n if save_name is not None and save_location is not None\n else f\"{DEFAULT_SAVE_LOCATION}/{DEFAULT_SAVE_NAME}\"\n )\n\n save_data_as_string = \"\"\n # Check to see if that save file exists, if not populate it with default data so that the game can start\n if not os.path.exists(self.save_location):\n with open(SAVE_TEMPLATE, \"r\") as File:\n save_data_as_string = File.read()\n\n with open(self.save_location, \"w\") as File:\n File.write(save_data_as_string)\n else:\n with open(self.save_location, \"r\") as File:\n save_data_as_string = File.read()\n\n # Now load the data into the object as a dictionary\n return json.loads(save_data_as_string)", "def read_object(filename: str) -> Any:\n with open(filename, 'rb') as read_file:\n obj = pickle.load(read_file)\n\n return obj", "def dump(self, obj, bin=1):\n f = self.open(\"wb\")\n import pickle\n\n try:\n error.checked_call(pickle.dump, obj, f, bin)\n finally:\n f.close()", "def dump(object, filename):\n import pickle\n\n filename = filename if filename.endswith('.pic') else (filename + '.pic')\n\n with open(filename, 'wb') as f:\n pickle.dump(object, f, protocol=pickle.HIGHEST_PROTOCOL)" ]
[ "0.81122315", "0.7685448", "0.7685448", "0.7588679", "0.75443125", "0.74489355", "0.74238276", "0.7387553", "0.730978", "0.730978", "0.7296159", "0.7197405", "0.71187323", "0.7099137", "0.7085125", "0.7082581", "0.70419323", "0.70103645", "0.70006555", "0.6940859", "0.6908531", "0.6907691", "0.6880611", "0.6829744", "0.6826534", "0.6794035", "0.6790917", "0.6768082", "0.674998", "0.6625734", "0.6614205", "0.65992874", "0.6588684", "0.6570708", "0.654935", "0.6541425", "0.6493149", "0.64887905", "0.6481155", "0.6478937", "0.6472468", "0.64581937", "0.64421374", "0.6437376", "0.6431249", "0.64172214", "0.63837767", "0.6369312", "0.6363702", "0.6343277", "0.6288247", "0.6283156", "0.62450993", "0.6234331", "0.62338203", "0.6221244", "0.6179953", "0.61770064", "0.61770064", "0.61770064", "0.6174123", "0.61284584", "0.612577", "0.6114803", "0.6105895", "0.60973525", "0.6087003", "0.60749507", "0.60572684", "0.603803", "0.6024563", "0.6011705", "0.60112125", "0.60044545", "0.59977525", "0.5980816", "0.5974147", "0.59636927", "0.5954716", "0.59423643", "0.59281707", "0.5925939", "0.5923063", "0.59215933", "0.5894972", "0.5891331", "0.5890183", "0.58745235", "0.5873195", "0.5872711", "0.5872497", "0.58686554", "0.58624315", "0.5837832", "0.58368593", "0.583431", "0.5814327", "0.58063513", "0.58018696", "0.5789967" ]
0.82277983
0
=========================================================== DateFormatedSQL(x) =========================================================== this function converts the date read from a list to a datetime format
def DateFormatedSQL(x): x=[i[0] for i in x] x1=[] for i in x: if len(i)==19: x1.append(datetime.datetime(int(i[:4]),int(i[5:7]),int(i[8:10]),int(i[11:13]),int(i[14:16]),int(i[17:18]) )) # elif len(i)==13: # x1.append(datetime.datetime(int(i[:4]),int(i[5:7]),int(i[8:10]),int(i[11:13]),int(0),int(0) )) # else: # x1.append(datetime.datetime(int(i[:4]),int(i[5:7]),int(i[8:10]),int(0),int(0),int(0) )) # del i,x return x1
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def change_format_from_input_to_datetime(list_d_t_t):\n data_output = []\n\n for row in list_d_t_t:\n data_output.append([datetime.datetime.strptime(row[0] + \" \" + row[1], \"%Y-%m-%d %H:%M:%S\"),\n datetime.datetime.strptime(row[0] + \" \" + row[2], \"%Y-%m-%d %H:%M:%S\")])\n\n return data_output", "def sql_date(date):\n return \"to_date('{}', 'dd.mm.yyyy')\".format(date)", "def DateFormated(x):\n \n x1=[]\n for i in x:\n if len(i)==19:\n x1.append(datetime.datetime(int(i[:4]),int(i[5:7]),int(i[8:10]),int(i[11:13]),int(i[14:16]),int(i[17:18]) ))\n# elif len(i)==13:\n# x1.append(datetime.datetime(int(i[:4]),int(i[5:7]),int(i[8:10]),int(i[11:13]),int(0),int(0) ))\n# else:\n# x1.append(datetime.datetime(int(i[:4]),int(i[5:7]),int(i[8:10]),int(0),int(0),int(0) ))\n# del i,x\n return x1", "def datetime_to_sql(connection, obj):\n return connection.string_literal(datetime_to_str(obj))", "def _to_date(self, x):\n if isinstance(x, datetime.datetime):\n return x.date()\n return x", "def get_date():\n temp = pd.read_sql_query(_query['date'], connect())\n return temp.values", "def to_date(d, date_format = \"%Y-%m-%d %H:%M:%S.%f\"):\n if type(d) == pd.core.series.Series:\n d = list(d)\n if type(d) == list:\n return [datetime.strptime(date,date_format) if type(date) == str else date for date in d]\n elif type(d) == str:\n return datetime.strptime(d,date_format)\n else:\n raise ValueError(\"Either String or list of Strings is accepted.\")", "def convert_datetime_objs(list_of_dates):\n datetime_list = []\n for date in list_of_dates:\n date_obj = datetime.datetime.strptime(date, '%d.%m.%Y')\n datetime_list.append(date_obj)\n return datetime_list", "def datetimefstr(date_list, datetimeformat, longdatetimeformat):\n try:\n # including year\n parts = longdatetimeformat.count(' ') + 1\n dtstring = ' '.join(date_list[0:parts])\n dtstart = datetime.strptime(dtstring, longdatetimeformat)\n for _ in range(parts):\n date_list.pop(0)\n except ValueError:\n # without year\n parts = datetimeformat.count(' ') + 1\n dtstring = ' '.join(date_list[0:parts])\n dtstart = datetime.strptime(dtstring, datetimeformat)\n if dtstart.timetuple()[0] == 1900:\n dtstart = datetime(date.today().timetuple()[0],\n *dtstart.timetuple()[1:5])\n # if start date lies in the past use next year\n #if dtstart < datetime.today():\n #dtstart = datetime(dtstart.timetuple()[0] + 1,\n #*dtstart.timetuple()[1:6])\n for _ in range(parts):\n date_list.pop(0)\n return dtstart", "def timefstr(date_list, timeformat):\n time_start = time.strptime(date_list[0], timeformat)\n time_start = dtime(*time_start[3:5])\n day_start = date.today()\n dtstart = datetime.combine(day_start, time_start)\n date_list.pop(0)\n return dtstart", "def fixDate(weatherRDDRecord):\n fieldList = weatherRDDRecord.split(\",\")\n fieldList = [i.replace('\\\"', '') for i in fieldList] #remove quotation marks\n fieldList[0] = fieldList[0].replace('-', '/')\n \n swapDateOrder = fieldList[0].split('/')\n fieldList[0] = swapDateOrder[2] + '/' + swapDateOrder[1] + '/' + swapDateOrder[0]\n \n return (fieldList[0],(fieldList[1:]))", "def convert_column_dates2str(self, info_in, output='list'):\n if hasattr(info_in, 'keys'):\n items = [(el, el) for el in self._columns.keys()]\n elif hasattr(info_in, '__getitem__'):\n items = [(ii, el) for ii,el in enumerate(self._columns.keys())]\n else:\n raise Exception('Only accepts dict, dict or list')\n \n if output == 'dict':\n return dict([(el1, self.date2str(info_in[el0])) if self.column_is_date[el1] else (el1, info_in[el0]) for el0, el1 in 
items])\n elif output == 'list':\n return [self.date2str(info_in[el0]) if self.column_is_date[el1] else info_in[el0] for el0, el1 in items]\n else:\n raise Exception('output type %s unkown'%output)", "def convert_date_string(df,col_name):\n df[col_name] = pd.to_datetime(df[col_name], infer_datetime_format=True)\n return df", "def build_date():\n def r(x):\n return tuple(ord(i) for i in x)\n return r", "def __call__(self, x: Sequence[datetime]) -> Sequence[str]:\n if self.tz is not None:\n x = [d.astimezone(self.tz) for d in x]\n return [d.strftime(self.fmt) for d in x]", "def sas_date_converter(row, base_date='1960-01-01'):\n if row is None:\n return row\n return datetime.strptime(base_date, '%Y-%m-%d') + timedelta(int(row))", "def _convert(frame):\n frame = frame.convert_objects(convert_numeric=True)\n for column in frame:\n if column in c.dates:\n frame[column] = frame[column].astype('datetime64')\n return frame", "def _handle_sql_types(value):\n if type(value) is datetime:\n return value.isoformat()\n return str(value)", "def date_trans_z(x):\n \"\"\"2017.01.09->2017/01/09 \"\"\"\n date_list=x.split('.')\n return date_list[0]+'/'+date_list[1]+'/'+date_list[2]", "def transform_datetimes(data: Any) -> Any:\n\n if isinstance(data, (datetime, date)):\n return data.isoformat()\n\n if isinstance(data, (list, tuple)):\n tmp_data = [transform_datetimes(elem) for elem in data]\n\n return tuple(tmp_data) if isinstance(data, tuple) else tmp_data\n\n if isinstance(data, dict):\n for key, val in data.items():\n data[key] = transform_datetimes(val)\n\n return data", "def get_dt_string(type_list):\n output = ''\n for entry in type_list:\n output = output+entry+'64,'\n return output[0:-1]", "def serialize(date):\n # From database to client\n\n # Convert date-object to datetime\n # See: https://stackoverflow.com/questions/1937622/convert-date-to-datetime-in-python\n dt = datetime.combine(date, datetime.min.time())\n date_format = \"%Y-%m-%d\"\n return datetime.strftime(dt, date_format)", "def convert_column_str2dates(self, info_in, output='list'):\n if hasattr(info_in, 'keys'):\n items = [(el, el) for el in self._columns.keys()]\n elif hasattr(info_in, '__getitem__'):\n items = [(ii, el) for el in enumerate(self._columns.keys())]\n else:\n raise Exception('Only accepts dict, dict or list')\n \n if output == 'dict':\n return dict([(el1, self.str2date(info_in[el0])) if self.column_is_date[el1] else (el1, info_in[el0]) for el0, el1 in items])\n elif output == 'list':\n return [self.str2date(info_in[el0]) if self.column_is_date[el1] else info_in[el0] for el0, el1 in items]\n else:\n raise Exception('output type %s unkown'%output)", "def join_date_strings(dates, separator=\"','\", df=\"%d-%m-%Y\"):\n return separator.join([x.strftime(df) for x in dates])", "def change_format_to_database_index(self, date):\n year = date[0:4] + ','\n month = date[4:6]\n day = date[6:8]\n if month[0] == '0':\n month = month[1]\n\n if day[0] == '0':\n day = day[1]\n\n day = ' ' + day + ','\n month = ' ' + month\n\n return year + day + month", "def dateToString(self, date_objs: list) -> list:\n date_strings = []\n try:\n if isinstance(date_objs, list) == False:\n return date_strings\n\n for date_obj in date_objs:\n if isinstance(date_obj, datetime) == False:\n continue\n date_strings.append(datetime.strftime(date_obj, '%d %b %Y'))\n\n return date_strings\n except Exception as e:\n logging.error(e)", "def datetimeify(t):\n if type(t) in [datetime, Timestamp]:\n return t\n fmts = ['%Y-%m-%d %H:%M:%S', '%Y-%m-%d', '%Y %m %d %H %M 
%S',]\n for fmt in fmts:\n try:\n return datetime.strptime(t, fmt)\n except ValueError:\n pass\n raise ValueError(\"time data '{:s}' not a recognized format\".format(t))", "def _process_date(self, data):\n def helper(val):\n # Sometime the date has a (1) or (2) following it. Strip that off\n # so that we can successful convert to date.\n s = val.find(\" (\")\n if s >= 0:\n val = val[0:s]\n dv = dt.datetime.strptime(val, '%A, %b %d')\n dv = dv.replace(year=self.start_date.year)\n return dv\n data['Date'] = data['Date'].apply(helper)\n return data", "def format_datetimes(self, datetimes, format=\"%B %d %Y %I:%M %p\"):\n date, times, space_character = datetimes.split(\", \")\n start_time, end_time = times.split(\" - \")\n year = datetime.now().strftime(\"%Y\")\n return (\n datetime.strptime(\n date + \" \" + year + \" \" + start_time.replace(\".\", \"\"), format\n ),\n datetime.strptime(\n date + \" \" + year + \" \" + end_time.replace(\".\", \"\"), format\n ),\n )", "def _date_to_string(v):\n\n if not isinstance(v,(list,tuple)):\n raise InstrumentParameterException('Value %s is not a list, tuple.' % str(v))\n \n if not len(v)==3:\n raise InstrumentParameterException('Value %s is not length 3.' % str(v))\n \n months = ['Jan','Feb','Mar','Apr','May','Jun','Jul','Aug','Sep',\n 'Oct','Nov','Dec']\n day = v[0]\n month = v[1]\n year = v[2]\n \n if len(str(year)) > 2:\n year = int(str(year)[-2:])\n \n if not isinstance(day,int) or day < 1 or day > 31:\n raise InstrumentParameterException('Value %s is not a day of month.' % str(day))\n \n if not isinstance(month,int) or month < 1 or month > 12:\n raise InstrumentParameterException('Value %s is not a month.' % str(month))\n\n if not isinstance(year,int) or year < 0 or year > 99:\n raise InstrumentParameterException('Value %s is not a 0-99 year.' % str(year))\n \n return '%02i-%s-%02i' % (day,months[month-1],year)", "def dates_as_strings(self, dates):\n\n return [i.strftime('%Y-%m-%d') for i in dates]", "def date_to_operate_format(self, date):\n date = date.replace(\" \", \"\")\n date = date.split(',')\n day = date[1]\n month = date[2]\n\n day = self.check_and_repair_right_format(day)\n month = self.check_and_repair_right_format(month)\n\n right_format = date[0] + month + day\n return right_format", "def fix_dates(self, row):\r\n for field in self.date_fields:\r\n if field in row:\r\n if not type(row[field]) is datetime:\r\n try:\r\n row[field] = datetime.fromtimestamp(float(row[field]))\r\n except Exception as e:\r\n row[field] = None", "def calculate_date(x, now):\n\t#now = datetime.datetime.now()\n\tn = int(extract_only_number(x))\n\tif n > 0:\n\t\treturn (now - datetime.timedelta(n)).strftime(\"%d-%m-%Y\")\n\treturn now.strftime(\"%d-%m-%Y\")", "def date_to_list(data_index):\n results = []\n for row in data:\n results.append(datetime.strptime(row[data_index], '%Y-%m-%d'))\n return results", "def modis_to_from_pydatetime(date):\n \n if isinstance(date, (str, unicode)): \n return dt.datetime.strptime(date[1:], '%Y%j').date()\n return dt.datetime.strftime(date, 'A%Y%j')", "def date_trans_x(x):\n \"\"\"2017.01.09->2017.1.09 \"\"\"\n date_list=x.split('.')\n return date_list[0]+'.'+str(int(date_list[1]))+'.'+date_list[2]", "def sql(df_list: List[pd.DataFrame], query: str):\n # TODO [#8]: add example in docs for sql\n\n # Pandasql looks up tables by names given in query. 
Here we are passed a list of dfs without names.\n # Therefore we need to extract the names of the tables from the query, then assign\n # those names to the dfs in df_list in the locals dictionary.\n table_names = _extract_table_names_from_sql(query)\n for i, name in enumerate(table_names):\n locals().update({name: df_list[i]})\n\n # Get date variable column names\n datevars: List[str] = []\n for d in df_list:\n datevars += _get_datetime_cols(d)\n datevars = list(set(datevars)) # remove duplicates\n\n merged = PandaSQL()(query)\n\n # Convert back to datetime\n for date in [d for d in datevars if d in merged.columns]:\n merged[date] = pd.to_datetime(merged[date])\n return merged", "def reformatdate(self, date):\n# print('DATE', self.__str__())\n if 'dummy' in date:\n return '1970_01_01'\n# datesplit = date.split('/')\n datesplit = date.split('-') # Really? This had to be changed?!\n# print('DATE', date, datesplit)\n\n # dates used to be as follows\n# month = datesplit[0]\n# day = datesplit[1]\n# year = datesplit[2]\n\n # dates as of 12 June 2018 now done this way\n year = datesplit[0]\n month = datesplit[1]\n day = datesplit[2]\n\n return year + '_' + month + '_' + day", "def format_date(self, date_val):\n try:\n if type(date_val) is not datetime:\n d = date.fromisoformat(date_val[0:10])\n else:\n d = date_val\n return d.strftime('%Y-%m-%d')\n except Exception as e:\n self.error((str(e)))", "def convert_date(raw_date):\n if raw_date:\n date = datetime.strptime(raw_date, \"%Y-%m-%d\")\n return date.strftime(\"%m/%d/%YZ\")", "def format_date_sortkey(self, data):\n return self.input['start_date'].date().strftime('%Y%m%d')", "def convertSODate(datenum):\n #Date numbers seem to start with 0 = 2001-01-01\n base_date = datetime.date(2001, 1, 1)\n #add key from the spot on object to this base date to get the date\n record_date = base_date + datetime.timedelta(days=int(datenum))\n record_date = record_date.isoformat()\n return record_date", "def examples():\n lst = ['today +7d', '11:20 +4d', '2014-02-15 +1w', 'jan 10', \\\n 'jan 10 2pm', '10 jan at 10am', 'now-3d', '+7d', '7', '11:15',\\\n '11:15p', '11p', 'aug', 'jan 5', 'aug 2019', 'now', \"tomorrow 2pm\",\\\n \"now +4h\", 'today + 1w', '1w', '+3y', 'w', '1w']\n for tpl in zip(lst, [phrase_to_datetime(str_e) for str_e in lst]):\n print tpl", "def _reduce_datetimes(row):\n\n row = list(row)\n\n for i, val in enumerate(row):\n if hasattr(val, \"strftime\"):\n row[i] = val.strftime(\"%Y-%m-%d %H:%M:%S\")\n elif hasattr(val, 'isoformat'):\n row[i] = val.isoformat()\n return tuple(row)", "def to_datetime(*dts):\n return tuple([pd.to_datetime(x) for x in dts])", "def dt64_to_dt(dt64_list):\n\n from datetime import datetime\n ns = 1e-9\n dt_list = []\n for dt64 in dt64_list:\n dt_list.append(datetime.utcfromtimestamp(dt64.astype(int) * ns))\n\n return dt_list", "def singular_sql_dates(master_table, dates, date_col):\n\n dts = [parse_date(x) for x in dates]\n\n # Map the specific dates to the specific years isomg a default dict\n years_dict = defaultdict(list)\n for dt in dts:\n years_dict[dt.year].append(dt)\n\n # Iterate through each of the years and add the trading dates belonging\n # to each query to a specific query, yield this SQL string as a generator\n for year in years_dict:\n # Set up a custom string and then place it inside brackets for\n # Use in the query\n strings = join_date_strings(years_dict[year])\n date_string = \"('%s')\" % strings\n\n # The base query string to use\n query_string = \"\"\" SELECT * FROM %s_%s WHERE %s in %s\"\"\"\n\n # 
Substitute the values into the string\n SQL = query_string % (master_table, year, date_col, date_string)\n\n yield SQL", "def _reformat_date_jan_1999():\n reader = csv.reader(open(\"temperatures_1999.csv\"), delimiter=\";\")\n for (day, month, temp) in reader:\n date = datetime.datetime.strptime(\"-\".join([\"1999\", month, day]), \n \"%Y-%m-%d\")\n print \"%s; %s\" % (date.strftime(\"%Y-%m-%d\"), temp)", "def reformat_date(all_data, min_date):\n all_data[\"date\"] = [datetime.timedelta(x) for x in all_data[\"date\"]]\n all_data[\"date\"] = all_data[\"date\"] + min_date", "def timedelta_to_sql(connection, obj):\n return connection.string_literal(timedelta_to_str(obj))", "def make_date(cls, df: pd.DataFrame, date_field: str) -> pd.DataFrame:\n field_dtype = df[date_field].dtype\n if isinstance(field_dtype, pd.core.dtypes.dtypes.DatetimeTZDtype):\n field_dtype = np.datetime64\n if not np.issubdtype(field_dtype, np.datetime64):\n df[date_field] = pd.to_datetime(df[date_field], infer_datetime_format=True)\n return df", "def pretty_date(date):\r\n if not type(date) == datetime:\r\n raise ValueError('the mistake of time format :(')\r\n how_long_days = (NOW - date).days\r\n if how_long_days < 0:\r\n raise ValueError('back to the future :)')\r\n if how_long_days >= 2:\r\n return date.strftime('%m/%d/%y')\r\n how_long_seconds = (NOW - date).seconds + how_long_days * DAY\r\n for time_offset in TIME_OFFSETS:\r\n if how_long_seconds < time_offset.offset:\r\n if time_offset.divider:\r\n how_long_seconds = int(how_long_seconds // time_offset.divider)\r\n result = time_offset.date_str\r\n if '{}' in time_offset.date_str:\r\n result = result.format(how_long_seconds)\r\n break\r\n return result", "def _reformat_date(exp_dates):\n time_fmt = \"%Y-%m-%dT%H:%M:%S\"\n wrong_time_fmt = \"%Y-%m-%d %H:%M:%S\"\n if exp_dates == 'NN':\n return exp_dates\n if exp_dates != 'NN' and not isinstance(exp_dates, list):\n try:\n datetime.strptime(exp_dates, time_fmt)\n except ValueError:\n try:\n exp_dates = datetime.strptime(exp_dates,\n wrong_time_fmt).strftime(time_fmt)\n except ValueError:\n exp_dates = datetime.strptime(exp_dates,\n \"%m/%d/20 %H:%M\").strftime(time_fmt)\n\n if exp_dates != 'NN' and isinstance(exp_dates, list):\n try:\n datetime.strptime(exp_dates[0], time_fmt)\n except ValueError:\n exp_dates = [datetime.strptime(c, wrong_time_fmt).strftime(time_fmt)\n for c in exp_dates]\n\n return exp_dates", "def fix_dates(df, column=None):\n if isinstance(column, list):\n for x in column:\n df[x] = pd.to_datetime(df[x], errors='coerce')\n df[x] = df[x].dt.strftime('%m-%d-%Y')\n df[x].replace('NaT', np.nan, inplace=True)\n return df\n else:\n df[column] = pd.to_datetime(df[column], errors='coerce')\n df[column] = df[column].dt.strftime('%m-%d-%Y')\n df[column].replace('NaT', np.nan, inplace=True)\n return df", "def date_to_python(self, value):\r\n # this throws away fractions of a second\r\n return datetime(*strptime(value[:-5], \"%Y-%m-%dT%H:%M:%S\")[0:6])", "def df_multicolumn_date_to_datetime(row):\n year = row['arrival_date_year']\n month = row['arrival_date_month']\n day = row['arrival_date_day_of_month']\n # create datetime object from string of form \"YearMonthDay\" using full month name\n return datetime.datetime.strptime(f\"{year}{month}{day}\", '%Y%B%d').date()", "def dump_datetime(value):\n if value is None:\n return\n return [value.strftime(\"%Y-%m-%d\"), value.strftime(\"%H:%M:%S\")]", "def orm2date(value, tformat=ORM_DATE_FORMAT, default=None):\n if not value:\n return default\n return 
datetime.strptime(value, tformat).date()", "def _get_date(self):\n for fax_in in self:\n DATETIME_FORMAT = \"%Y-%m-%d %H:%M:%S\"\n date_planned = False\n if fax_in.date:\n from_dt = datetime.datetime.strptime(str(fax_in.date[:19]), DATETIME_FORMAT)\n # from_dt = from_dt + datetime.timedelta(hours=5 , minutes=30)\n date_planned = from_dt.strftime('%Y-%m-%d')\n fax_in.fax_date = date_planned", "def txfDate(date):\n return date.strftime('%m/%d/%Y')", "def convert_date_of_attendance(attendance):\n if isinstance(attendance,list):\n for a in attendance:\n a.date_of_att = datetime.datetime.strptime(a.DATE_OF_ATTENDANCE,'%d/%m/%Y').date()\n elif isinstance(attendance,models.AttendanceModel):\n attendance.date_of_att = datetime.datetime.strptime\\\n (attendance.DATE_OF_ATTENDANCE, '%d/%m/%Y').date()", "def _tr_cal_date(self, date):\n items = []\n for code in self._datefmt:\n if code == 'Y':\n items += [date.year_str]\n elif code == 'M':\n if '/' in self._datefmt or '.' in self._datefmt:\n month = date.month_num\n if month is not None:\n month = \"{:02d}\".format(month)\n else:\n month = self._monthName(date.month)\n if month is not None:\n items += [month]\n elif code == 'D':\n day = date.day\n if day is not None and ',' in self._datefmt:\n items += [str(\"{:02d},\".format(day))]\n elif day is not None:\n items += [\"{:02d}\".format(day)]\n if '/' in self._datefmt:\n sep = '/'\n elif '.' in self._datefmt:\n sep = '.'\n elif '-' in self._datefmt:\n sep = '-'\n else:\n sep = ' '\n return sep.join(items)", "def wsq_to_txt(table_name, date):\n\tif(table_name == 'visited'):\n\t\tquery = \"select date(f_date_time), substr(dayname(f_date_time),1,2), \" + \\\n\t\t\t\"f_lang_id, f_ns_id, count(*) \" + \\\n\t\t\t\"from Filtered where f_action_id is null \" + \\\n\t\t\t\"and date(f_date_time) = '\" + date.strftime('%Y-%m-%d') + \"' \" + \\\n\t\t\t\"group by date(f_date_time), f_lang_id, f_ns_id;\"\n\telif(table_name == 'saved'):\n\t\tquery = \"select date(f_date_time), substr(dayname(f_date_time),1,2), \" + \\\n\t\t\t\"f_lang_id, f_ns_id, count(*) \" + \\\n\t\t\t\"from Filtered where f_action_id = 2 \" + \\\n\t\t\t\"and date(f_date_time) = '\" + date.strftime('%Y-%m-%d') + \"' \" + \\\n\t\t\t\"group by date(f_date_time), f_lang_id, f_ns_id;\"\n\telif(table_name == 'actions'):\n\t\tquery = \"select date(f_date_time), substr(dayname(f_date_time),1,2), \" + \\\n\t\t\t\"f_action_id, f_lang_id, f_ns_id, count(*) \" + \\\n\t\t\t\"from Filtered where f_action_id in (0, 1, 3, 4) \" + \\\n\t\t\t\"and date(f_date_time) = '\" + date.strftime('%Y-%m-%d') + \"' \" + \\\n\t\t\t\"group by date(f_date_time), f_action_id, f_lang_id, f_ns_id;\"\n\n\tlog_msg4(\"Creando dump para \" + table_name)\n\n\texec_mysql(getConfig().db_name_squidlogs, query=query, dumped=True)\n\n\tlog_msg_ok4()", "def convert_date(date):\n\n if len(date) > 10: date = date[:date.rfind(\"-\")]\n return convf(date)", "def format_datetime(dt):\r\n return dateformat.format(make_naive(dt), 'r')", "def _try_date(set_list, index, nested_dict, dict_keys=[], try_func=_try_set):\n import datetime\n try:\n dt = try_func(None, None, nested_dict, dict_keys) # 2012-07-05T00:00:00+04:00\n dt = datetime.datetime.strptime(dt, \"%Y-%m-%dT%H:%M:%S%z\")\n try_func(set_list, index, str(dt.date()))\n print(str(dt.date())+\" sdfsdfsdf\")\n return dt.date() # Дата присвоения кадастрового номера\n except:\n return ''", "def format_datetime(self, data):\r\n data = make_naive(data)\r\n if self.datetime_formatting == 'rfc-2822':\r\n return format_datetime(data)\r\n\r\n return 
data.isoformat()", "def _format_value_date_32A(self, val):\n value_date = val.get('value_date')\n currency = val.get('currency')\n interbank_settled_amount = val.get('interbank_settled_amount')\n date_format = '%y%m%d'\n if value_date and currency and interbank_settled_amount:\n value_date = FSwiftWriterUtils.format_date(value_date, date_format)\n interbank_settled_amount = apply_currency_precision(currency, abs(float(interbank_settled_amount)))\n val = str(value_date) + str(currency) + str(FSwiftMLUtils.float_to_swiftmt(str(interbank_settled_amount)))\n return val", "def dformat(val):\n if isinstance(val, datetime):\n return val.strftime(DATE_FORMAT)", "def datemake(datestring):\n return dtt.datetime.strptime(datestring,'%m/%d/%Y')", "def translate_dates(dates):\r\n formatted_dates = list()\r\n year = dt.today().year\r\n for dat in dates:\r\n if dat == '':\r\n continue\r\n day = dat[:2]\r\n mont = dat[6:]\r\n if int(day) < 10:\r\n day = '0' + day[1]\r\n if mont != '':\r\n # Month from Comuniazo\r\n month = \\\r\n {'enero': '01', 'febrero': '02', 'marzo': '03', 'abril': '04',\r\n 'mayo': '05', 'junio': '06', 'julio': '07', 'agosto': '08',\r\n 'septiembre': '09', 'octubre': '10', 'noviembre': '11', 'diciembre': '12'}[mont]\r\n else:\r\n # Month from Comunio\r\n month = dat[3:5]\r\n\r\n if month + day == '0101' or (formatted_dates and int(month) > formatted_dates[-1].month):\r\n # One year less\r\n year -= 1\r\n\r\n p_date = datetime.strptime('%s-%s-%s' % (year, month, day), \"%Y-%m-%d\").date()\r\n formatted_dates.append(p_date)\r\n return formatted_dates", "def filter_simple_date(value: datetime) -> str:\n return value.strftime(\"%Y-%m-%d\")", "def __replaceDate(self, hql, date):\n #%%escapa\n hql = hql.replace(\"<date>\", date).replace('%', '%%')\n # gerp date-n\n #Re = re.compile(r'<date\\s*([-+]\\s*\\d+)')\n Re = re.compile(r'<date\\s*([-+]\\s*\\d+)\\|?(\\S*?\\s*\\S*?)>')\n l = Re.findall(hql)\n if not l:\n return hql\n\n l = map(lambda x: (int(x[0]), x[1]), l)\n for x in l:\n if x[1]:\n f = ''.join(\n map(lambda c: '%' + c if re.match('^[A-Za-z]', c) else c, x[1]))\n else:\n f = '%Y%m%d'\n stamp = int(time.mktime(time.strptime(\n date, '%Y%m%d'))) + 86400 * x[0]\n\n match = Re.search(hql)\n if not match:\n continue\n\n # replace <date-n|[Ymd]> to specific time.\n sdate = time.strftime(f, time.localtime(stamp))\n hql = hql.replace(match.group(), str(sdate))\n\n return hql", "def string_to_datetime(dataframe):\n\n print(\"Which variable would you like to convert from a date string to a python date?\")\n existing_variable = input()\n print(\"What would you like to call the new date variable?\")\n new_variable = input()\n\n dataframe[new_variable] = dataframe[existing_variable].dt.strftime('%Y-%m-%d')\n\n return dataframe", "def date_parser(dates):\n\n #splitting the dates(containing datetime data) list and returning only the datetime\n return([item.split()[0] for item in dates])\n pass", "def convert_date_to_string(date_input):\n if isinstance(date_input, date):\n return date_input.strftime(\"%Y-%m-%d\")\n else:\n raise TypeError(\"Input {0} is not a date object\".format(type(date_input)))", "def get_dates(raw_table) -> \"list of dates\":\n dates = []\n found_first = False\n for i, dstr in enumerate([raw_table[i][0] for i in range(0, len(raw_table))]):\n if dstr:\n if len(dstr.split(\"/\")) == 3:\n d = datetime.datetime.strptime(dstr, '%m/%d/%Y')\n elif len(dstr.split(\"-\")) == 3:\n d = datetime.datetime.strptime(dstr, '%Y-%m-%d')\n else:\n # Not necessarily an error, could just be a 
non-date cell\n logging.debug(\"unknown date-format: {}\".format(dstr))\n continue\n dates.append(d)\n if not found_first:\n found_first = True\n logging.debug(\"Found first date: '{}' at i: {}\".format(d.isoformat(), i))\n elif found_first:\n logging.debug(\"Last date: {}\".format(d))\n break\n return dates", "def _format_list_for_query(input_list):\n return (\n \", \".join(input_list).replace(\" \", \"\").replace(\"'\", \"\").replace(\",\", \"%2C\")\n )", "def encode_datetime(self, datetime_obj):\n if isinstance(datetime_obj, np.datetime64):\n datetime_obj = pd.Timestamp(datetime_obj).to_pydatetime()\n\n if isinstance(datetime_obj, dt.datetime):\n return datetime_obj.strftime(\"%Y%m%dT%H%M%S\")\n elif isinstance(datetime_obj, dt.date):\n return datetime_obj.strftime(\"%Y%m%d\")\n else:\n return datetime_obj", "def sqllist(lst):\n if isinstance(lst, basestring): \n return lst\n else:\n return ', '.join(lst)", "def transform_date(date):\n if type(date) == str:\n return date\n\n formatted_string = date.strftime(\"%d/%m/%Y\")\n\n return formatted_string", "def convert_date_column(datestring):\n return datetime.datetime.strptime(datestring.strip(), \"%b-%Y\").date()", "def datefixer(ds):\n\n\n\t# ========== create the new dates ==========\n\tyear = ds.Year\n\n\t# +++++ set up the list of dates +++++\n\tdates = OrderedDict()\n\ttm = [dt.datetime(int(year) , int(np.floor(tm)), int(tm%1*30+1)) for tm in ds.time]\n\tdates[\"time\"] = pd.to_datetime(tm)\n\n\tdates[\"calendar\"] = 'standard'\n\tdates[\"units\"] = 'days since 1900-01-01 00:00'\n\t\n\tdates[\"CFTime\"] = date2num(\n\t\ttm, calendar=dates[\"calendar\"], units=dates[\"units\"])\n\n\treturn dates", "def format_date(self, data):\r\n if self.datetime_formatting == 'rfc-2822':\r\n return format_date(data)\r\n\r\n return data.isoformat()", "def parse_date_and_time(row):\n datetime_regex = re.compile(r'\\d{8}T\\d{4}')\n date_and_time = datetime_regex.search(row)\n return '\\'' + date_and_time.group() + '\\''", "def construct_date_list(start=None, num=1):\n if not start:\n start = time.strftime(\"%Y%m%d\", time.gmtime(time.time() - num * 60 * 60 * 24))\n\n elif len(start) != 8:\n raise Exception(\"Date is not in expected format!\")\n\n startdatetime = datetime.datetime.strptime(start, '%Y%m%d')\n\n datelist = [startdatetime + datetime.timedelta(days=i) for i in range(0, num)]\n return [date.strftime('%Y%m%d') for date in datelist]", "def _datetime_to_query(dt):\n timestamp = timegm(dt.timetuple())\n return u\"{whole}.{part:06d}\".format(\n whole=timestamp,\n part=dt.microsecond,\n )", "def dump_datetime(value):\n if value is None:\n return None\n return [value.strftime(\"%Y-%m-%d\"), value.strftime(\"%H:%M:%S\")]", "def dump_datetime(value):\n if value is None:\n return None\n return [value.strftime(\"%Y-%m-%d\"), value.strftime(\"%H:%M:%S\")]", "def dump_datetime(value):\n if value is None:\n return None\n return [value.strftime(\"%Y-%m-%d\"), value.strftime(\"%H:%M:%S\")]", "def dump_datetime(value):\n if value is None:\n return None\n return [value.strftime(\"%Y-%m-%d\"), value.strftime(\"%H:%M:%S\")]", "def convert_date_time(self, dt):\n return datetime.fromtimestamp(dt).strftime(\"%Y-%m-%d\")", "def get_datetime_xs(df):\n\n # check the column exists\n if 'date' not in df.columns:\n raise RuntimeError(\"Couldn't find column 'date' in input df\")\n\n # safely read date column and convert to datetime objects\n try:\n xs = [datetime.datetime.strptime(d, '%Y-%m-%d').date() for d in df.date]\n except:\n # if the time series has been resampled 
the index is a TimeStamp object\n xs = [datetime.datetime.strptime(d._date_repr, '%Y-%m-%d').date() for d in df.date]\n\n return xs", "def format_date(d):\n if type(d) == str:\n d = dateutil_parse(d)\n return d.isoformat()", "def format_date(d):\r\n # workaround because Django's dateformat utility requires a datetime\r\n # object (not just date)\r\n dt = aware_datetime(d.year, d.month, d.day, 0, 0, 0)\r\n return dateformat.format(dt, 'j M Y')", "def numeric_date_recover(self):\n \n sp_time_zone, current_datetime = self.setup_datetime() \n converter2sptimezone = current_datetime.astimezone(sp_time_zone)\n \n return converter2sptimezone.strftime('%d-%m-%Y')", "def _get_date(self):\n for fax_out in self:\n DATETIME_FORMAT = \"%Y-%m-%d %H:%M:%S\"\n date_planned = False\n if fax_out.date:\n from_dt = datetime.datetime.strptime(str(fax_out.date[:19]), DATETIME_FORMAT)\n # from_dt = from_dt + datetime.timedelta(hours=5 , minutes=30)\n date_planned = from_dt.strftime('%Y-%m-%d')\n fax_out.fax_date = date_planned", "def get_datetime(df, col_name, dayfirst=False, yearfirst=False, use_format=False):\n if use_format:\n format = \"%d/%m/%Y\"\n return pd.to_datetime(df[col_name], dayfirst=dayfirst, yearfirst=yearfirst, format=format)", "def dateTimeConvert(date):\n dateTimeConvert = datetime.strftime(date ,'%d/%m/%Y %H:%M:%S')\n #Format -> d/m/Y H:M:S\n return dateTimeConvert" ]
[ "0.66734904", "0.65000284", "0.6259414", "0.59757656", "0.5600508", "0.5579302", "0.5578522", "0.5551475", "0.5513122", "0.54512274", "0.5435365", "0.52705914", "0.52298", "0.5214014", "0.5199284", "0.51939476", "0.5177129", "0.5144611", "0.51139647", "0.5111645", "0.5084982", "0.50844026", "0.50539654", "0.50317484", "0.5007062", "0.50055915", "0.5004682", "0.5003799", "0.49934232", "0.494639", "0.4941801", "0.49395546", "0.49319535", "0.49146414", "0.49083698", "0.48996976", "0.4896751", "0.489572", "0.4893967", "0.4892776", "0.48845044", "0.48837423", "0.48725617", "0.4872267", "0.48578805", "0.4848264", "0.48460853", "0.48317948", "0.48312882", "0.4831218", "0.48292738", "0.48240146", "0.4820597", "0.48179454", "0.48131344", "0.4812471", "0.48089874", "0.4803788", "0.4799662", "0.47903845", "0.4787351", "0.47846282", "0.47812128", "0.4771975", "0.4771897", "0.4769851", "0.4765369", "0.4758621", "0.47549957", "0.4750448", "0.47488025", "0.47460482", "0.47424024", "0.47404724", "0.47345945", "0.47286168", "0.47206166", "0.47182706", "0.47162825", "0.47155768", "0.4714964", "0.47103804", "0.47077122", "0.47053674", "0.470376", "0.47031957", "0.47007662", "0.4700221", "0.46952316", "0.46952316", "0.46952316", "0.46952316", "0.4693278", "0.46930197", "0.46915612", "0.46887812", "0.468706", "0.46755534", "0.46699256", "0.4665158" ]
0.7940835
0
=========================================================== dateformated(x) =========================================================== this function converts the date read from a list to a datetime format
def DateFormated(x): x1=[] for i in x: if len(i)==19: x1.append(datetime.datetime(int(i[:4]),int(i[5:7]),int(i[8:10]),int(i[11:13]),int(i[14:16]),int(i[17:18]) )) # elif len(i)==13: # x1.append(datetime.datetime(int(i[:4]),int(i[5:7]),int(i[8:10]),int(i[11:13]),int(0),int(0) )) # else: # x1.append(datetime.datetime(int(i[:4]),int(i[5:7]),int(i[8:10]),int(0),int(0),int(0) )) # del i,x return x1
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def change_format_from_input_to_datetime(list_d_t_t):\n data_output = []\n\n for row in list_d_t_t:\n data_output.append([datetime.datetime.strptime(row[0] + \" \" + row[1], \"%Y-%m-%d %H:%M:%S\"),\n datetime.datetime.strptime(row[0] + \" \" + row[2], \"%Y-%m-%d %H:%M:%S\")])\n\n return data_output", "def DateFormatedSQL(x):\n x=[i[0] for i in x]\n \n x1=[]\n for i in x:\n if len(i)==19:\n x1.append(datetime.datetime(int(i[:4]),int(i[5:7]),int(i[8:10]),int(i[11:13]),int(i[14:16]),int(i[17:18]) ))\n# elif len(i)==13:\n# x1.append(datetime.datetime(int(i[:4]),int(i[5:7]),int(i[8:10]),int(i[11:13]),int(0),int(0) ))\n# else:\n# x1.append(datetime.datetime(int(i[:4]),int(i[5:7]),int(i[8:10]),int(0),int(0),int(0) ))\n# del i,x\n return x1", "def convert_datetime_objs(list_of_dates):\n datetime_list = []\n for date in list_of_dates:\n date_obj = datetime.datetime.strptime(date, '%d.%m.%Y')\n datetime_list.append(date_obj)\n return datetime_list", "def datetimefstr(date_list, datetimeformat, longdatetimeformat):\n try:\n # including year\n parts = longdatetimeformat.count(' ') + 1\n dtstring = ' '.join(date_list[0:parts])\n dtstart = datetime.strptime(dtstring, longdatetimeformat)\n for _ in range(parts):\n date_list.pop(0)\n except ValueError:\n # without year\n parts = datetimeformat.count(' ') + 1\n dtstring = ' '.join(date_list[0:parts])\n dtstart = datetime.strptime(dtstring, datetimeformat)\n if dtstart.timetuple()[0] == 1900:\n dtstart = datetime(date.today().timetuple()[0],\n *dtstart.timetuple()[1:5])\n # if start date lies in the past use next year\n #if dtstart < datetime.today():\n #dtstart = datetime(dtstart.timetuple()[0] + 1,\n #*dtstart.timetuple()[1:6])\n for _ in range(parts):\n date_list.pop(0)\n return dtstart", "def to_date(d, date_format = \"%Y-%m-%d %H:%M:%S.%f\"):\n if type(d) == pd.core.series.Series:\n d = list(d)\n if type(d) == list:\n return [datetime.strptime(date,date_format) if type(date) == str else date for date in d]\n elif type(d) == str:\n return datetime.strptime(d,date_format)\n else:\n raise ValueError(\"Either String or list of Strings is accepted.\")", "def timefstr(date_list, timeformat):\n time_start = time.strptime(date_list[0], timeformat)\n time_start = dtime(*time_start[3:5])\n day_start = date.today()\n dtstart = datetime.combine(day_start, time_start)\n date_list.pop(0)\n return dtstart", "def _to_date(self, x):\n if isinstance(x, datetime.datetime):\n return x.date()\n return x", "def _reformat_date(exp_dates):\n time_fmt = \"%Y-%m-%dT%H:%M:%S\"\n wrong_time_fmt = \"%Y-%m-%d %H:%M:%S\"\n if exp_dates == 'NN':\n return exp_dates\n if exp_dates != 'NN' and not isinstance(exp_dates, list):\n try:\n datetime.strptime(exp_dates, time_fmt)\n except ValueError:\n try:\n exp_dates = datetime.strptime(exp_dates,\n wrong_time_fmt).strftime(time_fmt)\n except ValueError:\n exp_dates = datetime.strptime(exp_dates,\n \"%m/%d/20 %H:%M\").strftime(time_fmt)\n\n if exp_dates != 'NN' and isinstance(exp_dates, list):\n try:\n datetime.strptime(exp_dates[0], time_fmt)\n except ValueError:\n exp_dates = [datetime.strptime(c, wrong_time_fmt).strftime(time_fmt)\n for c in exp_dates]\n\n return exp_dates", "def date_parser(dates):\n\n #splitting the dates(containing datetime data) list and returning only the datetime\n return([item.split()[0] for item in dates])\n pass", "def date_trans_z(x):\n \"\"\"2017.01.09->2017/01/09 \"\"\"\n date_list=x.split('.')\n return date_list[0]+'/'+date_list[1]+'/'+date_list[2]", "def reformatdate(self, date):\n# print('DATE', 
self.__str__())\n if 'dummy' in date:\n return '1970_01_01'\n# datesplit = date.split('/')\n datesplit = date.split('-') # Really? This had to be changed?!\n# print('DATE', date, datesplit)\n\n # dates used to be as follows\n# month = datesplit[0]\n# day = datesplit[1]\n# year = datesplit[2]\n\n # dates as of 12 June 2018 now done this way\n year = datesplit[0]\n month = datesplit[1]\n day = datesplit[2]\n\n return year + '_' + month + '_' + day", "def _tr_cal_date(self, date):\n items = []\n for code in self._datefmt:\n if code == 'Y':\n items += [date.year_str]\n elif code == 'M':\n if '/' in self._datefmt or '.' in self._datefmt:\n month = date.month_num\n if month is not None:\n month = \"{:02d}\".format(month)\n else:\n month = self._monthName(date.month)\n if month is not None:\n items += [month]\n elif code == 'D':\n day = date.day\n if day is not None and ',' in self._datefmt:\n items += [str(\"{:02d},\".format(day))]\n elif day is not None:\n items += [\"{:02d}\".format(day)]\n if '/' in self._datefmt:\n sep = '/'\n elif '.' in self._datefmt:\n sep = '.'\n elif '-' in self._datefmt:\n sep = '-'\n else:\n sep = ' '\n return sep.join(items)", "def date_to_operate_format(self, date):\n date = date.replace(\" \", \"\")\n date = date.split(',')\n day = date[1]\n month = date[2]\n\n day = self.check_and_repair_right_format(day)\n month = self.check_and_repair_right_format(month)\n\n right_format = date[0] + month + day\n return right_format", "def datemake(datestring):\n return dtt.datetime.strptime(datestring,'%m/%d/%Y')", "def txfDate(date):\n return date.strftime('%m/%d/%Y')", "def build_date():\n def r(x):\n return tuple(ord(i) for i in x)\n return r", "def translate_dates(dates):\r\n formatted_dates = list()\r\n year = dt.today().year\r\n for dat in dates:\r\n if dat == '':\r\n continue\r\n day = dat[:2]\r\n mont = dat[6:]\r\n if int(day) < 10:\r\n day = '0' + day[1]\r\n if mont != '':\r\n # Month from Comuniazo\r\n month = \\\r\n {'enero': '01', 'febrero': '02', 'marzo': '03', 'abril': '04',\r\n 'mayo': '05', 'junio': '06', 'julio': '07', 'agosto': '08',\r\n 'septiembre': '09', 'octubre': '10', 'noviembre': '11', 'diciembre': '12'}[mont]\r\n else:\r\n # Month from Comunio\r\n month = dat[3:5]\r\n\r\n if month + day == '0101' or (formatted_dates and int(month) > formatted_dates[-1].month):\r\n # One year less\r\n year -= 1\r\n\r\n p_date = datetime.strptime('%s-%s-%s' % (year, month, day), \"%Y-%m-%d\").date()\r\n formatted_dates.append(p_date)\r\n return formatted_dates", "def date_trans_x(x):\n \"\"\"2017.01.09->2017.1.09 \"\"\"\n date_list=x.split('.')\n return date_list[0]+'.'+str(int(date_list[1]))+'.'+date_list[2]", "def buildDate(date):\n parts = date.split(\"-\")\n yDate = parts[1] + \" \" + parts[2] + ', ' + parts[0]\n return yDate", "def convert_date_of_attendance(attendance):\n if isinstance(attendance,list):\n for a in attendance:\n a.date_of_att = datetime.datetime.strptime(a.DATE_OF_ATTENDANCE,'%d/%m/%Y').date()\n elif isinstance(attendance,models.AttendanceModel):\n attendance.date_of_att = datetime.datetime.strptime\\\n (attendance.DATE_OF_ATTENDANCE, '%d/%m/%Y').date()", "def convert_date(raw_date):\n if raw_date:\n date = datetime.strptime(raw_date, \"%Y-%m-%d\")\n return date.strftime(\"%m/%d/%YZ\")", "def trost2date(trost_date):\n year, month, day = (int(val) for val in trost_date.split('-'))\n return datetime.date(year, month, day)", "def datetimeify(t):\n if type(t) in [datetime, Timestamp]:\n return t\n fmts = ['%Y-%m-%d %H:%M:%S', '%Y-%m-%d', '%Y %m %d %H %M 
%S',]\n for fmt in fmts:\n try:\n return datetime.strptime(t, fmt)\n except ValueError:\n pass\n raise ValueError(\"time data '{:s}' not a recognized format\".format(t))", "def _reformat_date_jan_1999():\n reader = csv.reader(open(\"temperatures_1999.csv\"), delimiter=\";\")\n for (day, month, temp) in reader:\n date = datetime.datetime.strptime(\"-\".join([\"1999\", month, day]), \n \"%Y-%m-%d\")\n print \"%s; %s\" % (date.strftime(\"%Y-%m-%d\"), temp)", "def format_datetime(self, data):\r\n data = make_naive(data)\r\n if self.datetime_formatting == 'rfc-2822':\r\n return format_datetime(data)\r\n\r\n return data.isoformat()", "def format_date(self, data):\r\n if self.datetime_formatting == 'rfc-2822':\r\n return format_date(data)\r\n\r\n return data.isoformat()", "def convertDate(indate):\n a = datetime.datetime.fromtimestamp(indate / 1000.0)\n a_str = a.strftime('%m/%d/%y')\n return datetime.datetime.strptime(a_str, '%m/%d/%y').date()", "def fixDate(weatherRDDRecord):\n fieldList = weatherRDDRecord.split(\",\")\n fieldList = [i.replace('\\\"', '') for i in fieldList] #remove quotation marks\n fieldList[0] = fieldList[0].replace('-', '/')\n \n swapDateOrder = fieldList[0].split('/')\n fieldList[0] = swapDateOrder[2] + '/' + swapDateOrder[1] + '/' + swapDateOrder[0]\n \n return (fieldList[0],(fieldList[1:]))", "def format_datetimes(self, datetimes, format=\"%B %d %Y %I:%M %p\"):\n date, times, space_character = datetimes.split(\", \")\n start_time, end_time = times.split(\" - \")\n year = datetime.now().strftime(\"%Y\")\n return (\n datetime.strptime(\n date + \" \" + year + \" \" + start_time.replace(\".\", \"\"), format\n ),\n datetime.strptime(\n date + \" \" + year + \" \" + end_time.replace(\".\", \"\"), format\n ),\n )", "def _process_date(self, data):\n def helper(val):\n # Sometime the date has a (1) or (2) following it. 
Strip that off\n # so that we can successful convert to date.\n s = val.find(\" (\")\n if s >= 0:\n val = val[0:s]\n dv = dt.datetime.strptime(val, '%A, %b %d')\n dv = dv.replace(year=self.start_date.year)\n return dv\n data['Date'] = data['Date'].apply(helper)\n return data", "def gen_date(date):\n date = date.split(',')\n month, day = [x.strip() for x in date[0].split(' ')]\n year = date[1].strip()\n day, year = map(int, [day, year])\n date = datetime.date(year, MONTHS[month.capitalize()], day)\n return date", "def __call__(self, x: Sequence[datetime]) -> Sequence[str]:\n if self.tz is not None:\n x = [d.astimezone(self.tz) for d in x]\n return [d.strftime(self.fmt) for d in x]", "def convert_str2date(date):\n import datetime\n date = str(date)\n year = int(date[0:4])\n month = int(date[4:6])\n day = int(date[6:8])\n return datetime.datetime(year,month,day)", "def date_to_python(self, value):\r\n # this throws away fractions of a second\r\n return datetime(*strptime(value[:-5], \"%Y-%m-%dT%H:%M:%S\")[0:6])", "def convert(date):\n converted_date = datetime.datetime.strptime(date, \n \"%Y-%m-%d\").date()\n return converted_date", "def convert_date_type(dates):\n try:\n return datetime.strptime(dates, '%Y-%m-%d')\n except ValueError:\n return datetime.strptime(dates, '%d/%m/%Y')", "def dateTimeConvert(date):\n dateTimeConvert = datetime.strftime(date ,'%d/%m/%Y %H:%M:%S')\n #Format -> d/m/Y H:M:S\n return dateTimeConvert", "def str2date(date):\n return datetime.datetime.strptime(date, \"%m/%d/%Y\").date()", "def dateToString(self, date_objs: list) -> list:\n date_strings = []\n try:\n if isinstance(date_objs, list) == False:\n return date_strings\n\n for date_obj in date_objs:\n if isinstance(date_obj, datetime) == False:\n continue\n date_strings.append(datetime.strftime(date_obj, '%d %b %Y'))\n\n return date_strings\n except Exception as e:\n logging.error(e)", "def transform_date(date):\n if type(date) == str:\n return date\n\n formatted_string = date.strftime(\"%d/%m/%Y\")\n\n return formatted_string", "def examples():\n lst = ['today +7d', '11:20 +4d', '2014-02-15 +1w', 'jan 10', \\\n 'jan 10 2pm', '10 jan at 10am', 'now-3d', '+7d', '7', '11:15',\\\n '11:15p', '11p', 'aug', 'jan 5', 'aug 2019', 'now', \"tomorrow 2pm\",\\\n \"now +4h\", 'today + 1w', '1w', '+3y', 'w', '1w']\n for tpl in zip(lst, [phrase_to_datetime(str_e) for str_e in lst]):\n print tpl", "def modis_to_from_pydatetime(date):\n \n if isinstance(date, (str, unicode)): \n return dt.datetime.strptime(date[1:], '%Y%j').date()\n return dt.datetime.strftime(date, 'A%Y%j')", "def reformat_subway_dates(date):\n date_formatted = datetime.datetime.strptime(date, '%m-%d-%y')\n date_formatted = date_formatted.strftime('%Y-%m-%d')\n return date_formatted", "def format_date(self, date_val):\n try:\n if type(date_val) is not datetime:\n d = date.fromisoformat(date_val[0:10])\n else:\n d = date_val\n return d.strftime('%Y-%m-%d')\n except Exception as e:\n self.error((str(e)))", "def transform_datetimes(data: Any) -> Any:\n\n if isinstance(data, (datetime, date)):\n return data.isoformat()\n\n if isinstance(data, (list, tuple)):\n tmp_data = [transform_datetimes(elem) for elem in data]\n\n return tuple(tmp_data) if isinstance(data, tuple) else tmp_data\n\n if isinstance(data, dict):\n for key, val in data.items():\n data[key] = transform_datetimes(val)\n\n return data", "def construct_date_list(start=None, num=1):\n if not start:\n start = time.strftime(\"%Y%m%d\", time.gmtime(time.time() - num * 60 * 60 * 24))\n\n elif len(start) != 
8:\n raise Exception(\"Date is not in expected format!\")\n\n startdatetime = datetime.datetime.strptime(start, '%Y%m%d')\n\n datelist = [startdatetime + datetime.timedelta(days=i) for i in range(0, num)]\n return [date.strftime('%Y%m%d') for date in datelist]", "def preprocess_date(date_):\n if 'JAN' in date_:\n date_ = date_.replace('JAN', '01')\n elif 'FEB' in date_:\n date_ = date_.replace('FEB', '02')\n elif 'MAR' in date_:\n date_ = date_.replace('MAR', '03')\n elif 'APR' in date_:\n date_ = date_.replace('APR', '04')\n elif 'MAY' in date_:\n date_ = date_.replace('MAY', '05')\n elif 'JUN' in date_:\n date_ = date_.replace('JUN', '06')\n elif 'JUL' in date_:\n date_ = date_.replace('JUL', '07')\n elif 'AUG' in date_:\n date_ = date_.replace('AUG', '08')\n elif 'SEP' in date_:\n date_ = date_.replace('SEP', '09')\n elif 'OCT' in date_:\n date_ = date_.replace('OCT', '10')\n elif 'NON' in date_:\n date_ = date_.replace('NON', '11')\n elif 'DEC' in date_:\n date_ = date_.replace('DEC', '12')\n if date_[-2:] > '17':\n date_ = date_[:6] + '19' + date_[-2:]\n else:\n date_ = date_[:6] + '20' + date_[-2:]\n return datetime.strptime(date_, '%d-%m-%Y')", "def calculate_date(x, now):\n\t#now = datetime.datetime.now()\n\tn = int(extract_only_number(x))\n\tif n > 0:\n\t\treturn (now - datetime.timedelta(n)).strftime(\"%d-%m-%Y\")\n\treturn now.strftime(\"%d-%m-%Y\")", "def convert_date(date):\n\n if len(date) > 10: date = date[:date.rfind(\"-\")]\n return convf(date)", "def convert_date(date):\n date = get_nummeric_only(date) \n \n \n if len(date) == 8:\n\n year = int(date[:4]) \n month = int(date[4:6])\n day = int(date[6:8])\n \n date_time = dt.datetime(year,month,day)\n \n return date_time\n \n if len(date) == 12 or len(date) == 14:\n\n year = int(date[:4]) \n month = int(date[4:6])\n day = int(date[6:8])\n hour = int(date[8:10])\n minute = int(date[10:12])\n \n date_time = dt.datetime(year,month,day, hour, minute)\n \n return date_time\n else:\n return 0", "def deconstruct_datetime(self, date: datetime) -> List[int]:\n year, month, day, hour, _, _, _, _, _ = date.timetuple()\n return [year, month, day, hour]", "def get_formatted_date(self, date):\n\n formatted_date = date\n\n possible_datetime_formats = [\n \"%Y-%m-%dT%H:%M:%S%z\", # \"2021-10-19T16:46:02Z\"\n \"%a, %d %b %Y %H:%M:%S %z\", # \"Tue, 19 Oct 2021 21:00:13 +0300\"\n \"%a, %d %b %Y %H:%M:%S %Z\", # \"Tue, 19 Oct 2021 18:54:00 GMT\"\n \"%a, %d %b %Y %H:%M:%S\", # \"Tue, 19 Oct 2021 18:54:00\"\n ]\n\n for format in possible_datetime_formats:\n try:\n formatted_date = datetime.strptime(date, format).strftime(\"%Y%m%d\")\n except:\n pass\n return formatted_date", "def dates_as_strings(self, dates):\n\n return [i.strftime('%Y-%m-%d') for i in dates]", "def _date_to_datetime(value):\r\n assert isinstance(value, datetime.date)\r\n return datetime.datetime(value.year, value.month, value.day)", "def _get_date(self):\n for fax_in in self:\n DATETIME_FORMAT = \"%Y-%m-%d %H:%M:%S\"\n date_planned = False\n if fax_in.date:\n from_dt = datetime.datetime.strptime(str(fax_in.date[:19]), DATETIME_FORMAT)\n # from_dt = from_dt + datetime.timedelta(hours=5 , minutes=30)\n date_planned = from_dt.strftime('%Y-%m-%d')\n fax_in.fax_date = date_planned", "def serialize_date(value):\n return datetime.strptime(value, '%d %b %Y')", "def format_date(d):\n if type(d) == str:\n d = dateutil_parse(d)\n return d.isoformat()", "def rebuildDate(date):\n parts = date.split(\" \")\n parts[1] = parts[1][:-1]\n eDate = parts[2] + '-' + parts[0] + '-' + parts[1]\n 
return eDate", "def as_date(inp):\n \n out = datetime.datetime.strptime(str(inp), \"%Y%m\")\n out = out.replace(day = 28) + datetime.timedelta(days=4)\n \n return out - datetime.timedelta(days = out.day)", "def _get_date(self):\n for fax_out in self:\n DATETIME_FORMAT = \"%Y-%m-%d %H:%M:%S\"\n date_planned = False\n if fax_out.date:\n from_dt = datetime.datetime.strptime(str(fax_out.date[:19]), DATETIME_FORMAT)\n # from_dt = from_dt + datetime.timedelta(hours=5 , minutes=30)\n date_planned = from_dt.strftime('%Y-%m-%d')\n fax_out.fax_date = date_planned", "def any2datetime_date(d):\n return datetime.date(d.year, d.month, d.day)", "def date_parser(dates):\n final_date = []\n for date in dates:\n final_date = final_date + [date[0:10]]\n return final_date", "def convert_date(adate):\n\tprint \"date given: \" + adate\n\t# stuff\n\tprint \"epoch time for date: \"", "def parseDate(self, date):\n\n temp = dateparser.parse(date)\n temp_date = temp.strftime(\"%Y-%m-%d\")\n\n return temp_date", "def date_to_list(data_index):\n results = []\n for row in data:\n results.append(datetime.strptime(row[data_index], '%Y-%m-%d'))\n return results", "def convert_date(self, date=None):\n if date is not None:\n format_str = '%d/%m/%Y'\n converted_date = datetime.strptime(date, format_str)\n return converted_date.date()", "def format_date(date):\n try:\n start_date = datetime.strftime(date, '%m/%d/%Y')\n except (TypeError, ValueError) as e:\n start_date = date\n pass\n return start_date", "def str_2_date(str_date):\n str_format = \"%m/%d/%y\"\n return datetime.strptime(str_date, str_format)", "def fix_date(oldfmt):\n dval = oldfmt.split('/')[-1]\n datev = datetime.strptime(dval, \"%Y-%m-%d\")\n return datev.strftime(\"%B %-d, %Y\")", "def text2date(text):\n text = text.strip()\n text = text.replace('&nbsp;', '')\n time_tuple = time.strptime(text + '10', '%b %d, %Y')\n return datetime.date(*(time_tuple[0:3]))", "def format_dates(date, from_sep=\"/\", to_sep=\"-\"): # Tested [N]\n nums = date.split(from_sep)\n return to_sep.join(nums)", "def date_trans_y(x):\n \"\"\"2017.1.09->2017/ 1/09 \"\"\"\n date_list=x.split('.')\n if(int(date_list[1])<10):\n return date_list[0]+'/ '+str(int(date_list[1]))+'/'+date_list[2]\n else:\n return date_list[0]+'/'+date_list[1]+'/'+date_list[2]", "def _dateFmt(self, string):\n return time.strftime('%m/%d', time.strptime(string, '%B %d, %Y'))", "def get_dates(txt):\n txt = re.sub(r'[^\\w\\s]', '', txt)\n txt_token = txt.split()\n return get_dates_from_token_list(txt_token)", "def convert_to_date(dt, format):\n d_datetime = datetime.strptime(dt, format)\n d_date = date(int(d_datetime.strftime('%Y')), \n int(d_datetime.strftime('%m')), \n int(d_datetime.strftime('%d'))) # I know this is awful, I'm sorry\n return d_date", "def format_datetime(dt):\r\n return dateformat.format(make_naive(dt), 'r')", "def date_parser(dates): \n new_dates = []\n for i in range(len(dates)):\n a = dates[i][:10] \n new_dates.append(a)\n return new_dates", "def convert_date(dt):\n if not isinstance(dt, str):\n dt = str(dt)\n parts = re.split(r'\\.|\\_|\\-', dt)\n if len(parts) == 1:\n parts = str(dt).split('.')\n if len(parts[0]) == 4:\n return '.'.join(reversed(parts))\n return str(dt)", "def unify_date_format(date):\n if type(date) == str:\n try:\n date = dateutil.parser.parse(date) \n except:\n pass\n return date", "def bytes_to_dates(self, date_str):\r\n return mpldates.datestr2num(date_str.decode('utf-8'))", "def convertSODate(datenum):\n #Date numbers seem to start with 0 = 2001-01-01\n base_date = 
datetime.date(2001, 1, 1)\n #add key from the spot on object to this base date to get the date\n record_date = base_date + datetime.timedelta(days=int(datenum))\n record_date = record_date.isoformat()\n return record_date", "def dump_datetime(value):\n if value is None:\n return\n return [value.strftime(\"%Y-%m-%d\"), value.strftime(\"%H:%M:%S\")]", "def parseDate(date):\n formats = [\n \"D MMM YY, hh:mm a\", \n \"YYYY-MM-DDTHH:mm:ss+00:00\", \n \"ddd, D MMM YYYY HH:mm:ss +0530\", # NDTV\n \"ddd, D MMM YYYY HH:mm:ss +0100\", # skynews\n \"ddd, D MMM YYYY HH:mm:ss -0400\", # reuters\n \"D MMM, YYYY\", # espn cricket\n \"ddd, D MMM YYYY HH:mm:ss GMT\", # times of india\n \"ddd, D MMM YYYY HH:mm:ss +0200\", # lifrea\n \"ddd, D MMM YYYY HH:mm:ss +0000\", # linux, ubuntu\n \"ddd, D MMM YYYY HH:mm:ss -0700\", # iTunes\n ]\n\n for f in formats:\n try:\n parsed_date = tryDateFormat(date, f)\n return parsed_date.format(\"D MMM YY, hh:mm a\")\n except Exception as e:\n pass\n else:\n return \"Invalid date\"", "def date_prettyfier(self, date):\n units = 'days since 1900-01-01 00:00'\n date = date * 365.25\n date = cftime.num2date(date, units)\n pretty_date = str(date.day)+'/'+str(date.month)+'/'+str(date.year-1900) \n return pretty_date", "def numeric_date_recover(self):\n \n sp_time_zone, current_datetime = self.setup_datetime() \n converter2sptimezone = current_datetime.astimezone(sp_time_zone)\n \n return converter2sptimezone.strftime('%d-%m-%Y')", "def fix_dates(line, date_names, headers):\n date_idxs = [headers.index(date_name) for date_name in date_names]\n for date_idx in date_idxs:\n val = line[date_idx]\n if val:\n # Forget times if they appear\n val = val.split(' ')[0]\n\n # Sometimes, miraculously, the val is *not* in American format:\n try:\n datetime.datetime.strptime(val, '%Y-%m-%d')\n # In the correct format!\n line[date_idx] = val\n continue\n except ValueError:\n # In the American format\n pass\n\n try:\n val = datetime.datetime.strptime(val, '%m/%d/%Y')\n except ValueError:\n # No idea what format this is in. Warn and return None\n print(\"Unreadable date {}\".format(val))\n line[date_idx] = None\n continue\n\n # Sometimes people write dates like 4/1/15. 
Bump the years to the modern era\n if val.year < 50:\n val = datetime.datetime(val.year + 2000, val.month, val.day)\n elif val.year < 100:\n val = datetime.datetime(val.year + 1900, val.month, val.day)\n val = val.strftime('%Y-%m-%d')\n line[date_idx] = val", "def convert_date(date_str):\n return datetime.strptime(date_str, \"%d/%m/%Y\")", "def clean_date(raw_time):\n time_stamp = raw_time.split(\" \")\n time_stamp = str(time_stamp[1]+' '+time_stamp[2]+' '+time_stamp[3]+' '+time_stamp[5])\n clean_date_time = parser.parse(time_stamp)\n return clean_date_time", "def test_convert_date(self):\n self.assertEqual(convert_to_date('2015-11-03 13:21:02.071381',\n FORMAT_DATETIME), date(2015, 11, 3))\n self.assertEqual(convert_to_date('03.11.2015', FORMAT_CALENDAR), date(2015, 11, 3))", "def get_date_from_utterance(tokenized_utterance: List[Token],\n year: int = 1993) -> List[datetime]:\n\n dates = []\n\n utterance = ' '.join([token.text for token in tokenized_utterance])\n year_result = re.findall(r'199[0-4]', utterance)\n if year_result:\n year = int(year_result[0])\n trigrams = ngrams([token.text for token in tokenized_utterance], 3)\n for month, tens, digit in trigrams:\n # This will match something like ``september twenty first``.\n day = ' '.join([tens, digit])\n if month in MONTH_NUMBERS and day in DAY_NUMBERS:\n try:\n dates.append(datetime(year, MONTH_NUMBERS[month], DAY_NUMBERS[day]))\n except ValueError:\n print('invalid month day')\n\n bigrams = ngrams([token.text for token in tokenized_utterance], 2)\n for month, day in bigrams:\n if month in MONTH_NUMBERS and day in DAY_NUMBERS:\n # This will match something like ``september first``.\n try:\n dates.append(datetime(year, MONTH_NUMBERS[month], DAY_NUMBERS[day]))\n except ValueError:\n print('invalid month day')\n\n fivegrams = ngrams([token.text for token in tokenized_utterance], 5)\n for tens, digit, _, year_match, month in fivegrams:\n # This will match something like ``twenty first of 1993 july``.\n day = ' '.join([tens, digit])\n if month in MONTH_NUMBERS and day in DAY_NUMBERS and year_match.isdigit():\n try:\n dates.append(datetime(int(year_match), MONTH_NUMBERS[month], DAY_NUMBERS[day]))\n except ValueError:\n print('invalid month day')\n if month in MONTH_NUMBERS and digit in DAY_NUMBERS and year_match.isdigit():\n try:\n dates.append(datetime(int(year_match), MONTH_NUMBERS[month], DAY_NUMBERS[digit]))\n except ValueError:\n print('invalid month day')\n return dates", "def date_to_ddmmyyyy(dat=\"1981_01_24\", separator=\".\"):\n return f'{dat.split(\"_\")[2]}{separator}{(str(int(dat.split(\"_\")[1]))).zfill(2)}{separator}' \\\n f'{(str(int(dat.split(\"_\")[0]))).zfill(2)}'", "def pretty_date(self, date):\r\n return time.strftime(\"%a, %b %d, %Y\", time.strptime(date,\"%Y%m%d\"))", "def yt_datetime(yt_date_time):\n time_obj = time.strptime(yt_date_time, \"%Y-%m-%dT%H:%M:%S.%fZ\")\n locale_date = time.strftime(\"%x\", time_obj)\n # strip first two digits of four digit year\n short_date = re.sub(r\"(\\d\\d\\D\\d\\d\\D)20(\\d\\d)$\", r\"\\1\\2\", locale_date)\n return time_obj, short_date", "def interpret_date( text ):\n try:\n as_arrow = arrow.get(text, \"MM/DD/YYYY\").replace(\n tzinfo=tz.tzlocal())\n except:\n flask.flash(\"Date '{}' didn't fit expected format 12/31/2001\")\n raise\n return as_arrow.isoformat()", "def convert_time(self, t_variable):\n date_list = []\n times = self.dataset[t_variable].values\n\n for time in times:\n try:\n time = pd.to_datetime(str(time))\n date_list.append(time.strftime('%Y-%m-%dT%H:%M:%SZ'))\n 
except ValueError as ve:\n print(\"Error parsing and converting '%s' variable object to CovJSON compliant string.\" % (t_variable), ve)\n\n return date_list", "def format_datetime(str_date):\n date_fromat = \"%Y-%m-%dT%H:%M:%S\"\n formated_datetime = None\n try:\n datetime_obj = datetime.fromtimestamp(float(str_date)/1000.)\n formated_datetime = datetime_obj.strftime(date_fromat)\n except Exception as exp:\n logger.error('Exception: {} occured while converting date {} into format {}'.format(\n exp,str_date, date_fromat))\n\n return formated_datetime", "def datetime_to_date(element):\r\n try:\r\n result = element.date()\r\n except AttributeError:\r\n result = element\r\n return result", "def _try_date(set_list, index, nested_dict, dict_keys=[], try_func=_try_set):\n import datetime\n try:\n dt = try_func(None, None, nested_dict, dict_keys) # 2012-07-05T00:00:00+04:00\n dt = datetime.datetime.strptime(dt, \"%Y-%m-%dT%H:%M:%S%z\")\n try_func(set_list, index, str(dt.date()))\n print(str(dt.date())+\" sdfsdfsdf\")\n return dt.date() # Дата присвоения кадастрового номера\n except:\n return ''", "def dt64_to_dt(dt64_list):\n\n from datetime import datetime\n ns = 1e-9\n dt_list = []\n for dt64 in dt64_list:\n dt_list.append(datetime.utcfromtimestamp(dt64.astype(int) * ns))\n\n return dt_list", "def date_parser(dates):\n # extract the date only from dates: Olwethu\n date_list = []\n for i in dates:\n i = i.split(' ')\n # append each date to a new list: Olwethu\n date_list.append(i[0])\n \n return date_list" ]
[ "0.73220545", "0.6644235", "0.64673054", "0.63785565", "0.6323779", "0.63159305", "0.6256937", "0.6174311", "0.5986844", "0.58866596", "0.5878423", "0.58775616", "0.58339506", "0.579193", "0.57800907", "0.57769805", "0.5757611", "0.57572365", "0.57409817", "0.5732213", "0.5722158", "0.5721827", "0.56974846", "0.5678983", "0.5675491", "0.5659683", "0.5645013", "0.56444377", "0.56371844", "0.5635159", "0.56315583", "0.5629016", "0.5616264", "0.5613257", "0.5612828", "0.5610813", "0.5604483", "0.5594918", "0.559276", "0.55886567", "0.55878246", "0.55791926", "0.55681765", "0.5558971", "0.5556539", "0.55489814", "0.55321366", "0.55134255", "0.55066586", "0.5479266", "0.54757655", "0.54693776", "0.54598063", "0.54452217", "0.544382", "0.5437103", "0.54230577", "0.54188156", "0.5415565", "0.54024065", "0.5401016", "0.5391929", "0.5386448", "0.5379607", "0.5375183", "0.5365078", "0.53602034", "0.53576064", "0.53491765", "0.5345659", "0.53360564", "0.5327258", "0.5324848", "0.53232974", "0.5314031", "0.53136456", "0.53120357", "0.5311964", "0.5307527", "0.5297301", "0.52962315", "0.5287995", "0.5286612", "0.52853173", "0.52823305", "0.5282284", "0.52821493", "0.52799404", "0.5278009", "0.5275524", "0.5273218", "0.52646196", "0.5253517", "0.5251564", "0.525042", "0.524789", "0.5240238", "0.52376276", "0.52244353", "0.52240235" ]
0.75249213
0
If there exists a record like dbsilomongomaster.alpha.xplain.io, returns the next number (4)
def get_new_service_num(route53_zone, service_name): # Match records belonging to the service for particular service and # environment. match_regex = "(?<={})\d+(?=\.{}\.?)" \ .format(service_name, route53_zone.name) # Initialize with 0 because we want 1-indexing service_nums = [0] for record in route53_zone.get_records(): match = re.search(match_regex, record.name) if match: service_num = int(match.group(0)) service_nums.append(service_num) return max(service_nums) + 1
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_next_match_id():\n conn = get_connect()\n cursor = conn.execute(\"SELECT matchId FROM match WHERE isSearched = 0 LIMIT 1\")\n result_list = cursor.fetchone()\n conn.close()\n if result_list is None:\n print(\"no more matchId to be searched\")\n return None\n else:\n match_id = result_list[0]\n return match_id", "def __next_index():\n return redis_store.incr(String.__name__.lower() + '-index')", "def get_next_account_id():\n conn = get_connect()\n cursor = conn.execute(\"SELECT accountId FROM account WHERE isSearched = 0 LIMIT 1\")\n result_list = cursor.fetchone()\n conn.close()\n if result_list is None:\n print(\"no more accountId to be searched\")\n return None\n else:\n account_id = result_list[0]\n return account_id", "def next_available_number(cls):\n try:\n return cls.objects.latest().number + 1\n except cls.DoesNotExist:\n return 1", "def getNextID(self, d):\n try:\n listOrdered = d.keys()\n listOrdered = funcs.sortStringList(listOrdered)\n lastID = int(listOrdered[-1])\n nextID = str(lastID + 1)\n for i in range(1,int(nextID)):\n if str(i) not in listOrdered:\n return str(i)\n return nextID\n except:\n return '1'", "def get_next_id():\n with open(WORK_LOG_FILENAME, 'r') as work_log:\n work_log_reader = csv.DictReader(work_log)\n entry_id = 0\n for entry in work_log_reader:\n if int(entry['id']) > entry_id:\n entry_id = int(entry['id'])\n entry_id += 1\n return entry_id", "def get_next_identifier(self) -> int:\n if self.items:\n return self.items[-1].identifier + 1\n else:\n return 1", "def get_next_page(url):\n match = re.search(r\".*/(\\d+)_p/\", url)\n if match:\n next_number = int(match.group(1)) + 1\n next_url = urljoin(BEGIN_RENT_LISTINGS, f\"{next_number}_p/\")\n return next_url\n else:\n # the first page has no page index\n return urljoin(BEGIN_RENT_LISTINGS, \"2_p/\")", "def get_first(network):\r\n\treturn IPAddress(IPNetwork(network).first + 1)", "def get_next_if_any(self):\n try:\n ret = self.work[deepcopy(self.i)]\n self.i += 1\n # print \"Trickling item\", self.i\n return ret\n except Exception:\n return None", "def test_uidnext(self):\n d = self._examineOrSelect()\n self._response(b'* OK [UIDNEXT 4392] Predicted next UID')\n self.assertEqual(\n self.successResultOf(d),\n {'READ-WRITE': False, 'UIDNEXT': 4392})", "def _ns_nextid(self):\n return self._ns(\"nextid\")", "def getSlipNum():\n query = Slip.query()\n results = query.fetch(limit = MAX_SLIPS)\n temp = 0\n for result in results:\n if result.number > temp:\n temp = result.number\n slipNum = temp\n slipNum += 1\n return slipNum", "def next_id(self):\n self.id_counter += 1\n return self.id_counter - 1", "def next_id(self):\n try:\n return Report.objects.filter(id__gt=self.id).order_by(\"id\").first().id\n except Exception:\n return False", "def get_next_id():\n global _lock, _counter\n with _lock:\n if _counter == 65535:\n _counter = 1\n else:\n _counter += 1\n\n return str(_counter)", "async def _find_next_scene():\n next_scene = 20\n scenes = await async_get_scenes()\n while next_scene in scenes:\n next_scene += 1\n return next_scene", "def get_next_id(self):\n con = self.c._connect()\n last_id = self.c.get_last_id(con.cursor())\n con.close()\n return last_id + 1", "def test_get_next_sequence():\n app = create_app()\n seq = utils.get_next_sequence(\"test_name123\")\n unique_string = uuid.uuid4().hex[:6].upper()\n\n #Check if the next sequence a valid sequence. 
\n\n # It must start with number 0.\n assert utils.get_next_sequence(unique_string) == 0\n\n # It must return allways next sequence.\n assert utils.get_next_sequence(\"test_name123\") == seq + 1", "def advance(cls, seq_name, first=1):\n doc = cls.objects.coll.find_and_modify(\n {'name':seq_name},\n {'$inc': {'_next': 1}},\n upsert=True,\n new=True)\n return doc['_next']", "def getNextOrderNum(cur,vID):\n orderNum = execute_query(cur,\"\"\"SELECT Count(*) FROM OpenTasks where vID = ?\"\"\", [vID])[0][0]\n orderNum = int(orderNum) + 1\n return orderNum", "def get_next_alternate_shot_number(self, shot_number):\n \n # get the shot_number list\n alternate_letters = 'ABCDEFGHIJKLMNOPRSTUVWXYZ'\n \n for letter in alternate_letters:\n #check if the alternate is in the list\n \n new_shot_number = str(shot_number) + letter\n \n shot_from_db = Shot.query().\\\n filter(Shot.sequence_id==self.id).\\\n filter(Shot.number==new_shot_number).\\\n first()\n \n if not shot_from_db:\n return new_shot_number\n \n return None", "def load_next_posting_id():\n if os.path.exists(\"next_posting_id.txt\"):\n f = file(\"next_posting_id.txt\", \"r\")\n s = f.read().strip()\n f.close()\n try:\n return int(s)\n except ValueError:\n return None\n else:\n return None", "def getrecord():\n global nrecord, totrecords, EOF\n \n nrecord = nrecord + 1\n if nrecord > totrecords:\n EOF = True\n return ()\n return records[nrecord-1]", "def _get_next_sequence_number(self):\n cur = self._next_sequence_number\n self._next_sequence_number += 1\n return cur", "def _get_next_venue_id():\n VenueCrawler._venue_id += 1\n return VenueCrawler._venue_id", "def alexa_rank(url):\n return ALEXA_MAP.index(url)", "def get_next_position(self):\n return self.record_manager.get_max_record_id() or 0", "def generate_next_invoice_number(obj):\n queryset = obj.__class__.objects.filter(year=obj.year, company=obj.company)\n max = queryset.aggregate(Max('number')).values()[0]\n if max is None:\n max = 0\n return (max + 1)", "def _GetNextId(self):\r\n ret = self.next_id\r\n self.next_id += 1\r\n return str(self.next_id)", "def get_next_sequence(sequence_name):\n\n client = pymongo.MongoClient()\n db = client[settings.DB_NAME]\n\n # Create a new sequence if needed\n db.sequences.find_one_and_update(\n {'_id': sequence_name},\n {'$setOnInsert': {'seq': 0}},\n upsert=True\n )\n\n ret = db.sequences.find_one_and_update(\n {'_id': sequence_name},\n {'$inc': {'seq': 1}},\n projection={'seq': True, '_id': False},\n return_document=pymongo.ReturnDocument.AFTER\n )\n\n return ret['seq']", "def getNextFixOrderNum(cur,vID):\n orderNum = execute_query(cur, \"\"\"SELECT Count(*) FROM OpenTasks where vID = ? 
and fixTask = 1\"\"\", [vID])[0][0]\n orderNum = int(orderNum) + 1\n return orderNum", "def _next_id(self, identifier: Identifier) -> Optional['Identifier']:\n next_id = None\n if identifier.year is not None and \\\n identifier.month is not None and \\\n identifier.num is not None:\n new_year = identifier.year\n new_month = identifier.month\n new_num = identifier.num + 1\n if (identifier.is_old_id and new_num > 999) \\\n or (not identifier.is_old_id\n and identifier.year < 2015\n and new_num > 9999) \\\n or (not identifier.is_old_id\n and identifier.year >= 2015 and new_num > 99999):\n new_num = 1\n new_month = new_month + 1\n if new_month > 12:\n new_month = 1\n new_year = new_year + 1\n\n if identifier.is_old_id:\n next_id = '{}/{:02d}{:02d}{:03d}'.format(\n identifier.archive, new_year % 100, new_month, new_num)\n else:\n if new_year >= 2015:\n next_id = '{:02d}{:02d}.{:05d}'.format(\n new_year % 100, new_month, new_num)\n else:\n next_id = '{:02d}{:02d}.{:04d}'.format(\n new_year % 100, new_month, new_num)\n try:\n return Identifier(arxiv_id=next_id)\n except IdentifierException:\n return None\n else:\n return None", "def FindNext():\r\n return _hiew.HiewGate_FindNext()", "def get_next_id(identifier: Identifier) -> Optional[Identifier]:\n return current_session().get_next_id(identifier)", "def next_id(self):\n next_id = self._nextid\n self._nextid += 1\n return next_id", "def countPlayers():\n c.execute(\"SELECT count(*) FROM players\");\n num = c.fetchone()[0]\n print num\n if num == 1:\n print \"There is only %d player registered\" % (num)\n else:\n print \"There are %d players registered\" % (num)\n return num\n # except IndexError:\n # if num == 1:\n # print \"There is only %d player registered\" % (num)\n # else:\n # print \"There are %d players registered\" % (num)", "def next_identity(self) -> PublicationId:\n ...", "def _get_next_pk(self, last_pk):\n ans = self.execute(self.commands.next_pk(\n self.name,\n self.primary_key_column,\n last_pk,\n self.chunk_size\n ))[0][0]\n return ans", "def has_next():", "def has_next():\n\n return True", "def _get_song_index(self, song_index): \n if self.randomise:\n song_index = randint(1, self.num_songs) - 1\n else:\n if (song_index + 1) == self.num_songs:\n if self.loop_songs:\n song_index = 0\n else:\n return None\n else:\n song_index += 1\n return song_index", "def next_node_id(self) -> int:\n i = 1\n while True:\n if i not in self.session.nodes:\n break\n i += 1\n return i", "def next_num(self):\n return self.page + 1", "def next_song(sid):\n try:\n # Get the host data from the database\n db = sqlite3.connect('/home/tropius/TROPIUS/TROPIUS.db')\n host = hosts.get_detail(db, sid)\n spotify.next(host['ip'])\n return jsonify({})\n except:\n abort(400)", "def next_address():\n\t\tkeylist = vessel_list.keys()\n\t\tcurrentkey = keylist.index(str(node_id))\n\t\treturn vessel_list[keylist[(currentkey+1)%len(keylist)]]", "def placaLliure():\n lliures = []\n con = lite.connect('parking.db')\n cur = con.cursor()\n try:\n cur.execute(\"SELECT placa FROM parking ORDER BY placa ASC\")\n rows = cur.fetchall()\n for row in rows:\n lliures.append(row[0])\n print lliures\n for i in range(1,len(lliures)+1):\n if i != lliures[i-1]:\n result= i\n break\n except lite.IntegrityError:\n pass\n con.close()\n return result", "def __find_session_num():\n\n # figure out number of sessions that have already been recorded\n for (root, dirs, files) in os.walk(CONFIG['recording_path'], topdown=True):\n nextSession = 1\n\n for d in dirs:\n try:\n\n if 
int(d.split('Session')[1]) >= nextSession:\n nextSession = int(d.split('Session')[1]) + 1\n\n except ValueError as verr:\n print('Directory ' + str(d) + ' does not end in a number!')\n\n if nextSession > 99:\n return -1\n return nextSession", "def has_at_index(self, index):\n count = 0\n start = self.head\n while start:\n if count==index:\n return start\n start = start.getLink()\n count+=1\n return None", "def _next_rId(self):\n tmpl = 'rId%d'\n next_rId_num = 1\n for relationship in self._values:\n if relationship._num > next_rId_num:\n return tmpl % next_rId_num\n next_rId_num += 1\n return tmpl % next_rId_num", "def get_next_Brow_mark(Bn, cur):\n cur.execute(\"SELECT value, mass FROM %s WHERE exists=1 LIMIT 1;\" % Bn)\n return cur.fetchone()", "def next_id(self):\n return self.max_id + 1", "def next_invoice_number(cls, user):\n cur_max = cls.query.filter_by(user_id=user.id).count()\n cur_max += 1\n\n return str(cur_max)", "def get_next_index(self, current_index_string):\n # current index is a string, so cast to int\n current_index = int(current_index_string)\n\n return current_index+1", "def _get_next_event_id():\n VenueCrawler._event_id += 1\n return VenueCrawler._event_id", "def get_most_surfed_page(records):\n uris = {}\n for r in records:\n if r.code != 408:\n uris[r.uri] = uris.get(r.uri, 0) + 1\n max_req = 0\n max_uri = None\n for k,v in uris.items():\n if v > max_req:\n max_req, max_uri = v, k\n print(max_req)\n return max_uri", "def test_nonIntegerUIDNEXT(self):\n d = self._examineOrSelect()\n self._response(b'* OK [UIDNEXT foo] Predicted next UID')\n self.failureResultOf(d, imap4.IllegalServerResponse)", "def get_number(sequence):\n for i in sequence:\n if i.isnumeric() and sequence.count(i) % 2 != 0:\n return i\n return None", "def url_index(row):\n for index, data in enumerate(row):\n if 0 < len(urlparse(data)[1]) and 'www' in data or 'http' in data:\n return index\n\n # No url was found, so we use id\n return -2", "def find(self, number):\n cur_node = self.head\n while cur_node is not None:\n if number == cur_node.data.number():\n return cur_node.data\n cur_node = cur_node.next\n return -1", "def getOccurence(self) -> int:\n ...", "def next_id(arr):\n for x in range(len(arr) + 1):\n if x not in arr:\n return x", "def findRecord(db, col, query):\n\n # result = result.to_dict(\"records\")\n conn = MongoClient(\"localhost\", 27017)\n connObj = conn[db][col]\n exist_count = connObj.find(query, {'_id': 0}).count()\n conn.close()\n if exist_count >= 1:\n return True\n else:\n return False", "def test_business_next_identifier(session, business_type, expected):\n identifier = BusinessIdentifier.next_identifier(business_type)\n\n if expected:\n legal_type = identifier[:re.search(r\"\\d\", identifier).start()]\n assert legal_type in BusinessType\n assert identifier[identifier.find(legal_type) + len(legal_type):].isdigit()\n else:\n assert identifier is None", "def getNextId(db, idName, tableName, labId):\n\n # create db cursor\n cursor = db.cursor()\n \n # Find existing keys in the valid range for this lab\n # If not found, return the lowest id, ie. 
the laboratory offset\n # else get the highest id and add 1 and care for range overflow\n sql = \"\"\"\n SELECT IF(max(%s.%s) >= Laboratory.offset, \n IF(max(%s.%s) < Laboratory.offset + Laboratory.range, max(%s.%s)+1, -1),\n NULL )\n FROM %s , Laboratory \n WHERE Laboratory.lab_id = %s AND \n %s.%s >= Laboratory.offset AND\n %s.%s <= Laboratory.offset+Laboratory.range;\n \"\"\"\n cursor.execute(sql % (tableName, idName,\n tableName, idName,\n tableName, idName,\n tableName, labId,\n tableName, idName,\n tableName, idName))\n\n nextId = cursor.fetchone()[0]\n if nextId is None:\n # no id found, we start from the offset value of the lab\n sql = \"SELECT Laboratory.offset FROM Laboratory WHERE Laboratory.lab_id = %s\"\n cursor.execute(sql % labId)\n return cursor.fetchone()[0]\n else:\n if nextId < 0:\n message = \"Range overflow for %s in the %s table for laboratory %s \" % (idName, tableName, labId)\n raise Exception(message)\n else:\n return nextId", "def new_id(self):\n self.next += 1\n return self.next", "def getNextIndex(self, indexName):\n self._db._c.execute(\"SELECT {} FROM {}\".format(indexName,\n self.tableName))\n IDs = self._db._c.fetchall()\n\n if len(IDs) == 0:\n return '1'\n\n # Generate a number one larger than the largest current ID.\n newID = 0\n for ID in IDs:\n newID = max(newID, int(ID[0]))\n newID += 1\n\n # Return that.\n return str(newID)", "def look_up(self, val):\n index = 0\n if self.head is None:\n print(\"List is empty\")\n start = self.head\n while start is not None:\n if start.data == val:\n return index\n start = start.next\n index += 1\n return \"No such element\"", "def _get_next_cust_id():\n # print('Customer roster: ' + str(customers))\n key_list = []\n for customer_key in customers:\n stripped_prefix = customer_key[1:]\n # print('Adding key: ' + str(stripped_prefix))\n key_list.append(stripped_prefix)\n key_list.sort()\n last_id = int(key_list[-1])\n return 'C' + str(last_id + 1)", "def next_num():\r\n CHModuleFactory.num += 1\r\n return CHModuleFactory.num", "def next_num():\r\n CHModuleFactory.num += 1\r\n return CHModuleFactory.num", "def next(self):\n res = self._cursor.fetchone()\n if res:\n self.id = res[0]\n self.book = res[1]\n self.pos = res[2]\n self.content = res[3]\n return True\n\n return False", "def get_next_client_index(self, write=True):\r\n if write or len(self._server) == 1:\r\n return 0\r\n\r\n return random.randint(1, len(self._server) - 1)", "def _determine_next_ott_id(self):\n if self._doc_counter_lock is None:\n self._doc_counter_lock = Lock()\n with self._doc_counter_lock:\n _LOG.debug('Reading \"{}\"'.format(self._id_minting_file))\n noi_contents = self._read_master_branch_resource(self._id_minting_file, is_json=True)\n if noi_contents:\n self._next_ott_id = noi_contents['next_ott_id']\n else:\n raise RuntimeError('Stored ottid minting file not found (or invalid)!')", "def getUrlRank(trail_id,userEmail,url):\n sql = \"\"\" SELECT rank\n FROM datawake_url_rank\n WHERE trail_id = %s and userEmail = %s AND url = %s\n \"\"\"\n params = [trail_id,userEmail,url]\n rows = dbGetRows(sql, params)\n if len(rows) == 0:\n return 0\n else:\n return rows[0][0]", "def get_account_number(raw_data):\r\n return len(raw_data)//4", "def _next_id(self):\n # modulo to keep within int32 (signed)\n self.correlation_id = (self.correlation_id + 1) % 2**31\n return self.correlation_id", "def reserve_next_agent_id(self):\n query = \"SELECT NEXTVAL(pg_get_serial_sequence('agents', 'agent_id'))\"\n cur = self.conn.cursor()\n cur.execute(query)\n 
self.conn.commit()\n return cur.fetchone()[0]", "def next():", "def next():", "def next_sparse_incremental(sparse_number):\n\n\n # limit is arbitrary in Python\n # http://stackoverflow.com/questions/5470693/python-number-limit\n limit = 2 ** 32\n for possible_sparse in range(sparse_number + 1, limit):\n if is_sparse(possible_sparse):\n return possible_sparse\n return None", "def getNext(nID):\n\n # find children \n query = q.format(nID)\n neoNodes, metadata = cypher.execute(G, query)\n # randomly choose 1\n r = random.randrange(0,len(neoNodes))\n return neoNodes[r][1]._id", "def next_link(self) -> Optional[str]:\n return pulumi.get(self, \"next_link\")", "def next_index(state):\n node = state\n for key in (\"layers\", \"index\"):\n node = node.get(key, {})\n indices = [key for key in node.keys()]\n if len(indices) == 0:\n return 0\n else:\n return max(indices) + 1", "def go_next(i: int, z_result: list[int], s: str) -> bool:\n return i + z_result[i] < len(s) and s[z_result[i]] == s[i + z_result[i]]", "def get_first_n_pending_links(self, number):\n try:\n conn = psycopg2.connect(\"dbname='{0}'\".format(DATABASE))\n cur = conn.cursor(cursor_factory=psycopg2.extras.RealDictCursor)\n cur.execute(\"SELECT link FROM link WHERE chunk_id IS NULL AND state = 'pending' ORDER BY index LIMIT %s;\", (number,))\n results = cur.fetchall()\n cur.close()\n return results\n except Exception as e:\n print(e)", "def has_next(self):\n if self._count is not None:\n # If count is available, use it\n return bool(self._count)\n else:\n # otherwise we have no idea\n return True", "def next():\n iraw = 0\n run = 0\n seqno = 0\n with db_connection() as conn:\n with conn.cursor() as curs:\n try:\n curs.execute(\"\"\"SELECT rawdata.id,rawdata.run,rawdata.seqno,\n slices.block1,slices.block2,\n jobs.cluster,jobs.process\n FROM rawdata\n LEFT JOIN bindings\n ON bindings.iraw = rawdata.id\n INNER JOIN slices\n ON slices.iraw = rawdata.id\n LEFT JOIN jobs\n ON slices.ijob = jobs.id\n AND jobs.exitcode = 0\n WHERE bindings.id IS NULL\n ORDER BY rawdata.id,slices.block1\n LIMIT 2000;\n \"\"\")\n slices_missing = 1\n for row in curs.fetchall():\n i = int(row[0])\n if i != iraw:\n if slices_missing == 0:\n break\n else:\n run = int(row[1])\n seqno = int(row[2])\n slices_missing = 0\n slices = []\n iraw = i\n block1 = int(row[3])\n block2 = int(row[4])\n if row[5] is not None and row[6] is not None:\n cluster = int(row[5])\n process = int(row[6])\n slices.append((block1,block2,cluster,process))\n else:\n print(\"slices missing on\", row[5], row[6])\n slices_missing += 1\n if slices_missing:\n return 0\n else:\n curs.execute(\"SELECT TIMEZONE('GMT', NOW());\")\n now = curs.fetchone()[0]\n curs.execute(\"\"\"INSERT INTO bindings\n (iraw,starttime)\n VALUES (%s,%s)\n RETURNING id;\n \"\"\", (iraw, now))\n row = curs.fetchone()\n if row:\n ibind = int(row[0])\n else:\n return 0\n except:\n iraw = 0\n if iraw == 0:\n time.sleep(random.randint(1,30))\n return -9 # collision\n\n workdir = str(iraw)\n os.mkdir(workdir)\n os.chdir(workdir)\n badslices = []\n for sl in slices:\n sdir = str(sl[0]) + \",\" + str(sl[1])\n os.mkdir(sdir)\n tarfile = \"job_{0}_{1}.tar.gz\".format(sl[2], sl[3])\n tarpath = input_area + \"/\" + tarfile\n try:\n subprocess.check_output([\"gfal-copy\", src_url + tarpath,\n \"file://\" + os.getcwd() + \"/\" + tarfile])\n except:\n sys.stderr.write(\"Error -999 on rawdata id {0}\".format(iraw) +\n \" - job output \" + tarfile + \" is missing!\\n\")\n sys.stderr.flush()\n badslices.append(sdir)\n continue\n try:\n 
subprocess.check_output([\"tar\", \"zxf\", tarfile, \"-C\", sdir])\n except:\n sys.stderr.write(\"Error -999 on rawdata id {0}\".format(iraw) +\n \" - job output \" + tarfile + \" is not readable!\\n\")\n sys.stderr.flush()\n badslices.append(sdir)\n finally:\n os.remove(tarfile)\n if len(badslices) > 0:\n with db_connection() as conn:\n with conn.cursor() as curs:\n curs.execute(\"SELECT TIMEZONE('GMT', NOW());\")\n now = curs.fetchone()[0]\n curs.execute(\"\"\"UPDATE bindings\n SET endtime=%s, \n exitcode=%s,\n details=%s\n WHERE id = %s;\n \"\"\", (now, -999, \":\".join(badslices), ibind))\n os.chdir(\"..\")\n shutil.rmtree(workdir)\n return 1\n\n badslices += merge_evio_skims(run, seqno, slices)\n badslices += merge_hddm_output(run, seqno, slices)\n badslices += merge_job_info(run, seqno, slices)\n badslices += merge_root_histos(run, seqno, slices)\n exitcode = -len(badslices)\n with db_connection() as comm:\n with conn.cursor() as curs:\n curs.execute(\"SELECT TIMEZONE('GMT', NOW());\")\n now = curs.fetchone()[0]\n curs.execute(\"\"\"UPDATE bindings\n SET endtime=%s,\n exitcode=%s,\n details=%s\n WHERE id = %s;\n \"\"\", (now, exitcode, \":\".join(badslices), ibind))\n os.chdir(\"..\")\n shutil.rmtree(workdir)\n return 1", "def getNextId(pre, now, dest):\r\n if now == 1:\r\n return FRONT, 2\r\n else:\r\n return RIGHT, 3", "def name_to_pindex(session, name):\n for i, player in enumerate(session.players):\n if name == player.name:\n return i\n return 'Not found.'", "def locateObjNumber(data, questionDict):\n how = questionDict['how']\n many = questionDict['many']\n for t in range(data.shape[0] - 2):\n if data[t, 0] == how and \\\n data[t + 1, 0] == many:\n return data[t + 2, 0]\n print 'not found'", "def alloc_docid(self):\n self.docid = hex(self.client.incr(self.dbprefix + 'nextid'))[2:]\n return self.docid", "def next_num(cls):\r\n cls.num += 1\r\n return cls.num", "def test_get_next_address_interconnect(site):\n net_24 = models.Network.objects.create(site=site, cidr=u'10.20.30.0/24')\n net_31 = models.Network.objects.create(site=site, cidr=u'10.20.30.0/31')\n net_64 = models.Network.objects.create(site=site, cidr=u'2001:db8::/64')\n net_127 = models.Network.objects.create(site=site, cidr=u'2001:db8::/127')\n\n for obj in (net_24, net_31, net_64, net_127):\n obj.refresh_from_db()\n\n ## IPv4\n # /24 should return .1 and .2\n slash24 = [u'10.20.30.1/32', u'10.20.30.2/32']\n expected = [ipaddress.ip_network(n) for n in slash24]\n assert net_24.get_next_address(num=2) == expected\n\n # /31 should return .0 and .1\n slash31 = [u'10.20.30.0/32', u'10.20.30.1/32']\n expected = [ipaddress.ip_network(n) for n in slash31]\n assert net_31.get_next_address(num=2) == expected\n\n ## IPv6\n # /64 should return :1 and :2\n slash64 = [u'2001:db8::1/128', u'2001:db8::2/128']\n expected = [ipaddress.ip_network(n) for n in slash64]\n assert net_64.get_next_address(num=2) == expected\n\n # /127 should return :0 and :1\n slash127 = [u'2001:db8::/128', u'2001:db8::1/128']\n expected = [ipaddress.ip_network(n) for n in slash127]\n assert net_127.get_next_address(num=2) == expected", "def get_next():\n try:\n app.logger.info(\"GET request to get_next action\")\n data = request.get_json(force=True)\n\n if \"key\" not in data:\n app.logger.info(f\"No key present in request body\")\n return jsonify({\n \"success\": False,\n \"message\": \"No key present in request body\"\n }), 401\n\n key = data[\"key\"]\n\n if os.getenv(\"KEY\") != key:\n app.logger.info(f\"Invalid key {key} for retrieving queue number\")\n 
return jsonify({\n \"success\": False,\n \"message\": \"Invalid key\"\n }), 401\n\n app.logger.info(f\"Generating number...\")\n\n counter = Counter.get_counter()\n\n number_key = generate_random_key()\n number = Number(counter.get(), number_key)\n\n # Save Number in the database\n db.session.add(number)\n db.session.commit()\n\n # Increment counter\n counter.increment()\n db.session.commit()\n\n app.logger.info(\n f\"Number {number.position} generated with key {number_key}\"\n )\n\n return jsonify({\n \"success\": True,\n \"number\": number.position,\n \"key\": number_key\n }), 200\n\n except Exception as err:\n app.logger.error(err)\n return jsonify({\"success\": False}), 500", "def get_or_add(self, s):\n if s not in self.str2Id:\n self.id2Str[self.nextId] = s\n self.str2Id[s] = self.nextId\n self.id2freq[self.nextId] = 1\n self.nextId += 1\n else:\n self.id2freq[self.str2Id[s]] += 1\n return self.str2Id[s]", "def next_sequence_num(buf=[0]): # use list as default value to make sure it is\n # initialized only once\n val = buf[0]\n buf[0] += 1\n return val", "def next(self) -> int:\n self.idx += 1\n return self.m[self.idx]", "def next(self) -> int:\n self.idx += 1\n return self.m[self.idx]", "def next_identity(self) -> OrganisationId:\n ...", "def next(self) -> int:\n value = self.inorder[self.index]\n self.index = self.index + 1\n return value" ]
[ "0.6664259", "0.61382604", "0.60998386", "0.5928929", "0.5735149", "0.5713186", "0.5704618", "0.55747485", "0.5572003", "0.55479425", "0.55387664", "0.5533251", "0.5508651", "0.54968625", "0.5493598", "0.54908", "0.5483527", "0.5479922", "0.54581887", "0.5456278", "0.5432718", "0.5421172", "0.54131186", "0.5404709", "0.53998244", "0.5390621", "0.5366867", "0.53556365", "0.53539586", "0.5342823", "0.53400534", "0.53392285", "0.5321633", "0.5320651", "0.5314987", "0.53042394", "0.52971506", "0.5284966", "0.52779394", "0.52776647", "0.5269982", "0.52519375", "0.5249897", "0.5244664", "0.52261424", "0.52250576", "0.522472", "0.52213466", "0.5213312", "0.5211256", "0.52091503", "0.5191712", "0.5185061", "0.5175434", "0.51665163", "0.5161963", "0.51553494", "0.51448303", "0.51401097", "0.51398546", "0.5135811", "0.51332957", "0.5110309", "0.5108172", "0.510458", "0.5097648", "0.5089549", "0.50884277", "0.5087955", "0.50810236", "0.50810236", "0.50715476", "0.50685436", "0.5054922", "0.50315243", "0.50300825", "0.5015593", "0.5000254", "0.49961272", "0.49961272", "0.49843213", "0.49838987", "0.49814937", "0.4981453", "0.49728394", "0.4971808", "0.4965311", "0.49621567", "0.49599063", "0.49587882", "0.49541292", "0.495352", "0.49471173", "0.4941537", "0.49386856", "0.4938622", "0.49336576", "0.49297062", "0.49297062", "0.49292302", "0.49290264" ]
0.0
-1
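A minimal, self-contained sketch of the numbering logic used by get_new_service_num above: the same lookbehind/lookahead regex and max()+1 step, applied to a hard-coded list of record names instead of a live Route53 zone. The zone name, service name, and record names below are illustrative assumptions, not values taken from a real zone.

import re

# Hypothetical zone and records (assumed for illustration only).
zone_name = "alpha.xplain.io"
service_name = "dbsilomongomaster"
record_names = [
    "dbsilomongomaster1.alpha.xplain.io.",
    "dbsilomongomaster2.alpha.xplain.io.",
    "dbsilomongomaster3.alpha.xplain.io.",
    "webapp1.alpha.xplain.io.",  # different service, ignored by the regex
]

# Same pattern as in the document above: digits wedged between the
# service name and the zone name.
match_regex = r"(?<={})\d+(?=\.{}\.?)".format(service_name, zone_name)

service_nums = [0]  # seed with 0 so the first service instance gets number 1
for name in record_names:
    match = re.search(match_regex, name)
    if match:
        service_nums.append(int(match.group(0)))

print(max(service_nums) + 1)  # -> 4, matching the query above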
Check if a record exists matching the service pattern with the current host's ip
def record_exists(route53_zone, service_name, ip): # Match records belonging to the service for particular service and # environment. match_regex = "{}\d+\.{}\.?".format(service_name, route53_zone.name) for record in route53_zone.get_records(): match = re.match(match_regex, record.name) if match and ip in record.resource_records: return True return False
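A minimal sketch of the existence check performed by record_exists above, run against stubbed record objects rather than a live Route53 zone. The FakeRecord type, the zone/service names, and the IPs are illustrative assumptions.

import re
from collections import namedtuple

# Stand-in for a boto Route53 record: just a name and its resource records.
FakeRecord = namedtuple("FakeRecord", ["name", "resource_records"])

zone_name = "alpha.xplain.io"
service_name = "dbsilomongomaster"
records = [
    FakeRecord("dbsilomongomaster1.alpha.xplain.io.", ["10.0.0.11"]),
    FakeRecord("dbsilomongomaster2.alpha.xplain.io.", ["10.0.0.12"]),
]

def fake_record_exists(records, zone_name, service_name, ip):
    # Same pattern as in the document above: service name, a number, then the zone.
    match_regex = r"{}\d+\.{}\.?".format(service_name, zone_name)
    for record in records:
        if re.match(match_regex, record.name) and ip in record.resource_records:
            return True
    return False

print(fake_record_exists(records, zone_name, service_name, "10.0.0.12"))  # True
print(fake_record_exists(records, zone_name, service_name, "10.0.0.99"))  # False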
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def cusip_exists(record):\n cusips = helper.query_db('instruments', 'cusip')\n assert record['cusip'] in cusips", "def name_matches_ip(name, ip, state):\n for client in state['clients']:\n if client['name'] == name:\n if client['ip'] == ip:\n return True\n else:\n return False\n return False", "def _check_host_existence(self, hostname: str) -> bool:\n with self.lock:\n hosts = self.hosts.all()\n for host in hosts:\n if host['hostname'] == hostname:\n return True\n return False", "def member_exists(self, service, bigip):\n pool = self.service_adapter.get_pool(service)\n member = self.service_adapter.get_member(service)\n part = pool[\"partition\"]\n try:\n p = self.pool_helper.load(bigip,\n name=pool[\"name\"],\n partition=part)\n\n m = p.members_s.members\n if m.exists(name=urllib.quote(member[\"name\"]), partition=part):\n return True\n except Exception as e:\n # log error but continue on\n LOG.error(\"Error checking member exists: %s\", e.message)\n return False", "def _host_exists(self, host_name):\n hosts = self.host_obj.search_by_name(host_name)\n\n if len(hosts) > 0:\n for host in hosts:\n hostname = host['match']\n if host_name == hostname:\n return hostname\n return hostname\n LOG.debug(\"no host found for:\" + host_name)\n return None", "def match_api_keys(key, ip):", "def exist(self, key):\n record = self._storage.get(key, None)\n if record:\n return record.ttl >= time.time()\n return False", "def canDo_url(self, url):\n hostname = urlparse.urlsplit(url)[1]\n for hostEnd in self.highwireHosts:\n if hostname.endswith(hostEnd):\n logging.log(5, 'url hostname %s ends with %s -> highwire' % (hostname, hostEnd))\n return True\n\n if hostname in self.hostCache:\n ipAddr = self.hostCache[hostname]\n else:\n logging.debug('Looking up IP for %s' % hostname)\n try:\n ipAddr = socket.gethostbyname(hostname)\n self.hostCache[hostname] = ipAddr\n except socket.gaierror:\n raise pubGetError('Illegal hostname %s in link' % hostname, 'invalidHostname', hostname)\n\n ipParts = ipAddr.split('.')\n ipParts = [ int(x) for x in ipParts ]\n result = ipParts[0] == 171 and ipParts[1] in range(64, 68)\n if result == True:\n logging.log(5, 'hostname %s is highwire host' % hostname)\n return result", "def has_host(self, host):\n assert type(host) is str, 'Wrong type for [host], should be a string [was {0}]'.format(type(host))\n assert 'scan' in self._scan_result, 'Do a scan before trying to get result !'\n\n if host in list(self._scan_result['scan'].keys()):\n return True\n\n return False", "def match(self, _ip):\n try:\n return bool(ip_address(_ip) in self.network)\n except ValueError:\n return False", "def _check_queryinfo_existence(self, hostname: str, job: str) -> bool:\n with self.lock:\n hosts = self.host_query_info.all()\n for host in hosts:\n if host['hostname'] == hostname and host['job'] == job:\n return True\n return False", "def check_table(self, ip:str='127.0.0.1', date:str='2000-01-01'):\n stmt=\"SELECT COUNT(*) FROM aws_ip_list WHERE ip='%s' AND create_date='%s'\" % (ip, date)\n self.cur.execute(stmt)\n return self.cur.fetchall()[0][0]", "def record_exists(self, record):\n record_exists = False\n\n logging.debug('Check if record exists in table')\n if not self._dbconnect or not self._cursor:\n raise Exception('Invalid call to Context Manager method!')\n\n date = record.get('date', '')\n time = record.get('time', '')\n location = record.get('location', '')\n node_id = record.get('nodeID', '')\n\n self._cursor.execute(\"\"\"SELECT count(*) FROM {} WHERE \\\n date == ? and time = ? 
and location = ? and nodeID = ?\"\"\".format(self._name), (date, time, location, node_id))\n\n if self._cursor.fetchone()[FIRST_ROW] == SINGLE_RECORD:\n record_exists = True\n\n logging.debug('Record exists? : {}'.format(record_exists))\n return record_exists", "def query_host(self, name):\n z = dns.zone.from_xfr(dns.query.xfr(self.server_address, self.domain))\n try:\n z.find_node(name)\n return True\n except KeyError:\n return False", "def exists(self, conn, key):\n return conn.exists(key)", "def contains_addr(self, addr):\n return self.find_loadable_containing(addr) is not None", "def find_by_status(self, host, state):", "def raw_exist(self, table: str, data: dict) -> bool:\n table_check_limits = {'temperature': 2, 'pressure': 1}\n request = '''SELECT * \n FROM %s \n WHERE datetime=:datetime \n AND service=:service \n ORDER BY timestamp DESC \n LIMIT %s''' % (table, table_check_limits[table])\n self.c.execute(request, {\n 'table': table,\n 'datetime': data['datetime'],\n 'service': data['service'],\n })\n db_data = self.c.fetchall()\n\n if len(db_data) == 1:\n if data['value'] == db_data[0][3]:\n return True\n else:\n return False\n\n if len(db_data) == 2:\n result = False\n # comparing timestamps.\n if db_data[0][1][0:16] == db_data[1][1][0:16]:\n for raw in db_data:\n if data['value'] == raw[3]:\n result = True\n else:\n if data['value'] == db_data[0][3]:\n result = True\n return result", "def containsip(url):\r\n try:\r\n if ip.ip_address(url):\r\n return 1\r\n except:\r\n return 0", "def check_input(data):\n if data.has_key('fqdn') and data.has_key('ip'):\n\n try:\n socket.inet_aton(data['ip'])\n return True\n except socket.error:\n return False", "def exists(self):\n query = db.session.query(Farmer.btc_addr)\n return query.filter(Farmer.btc_addr == self.btc_addr).count() > 0", "def _has_endpoint(self, endpoint):\n return self.endpoints.filter(pk=endpoint.pk).exists()", "def isOverlappingWithAnyDynamicEntry(ipAddress):\n\n config_db = ConfigDBConnector()\n config_db.connect()\n\n ip = int(ipaddress.IPv4Address(ipAddress))\n nat_pool_dict = config_db.get_table('NAT_POOL')\n\n if not nat_pool_dict:\n return False\n\n for values in nat_pool_dict.values():\n global_ip = values[\"nat_ip\"]\n ipAddr = global_ip.split('-')\n if (len(ipAddr) == 1):\n startIp = int(ipaddress.IPv4Address(ipAddr[0]))\n endIp = int(ipaddress.IPv4Address(ipAddr[0]))\n else:\n startIp = int(ipaddress.IPv4Address(ipAddr[0]))\n endIp = int(ipaddress.IPv4Address(ipAddr[1]))\n\n if ((ip >= startIp) and (ip <= endIp)):\n return True\n\n return False", "def matchIP(self, ip):\n return self._ip == ip", "def record_exists(self, date):\n for record in self.records:\n if self.date_str == record[\"date\"]:\n return True\n return False", "def test_getdnsrecord_notfound(kasserver):\n assert not kasserver.get_dns_record(\"www.example.com\", \"MX\")", "def has(self, hostname: str) -> bool:\n for hostinfo in self.hostinfo_list:\n if hostinfo.hostname == hostname:\n return True\n return False", "def is_service_endpoint(path):\n return re.match(r'^[a-zA-Z0-9.-]+:\\d+$', path)", "def checkHost(host):\n if \"192.168.\" in host:\n return False\n elif \"169.254.\" in host: #APIPA (Automatic Private Internet Protocol Addressing)\n return False\n elif re.match(\"^(127\\.)\",host):\n return False\n elif re.match(\"^(10\\.)\",host):\n return False\n elif re.match(\"^(172\\.1[6-9]\\.)|(172\\.2[0-9]\\.)|(172\\.3[0-1]\\.)\",host):\n return False\n else:\n return True", "def test_ip_addresses_exists():\n load_ips()\n validate_names()", "def 
isIpOverlappingWithAnyStaticEntry(ipAddress, table):\n\n config_db = ConfigDBConnector()\n config_db.connect()\n\n static_dict = config_db.get_table(table)\n\n if not static_dict:\n return False\n\n for key,values in static_dict.items():\n global_ip = \"---\"\n nat_type = \"dnat\"\n\n if table == 'STATIC_NAPT':\n if isinstance(key, tuple) is False:\n continue\n\n if (len(key) == 3):\n global_ip = key[0]\n else:\n continue\n elif table == 'STATIC_NAT':\n if isinstance(key, str) is True:\n global_ip = key\n else:\n continue\n\n local_ip = values[\"local_ip\"]\n\n if \"nat_type\" in values:\n nat_type = values[\"nat_type\"]\n\n if nat_type == \"snat\":\n global_ip = local_ip\n\n if global_ip == ipAddress:\n return True\n\n return False", "def public_ip():\n found_public_ip = False\n try:\n metadata = get_instance_metadata()\n for key, value in metadata.items():\n LOG.info(\"{0}: {1}\".format(key, value))\n\n if metadata['public-ipv4'] is not None:\n try:\n socket.inet_aton(metadata['public-ipv4'])\n found_public_ip = True\n except socket.error:\n found_public_ip = False\n\n except Exception:\n LOG.exception('check_database_connection')\n return False\n return found_public_ip", "def match_found(result_data):\n return True if len(result_data[\"results\"][\"bindings\"]) > 0 else False", "def matches_hostname(self, hostname):\n return hostname in self.hostnames", "async def exists(self, payload: TPayload) -> bool:", "def domain_in_ip_whois_match(self, domain, ip):\n try:\n domain_ip_desc = self.get_name_by_ip(ip).lower().split(' ')\n domain_list = domain.lower()\n return domain_list in domain_ip_desc\n except Exception as e:\n return e", "def find(ctx, pattern):\n config = buildConfig(ctx.obj[\"HOST\"], ctx.obj[\"PORT\"])\n clientList = getClientList(config)\n for client in clientList:\n srvFound = 0\n for id in slabIds(client.stats(\"items\")):\n for key in client.stats('cachedump', id, '0').keys():\n if re.search(pattern, key.decode('utf-8')):\n click.echo(key)\n srvFound += 1\n click.echo(f\"********** SERVER {client.server} contained {srvFound} matching results\")\n print()", "def has_client(self, ip):\n for cli in self.clients:\n if cli.ip == ip:\n return cli\n return None", "def exists(self, name):\n return self.endpoint.exists(name)", "def __contains__(self, record):\n with self.session as session:\n query = session.query(IndexRecord)\n query = query.filter(IndexRecord.did == record)\n\n return query.exists()", "def check_access(ident):\n resource = data_service.resource_load(uniq = ident)\n log.debug('Result from the database: %s'%resource)\n if resource is None:\n return False\n return True", "def test_computeRouteExists(self):\n response = self.client.get('/api/v1/compute/25544/?time=20170825200000')\n self.assertEquals(response.status_code, status.HTTP_200_OK)", "def get_services(self, srv_record, host_name, service_name):\n ans = None\n\n # Form service record query: _radiovis._tcp at example.com\n # becomes _radiovis._tcp.example.com\n query = '.'.join([srv_record, host_name])\n\n self.log(\"Querying: \" + query)\n\n try:\n ans = self._resolver.query(query, 'SRV')\n\n except dns.resolver.NoAnswer as e:\n self.log(\"No answer\")\n except dns.resolver.NXDOMAIN as e:\n pass\n except dns.exception.DNSException as e:\n self.log(\"Exception: \" + str(type(e)))\n\n services = []\n\n if ans is not None and len(ans) > 0:\n for record in ans:\n # Remove last (blank) field from hostname then create\n # hostname string by joining with \".\".\n target = record.target.labels[0:-1]\n target = 
map(lambda s: str(s, 'utf-8'), target)\n target = \".\".join(target)\n\n self.log(\"Found: \" + target + \", port \" + str(record.port))\n\n service_record = ServiceRecord(name = service_name,\n query = query,\n port = record.port,\n priority = record.priority,\n target = target,\n weight = record.weight)\n services.append(service_record)\n else:\n self.log(\"No services\")\n\n return services", "def nslookup(self):\n if len(self.hostnames) == 0:\n st, out = commands.getstatusoutput('get_instance_by_service %s' % self.bns)\n assert st == 0, \"Failure:'get_instance_by_service %s', errno=%d\" % (self.bns, st)\n self.hostnames = out.split('\\n')\n assert self.hostnames, 'No hosts found for bns: \"%s\"' % self.bns", "async def _exists(self, key):\n with await self._connect() as redis:\n exists = await redis.exists(key)\n return True if exists > 0 else False", "def check(self, args):\n host, server = args\n\n try:\n host_addr = socket.gethostbyname(host)\n except socket.error:\n return\n\n # Reverse ip addr\n addr_parts = string.split(host_addr, '.')\n addr_parts.reverse()\n host_addr = string.join(addr_parts, '.')\n\n check_host = '{0}.{1}'.format(host_addr, server)\n\n try:\n check_addr = socket.gethostbyname(check_host)\n except socket.error:\n check_addr = None\n\n if check_addr is not None and \"127.0.0.\" in check_addr:\n self.blacklisted.append(server)", "def findRecord(db, col, query):\n\n # result = result.to_dict(\"records\")\n conn = MongoClient(\"localhost\", 27017)\n connObj = conn[db][col]\n exist_count = connObj.find(query, {'_id': 0}).count()\n conn.close()\n if exist_count >= 1:\n return True\n else:\n return False", "def exist_identity_match(client, table_id):\n try:\n client.get_table(table_id)\n return True\n except NotFound:\n return False", "def exists( self, path ):\n res = self.__checkArgumentFormat( path )\n if not res['OK']:\n return res\n urls = res['Value']\n successful = {}\n failed = {}\n serviceClient = RPCClient( self.url )\n for url in urls:\n gLogger.debug( \"DIPStorage.exists: Determining existence of %s.\" % url )\n res = serviceClient.exists( url )\n if res['OK']:\n successful[url] = res['Value']\n else:\n failed[url] = res['Message']\n resDict = {'Failed':failed, 'Successful':successful}\n return S_OK( resDict )", "def check_id_existence(self, id:str):\n\n oc_prefix = id[:(id.index(':')+1)]\n\n if oc_prefix == 'doi:':\n vldt = doi.DOIManager() # you can use removeprefix(oc_prefix) from Python 3.9+\n return vldt.exists(id.replace(oc_prefix, '', 1)) # todo: use id.replace(oc_prefix, '', 1) for Python < v.3.9\n if oc_prefix == 'isbn:':\n vldt = isbn.ISBNManager()\n return vldt.exists(id.replace(oc_prefix, '', 1))\n if oc_prefix == 'issn:':\n vldt = issn.ISSNManager()\n return vldt.exists(id.replace(oc_prefix, '', 1))\n if oc_prefix == 'orcid:':\n vldt = orcid.ORCIDManager()\n return vldt.exists(id.replace(oc_prefix, '', 1))\n if oc_prefix == 'pmcid:':\n vldt = pmcid.PMCIDManager()\n return vldt.exists(id.replace(oc_prefix, '', 1))\n if oc_prefix == 'pmid:':\n vldt = pmid.PMIDManager()\n return vldt.exists(id.replace(oc_prefix, '', 1))\n if oc_prefix == 'ror:':\n vldt = ror.RORManager()\n return vldt.exists(id.replace(oc_prefix, '', 1))\n if oc_prefix == 'url:':\n vldt = url.URLManager()\n return vldt.exists(id.replace(oc_prefix, '', 1))\n if oc_prefix == 'viaf:':\n vldt = viaf.ViafManager()\n return vldt.exists(id.replace(oc_prefix, '', 1))\n if oc_prefix == 'wikidata:':\n vldt = wikidata.WikidataManager()\n return vldt.exists(id.replace(oc_prefix, '', 1))\n 
if oc_prefix == 'wikipedia:':\n vldt = wikipedia.WikipediaManager()\n return vldt.exists(id.replace(oc_prefix, '', 1))", "def is_service_name_correct(self, service):\r\n return service in self.services", "def check_services(self):\n for service in self.services:\n try:\n self.cloud.search_services(service)[0]\n except Exception: # pylint: disable=broad-except\n self.is_skipped = True\n break", "def test_check_ip_on_whitelist_true(self):\n\n ip_name = 'mail-ed1-f51.google.com'\n\n result = check_ip_on_whitelist(ip_name, self.pattern_ip)\n\n self.assertTrue(result)", "def singularity_exists(self):\n instances = Client.instances(quiet=self.quiet)\n for instance in instances:\n if self.pid in instance.name:\n return True\n return False", "def upsert_record(route53_zone, record_name, ip):\n\n # Only upsert the dns record if it doesn't resolve to us.\n try:\n record_ip = socket.gethostbyname(record_name)\n except socket.error:\n # Ignore if we can't connect to the host\n pass\n else:\n if ip == record_ip:\n return\n\n print str(dt.now()), \"Registering host as\", record_name\n record = route53_zone.get_a(record_name)\n\n if record and ip not in record.resource_records:\n route53_zone.update_a(record_name, ip)\n elif not record:\n route53_zone.add_a(record_name, ip)", "def id_exists(host_id: int,\n database_connection: mysql.connector.connect) -> bool:\n return validate_id(host_id, database_connection)", "def is_ncar_host():\n hostname = socket.getfqdn()\n \n return any([re.compile(ncar_host).search(hostname) \n for ncar_host in ['cheyenne', 'casper', 'hobart']])", "def DB_Exists(logfile, timestamp):\n cursor.execute('''SELECT hit FROM master\n WHERE logfile = \"%s\"\n AND date = \"%s\"''' % (logfile, timestamp))\n exists = [row[0] for row in cursor.fetchall()]\n return exists", "async def exists(self, field, **kwargs):\n return await self._client_conn.hexists(key=self.name, field=field)", "def exists(self, proxy):\n return not self.database.zscore(self.key, proxy) == None", "def __some_alive(self):\n for service in self.__services.values():\n if service.is_alive():\n return True\n return False", "def lookup(self,hostname):\n\t\tfor block in self.blockchain.chain:\n\t\t\ttransactions = block['transactions']\n\t\t\tfor transaction in transactions:\n\t\t\t\t# print(transaction)\n\t\t\t\tif 'hostname' in transaction and transaction['hostname'] == hostname:\n\t\t\t\t\treturn (transaction['ip'],transaction['port'])\n\t\traise LookupError('No existing entry matching hostname')", "def check_scanned(fname):\n try:\n for line in open(fname):\n if \"(1 host up)\" in line:\n return True\n except IOError: return False\n return False", "def matches_host(self, host: str, requires_data_uri: bool = False) -> bool:\n return (\n self.url\n and self.site_host\n and self.site_host in host\n and (self.data_uri if requires_data_uri else True)\n )", "def should_respond(head_str):\n reqline = httputil.RE_REQLINE.match(head_str)\n if reqline is not None and reqline.group(1) == \"IOT-SEARCH\":\n headers = httputil.parse_headers(head_str)\n if \"host\" in headers and \"sv\" in headers and \"return\" in headers:\n return (headers[\"host\"] == \"239.255.255.250:1900\"\n and headers[\"sv\"] == \"iotscp:discover\"\n and headers[\"return\"] == \"device; type=basedevice\")\n return False", "def host_discover(self):\n self._scanned = True\n return self._scanner.scan(self._ips, arguments='-sP')", "def has_address(self, address):\n\n if self.filter(to_address=address).count() > 0: # @@@ is there a better way?\n return True\n 
else:\n return False", "def rpc_match():", "def url_is_in_db(url):\n return bool(find_url(url).first())", "def globusLocalEndpointExistence(self):\n\t\tsuccess = False\n\t\ttry:\n\t\t\tlocal_id = self.transfer_client.endpoint_search(socket.gethostname())[0]['name']\n\t\t\tself.transfer_client.operation_ls(local_id)\n\t\t\tself.local_ep_id = local_id\n\t\t\tsuccess = True\n\t\texcept:\n\t\t\tpass\n\t\treturn success", "def exists(ccp_id):\n exists = cache.get(\"location_exists_%s\" % ccp_id)\n if exists is not None:\n return True\n else:\n exists_db = Location.objects.filter(pk=ccp_id).exists()\n if exists_db:\n cache.set(\"location_exists_%s\" % ccp_id, True, timeout=3600)\n return exists_db", "def checkIP(self, ip = None):\n\t\treturn os.system(\"ping -c 1 -w2 \" + ip + \" > /dev/null 2>&1\") == 0", "def _Exists(self):\n cmd = util.GcloudCommand(self, 'beta', 'bigtable', 'instances', 'list')\n cmd.flags['format'] = 'json'\n cmd.flags['project'] = self.project\n # The zone flag makes this command fail.\n cmd.flags['zone'] = []\n stdout, stderr, retcode = cmd.Issue(\n suppress_warning=True, raise_on_failure=False)\n if retcode != 0:\n # This is not ideal, as we're returning false not because we know\n # the table isn't there, but because we can't figure out whether\n # it is there. This behavior is consistent without other\n # _Exists methods.\n logging.error('Unable to list GCP Bigtable instances. Return code %s '\n 'STDOUT: %s\\nSTDERR: %s', retcode, stdout, stderr)\n return False\n result = json.loads(stdout)\n instances = {instance['name'] for instance in result}\n full_name = 'projects/{}/instances/{}'.format(self.project, self.name)\n return full_name in instances", "def dns_lookup(self, hostname, aux):\n\n resolver = Resolver()\n\n # If the host doesn't have the A record (IPv4),\n # trying to find its AAAA record (IPv6).\n try:\n addr = resolver.query(hostname, \"A\")[0] # <---+\n ver = 4 # |\n except Exception as e: # From the dnspython lib. 
--------+\n try: # |\n addr = resolver.query(hostname, \"AAAA\")[0] # <---+\n ver = 6\n except Exception as e:\n addr = ver = aux._ERR_PREFIX\n\n return (addr, ver)", "def valid_host(host):\n if host in ACCEPTED_HOSTS:\n return True\n return False", "def exists(\n instance_id=None,\n name=None,\n tags=None,\n region=None,\n key=None,\n keyid=None,\n profile=None,\n in_states=None,\n filters=None,\n):\n instances = find_instances(\n instance_id=instance_id,\n name=name,\n tags=tags,\n region=region,\n key=key,\n keyid=keyid,\n profile=profile,\n in_states=in_states,\n filters=filters,\n )\n if instances:\n log.info(\"Instance exists.\")\n return True\n else:\n log.warning(\"Instance does not exist.\")\n return False", "async def _exists(self, key):\n return key in SimpleMemoryBackend._cache", "def _is_skyserve(ip_port):\n ip, skyserve_port = ip_port\n url = 'http://{ip}:{skyserve_port}/discover'.format(ip=ip, skyserve_port=DEFAULT_SKYSERVE_PORT)\n\n try:\n resp = requests.get(url, timeout=1.5).json()\n if resp.get('success'):\n return ip\n except: # noqa: E722\n return", "def matches(self, url):\n split = urlparse.urlsplit(url)\n return self.host == split.hostname", "def is_ip_contained(a):\n for cc, cc_rt in rt.iteritems():\n matching_pfx = cc_rt.search_best(network=a, masklen=32)\n if matching_pfx is not None:\n return [cc, matching_pfx.prefix]\n\n return None", "def exists(self):\n self.cursor.execute(f\"\"\"\n SELECT 1\n FROM {self.table_name}\n WHERE {self.lookup_type}='{self.word}'\n \"\"\")\n return True if self.cursor.fetchone() else False", "def rule_exists(rule, table=None):\n cmdline = [IPTABLES_PATH]\n if table:\n cmdline += [\"-t\", table]\n cmdline += [\"-C\"] + rule\n return call(cmdline, stdout=PIPE, stderr=PIPE) == 0", "def __contains__(self, address):\n return any([\n ipaddress.ip_address(address) in network\n for network in self.networks\n ])", "def _find_filter(keyword):\n db = get_service_collection()\n result = db.find({\"name\": {\"$regex\": keyword}})\n service_endpoint = ''\n for item in result:\n service_endpoint = item[\"value\"][\"url\"]\n break\n return service_endpoint", "def _do_check(self):\n try:\n #breakpoint()\n ApplicationsItem.objects.exists()\n #print (\"Checking\")\n return True\n\n except Exception:\n client.captureException()\n return False", "def exist(self):", "def icmp_probe(self, ip):\n\n\t\tcmd = 'ping %s -n 10' % ip\n\t\tp = Popen(cmd, shell=True, stdin=PIPE, stderr=PIPE, stdout=PIPE)\n\t\tres = p.stdout.read()\n\n\t\tres = res.decode()\n\t\tif len(p.stderr.read()) == 0:\n\t\t\tif 'Destination host unreachable' in res:\n\t\t\t\treturn False\n\t\t\treturn True\n\t\telse:\n\t\t\treturn False", "def member_in_database(uniqname, conn):\n with conn.cursor() as cur:\n cur.execute(\n 'SELECT * '\n 'FROM members '\n 'WHERE uniqname = %s',\n (uniqname,)\n )\n member_exists = cur.rowcount > 0\n\n return member_exists", "def _mapped_to_this_conductor(self, node_uuid, driver):\n try:\n ring = self.ring_manager[driver]\n except exception.DriverNotFound:\n return False\n\n return self.host in ring.get_hosts(node_uuid)", "def business_exists(yelp_id, conn):\n return conn.execute(Business.select().where(Business.c.yelp_id == yelp_id))\\\n .first() is not None", "def is_found(uuid):\n articles = mongo.db[app.config['ARTICLES_COLLECTION']]\n hit = articles.find_one({'uuid': uuid})\n if not hit:\n return False\n return hit", "def resourceExists(self, uri):\r\n return uri in self.cache", "def vt_ip_check(ip, vt_api):\n if not is_IPv4Address(ip):\n return None\n\n 
url = 'https://www.virustotal.com/vtapi/v2/ip-address/report'\n parameters = {'ip': ip, 'apikey': vt_api}\n response = requests.get(url, params=parameters)\n try:\n return response.json()\n except ValueError:\n return None", "def dev_ip():\n while True:\n d_ip = raw_input(\"\\nEnter the IP Address of the device you need to access: \")\n if re.match(\"^\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}$\", d_ip):\n return d_ip\n else:\n print(\"\\nThat is not a valid IP Address!\\nTry again.\")", "def test_data_source_soaps_id_exists_get(self):\n pass", "def pvExistTest(self, context, addr, fullname):\n if fullname in self.pcas_manager.pvf:\n return pcaspy.pverExistsHere\n else:\n return pcaspy.pverDoesNotExistHere", "def search_query(self, num, query, table, field):\n try:\n self.db_connection()\n if self.connection is not None: # If the database connection was established successfully\n with self.connection.cursor() as cursor:\n cursor.execute(query)\n result = cursor.fetchone()\n print(type(result))\n if result is not None and result[field] == num: # check if query result is None and pk equal to num\n print(table + str(num), \"was found\")\n return True\n else:\n print(table + str(num), \"wasn't found\")\n return False\n except IntegrityError:\n print(\"A new record couldn't be added\")\n exit()\n finally:\n self.close_connection() # Closing the opened connection", "def exists (self, uuid):\n return self.read (uuid) is not None", "def is_valid_ip(ip):\n ...", "def test_udp_no_records():\n assert dnsck_query(\"8.8.8.8\", \"test.google.com\", \"A\", 1) == 0" ]
[ "0.6178328", "0.6014736", "0.59269434", "0.58188164", "0.56742144", "0.56725705", "0.56573004", "0.5653265", "0.56383586", "0.56271714", "0.5579105", "0.5578969", "0.55783236", "0.5542878", "0.55369085", "0.55349123", "0.5531589", "0.5526352", "0.5510327", "0.5509592", "0.54834443", "0.5450964", "0.5449097", "0.54182833", "0.5385543", "0.53623646", "0.5361515", "0.5353488", "0.53499764", "0.533174", "0.5325779", "0.5313576", "0.5298396", "0.52919465", "0.52763045", "0.52709556", "0.5263308", "0.52556", "0.5226443", "0.52246076", "0.52038383", "0.5203626", "0.51967794", "0.51828927", "0.51771533", "0.5165192", "0.51650405", "0.5161316", "0.51580936", "0.5152692", "0.5144976", "0.5132138", "0.51313704", "0.5130448", "0.51285034", "0.5123988", "0.511817", "0.5113798", "0.51128787", "0.51125365", "0.5109374", "0.51030844", "0.50952387", "0.50923294", "0.50704414", "0.50699455", "0.5067453", "0.50673085", "0.50643", "0.5061624", "0.50593966", "0.5056513", "0.50563264", "0.5045002", "0.50413376", "0.5039878", "0.50317043", "0.5030511", "0.50242835", "0.5014944", "0.5013517", "0.50028497", "0.50008845", "0.49934328", "0.4983647", "0.4983374", "0.49780592", "0.4975667", "0.49754086", "0.4972086", "0.49682894", "0.49636883", "0.49577725", "0.49549696", "0.49533072", "0.49478135", "0.49437827", "0.49437723", "0.49381527", "0.49332213" ]
0.7733625
0
Creates a record with record_name and ip; updates the record if it already exists with a different ip; does nothing if the record already exists with the same ip
def upsert_record(route53_zone, record_name, ip):

    # Only upsert the dns record if it doesn't resolve to us.
    try:
        record_ip = socket.gethostbyname(record_name)
    except socket.error:
        # Ignore if we can't connect to the host
        pass
    else:
        if ip == record_ip:
            return

    print str(dt.now()), "Registering host as", record_name
    record = route53_zone.get_a(record_name)

    if record and ip not in record.resource_records:
        route53_zone.update_a(record_name, ip)
    elif not record:
        route53_zone.add_a(record_name, ip)
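A minimal call-site sketch for the record above, assuming the legacy boto 2.x Route 53 API implied by the get_a/update_a/add_a calls; the zone name, record name, and connection setup are illustrative placeholders, not part of the original record:

# Hypothetical usage sketch (boto 2.x, Python 2 to match the record's print statement).
import socket
from datetime import datetime as dt

import boto

conn = boto.connect_route53()
zone = conn.get_zone("example.com.")  # boto.route53.zone.Zone exposing get_a/update_a/add_a

# Register this host's current address under a stable record name.
local_ip = socket.gethostbyname(socket.gethostname())
upsert_record(zone, "worker-1.example.com.", local_ip)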
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _update_record(self, record_name, ip):\n if ((not hasattr(self, '_current_zone')) or (not self._current_zone)) or ((not hasattr(self, '_new_zone_version_number')) or (not self._new_zone_version_number)):\n raise GandiApiException(\"Can't update record, no cloned zone available.\")\n \n list_record = self._api.domain.zone.record.list(self._api_key, self._current_zone['id'], \n self._new_zone_version_number)\n for record in list_record:\n if record['name'] == record_name:\n myrecord = record\n # Create new record\n self._api.domain.zone.record.update(self._api_key, self._current_zone['id'], \n self._new_zone_version_number, {'id': myrecord['id']}, \n {\n 'name': myrecord['name'],\n 'type': myrecord['type'],\n 'value': ip,\n 'ttl': myrecord['ttl']\n })\n logging.info('Update record %s with ip %s successfully.' % (record_name, ip))", "def saw_ip(self, ip):\n from sqlalchemy.exc import IntegrityError\n c = self.ipSurvey.columns\n v = {\n c[\"ipAddress\"]: ip,\n c[\"lastSeen\"]: \"now()\",\n }\n # Update if already in table, otherwise insert new row\n if self.session.execute(self.ipSurvey.update(c[\"ipAddress\"] == ip, values=v)).rowcount == 0:\n self.session.execute(self.ipSurvey.insert(values=v))", "def insert_or_update(self, table, record):\n try:\n request = s.query(table=table, query={'sys_id': record['sys_id']})\n #request.get_single()\n response = request.update(record)\n print >> sys.stderr, 'update'\n except NoResults:\n # Record does not exist so create it\n response = self.snow.insert(table=table, payload=record)\n print >> sys.stderr, 'create'\n return response", "def create(self, key, record, overwrite=False):\n if key in self.db and not overwrite:\n raise ValueError(\"A record for key \\\"%s\\\" already exists.\" % key)\n self.db[key] = copy(record)", "def add_record(self):\n if not self.record_exists(self.args.date):\n record = self.create_record()\n self.records.append(record)\n self.write_json_file(self.records_file, self.records)\n return True\n return False", "def new_ip(self, ip):\n if not ip in self.ip_list:\n self.ip_list.add(ip)\n host = self.hs.id_to_object(ip)\n host.add_tag('sniffer')\n host.save()\n print_success(\"New ip address: {}\".format(ip))", "def upload_record(self,\n record: Optional[Record] = None,\n style: Optional[str] = None,\n name: Optional[str] = None,\n model: Union[str, io.IOBase, DM, None] = None,\n workspace: Union[str, pd.Series, None] = None,\n overwrite: bool = False,\n verbose: bool = False):\n if record is None:\n record = load_record(style, model, name=name)\n \n try:\n self.remote_database.add_record(record=record, workspace=workspace,\n verbose=verbose) \n except ValueError as e:\n if overwrite:\n self.remote_database.update_record(record=record, workspace=workspace,\n verbose=verbose)\n else:\n raise ValueError('Matching record already exists: use overwrite=True to change it') from e", "def cli_add_record(record_data):\n new_record = None\n try:\n new_record = api.insert_record( record_data)\n except DuplicateRecord as error:\n debug(\"%(error)s\" % locals())\n print \"Adding new record failed. %(error)s\" % locals()\n return None\n except MissingRequiredInformaton as error:\n debug(\"%(error)s\" % locals())\n print \"Adding new record failed. 
%(error)s\" % locals()\n return None\n\n return new_record", "def update_record(self):\n new_record = self.create_record()\n for record in self.records:\n if self.date_str == record[\"date\"] and not record == new_record:\n record.update(new_record)\n self.write_json_file(self.records_file, self.records)\n return True\n return False", "def add_record():\n if 'json' not in request.files:\n # use an HTML record that seems appropriate\n return \"no json file in the request!\", 400\n try:\n # can't assume that JSON file is valid\n _record = json.loads(request.files['json'].read())\n except ValueError:\n return \"failed to parse JSON file correctly!\", 400\n if type(_record) is not dict or 'name' not in _record:\n return \"expecting a dictionary with identifier, post failed!\", 400\n with RECORD_LOCK:\n # just check if the name already exists in the global RECORD list\n if len([r for r in RECORDS if r.get('name') == _record['name']]):\n return \"already in the records!\", 409\n RECORDS.append(_record)\n return \"OK\"", "def create_record(self, context, record):\n record = self.dns_manager.create_record(context, record)\n return record", "def test_2_resource_records_actions(self):\n record_type = 'AAAA'\n name = 'test.example.com'\n ttl = 60\n rdata = {\n 'ip': '2001::1'\n }\n # create resource record\n resp = self.record.create_resource_record(\n instance_id=self.instance_id, dnszone_id=self.zone_id, type=record_type, ttl=ttl, name=name, rdata=rdata)\n assert resp is not None\n assert resp.status_code == 200\n record_id = resp.get_result().get(\"id\")\n\n # update resource record\n name = 'test.example43.com'\n ttl = 120\n rdata = {\n 'ip': '2002::2'\n }\n resp = self.record.update_resource_record(\n instance_id=self.instance_id, dnszone_id=self.zone_id, record_id=record_id,\n type=record_type, ttl=ttl, name=name, rdata=rdata)\n assert resp is not None\n assert resp.status_code == 200\n\n # get resource record\n resp = self.record.get_resource_record(\n instance_id=self.instance_id, dnszone_id=self.zone_id, record_id=record_id)\n assert resp is not None\n assert resp.status_code == 200\n\n # delete resource record\n resp = self.record.delete_resource_record(\n instance_id=self.instance_id, dnszone_id=self.zone_id, record_id=record_id)\n assert resp is not None\n assert resp.status_code == 204", "def insert_update(record,name):\n try:\n conn = db_conection()\n cur = conn.cursor()\n except Exception as db:\n __log(2, \"DataBase Connection Error\" + db.args[1])\n\n try:\n query_insert = \"INSERT into \"+name+\" VALUES ('\" + str(record['Id']) + \"', '\" + record[\n 'First name'] + \"', '\" + record['Last Name'] + \"', '\" + record['deparment'] + \"', '\" + str(\n int(record['salary'])) + \"')\"\n cur.execute(query_insert)\n conn.commit()\n # print (\"Insert Success\",record['Id'])\n __log(1, 'Insert Success. 
ID: ' + str(record['Id']))\n\n except pymysql.Error:\n # print ('Duplicate Error Found ID: ',record['Id'])\n __log(2, 'Duplicate Error Found ID: ' + str(record['Id']))\n\n query_update = \"UPDATE \"+name+\" SET `First name` = %s , `Last Name`= %s,`deparment`= %s,`salary` = \" \\\n \"'%s' WHERE `Id` = '%s' \"\n val = (record[1], record[2], record[3], record[4], record[0])\n cur.execute(query_update, val)\n conn.commit()\n # print (\"Update Success, ID\",record['Id'])\n __log(1, 'Duplicate Error Updated with New Values, ID: ' + str(record['Id']))\n\n except Exception as e:\n # print (e)\n __log(2, 'Unknown Error, Skipping Record having id' + str(record['Id']))", "def insert_record(self, record, session):\n try:\n session.add(record)\n session.commit()\n session.close()\n return True\n except:\n\n logging.exception(\"http record cannot be added to db \" \":Time: \" + str(datetime.datetime.now()))\n return False", "def create(self, ip): # pylint: disable=invalid-name\n return self.request(\"POST\", data={\"ip\": ip})", "def update_record():\n if 'json' not in request.files:\n return \"no json file in the request!\", 400\n try:\n _record = json.loads(request.files['json'].read())\n except ValueError:\n return \"failed to parse JSON file correctly!\", 400\n if type(_record) is not dict or 'name' not in _record:\n return \"expecting a dictionary with a name, post failed!\", 400\n with RECORD_LOCK:\n for _index, _rec in enumerate(RECORDS):\n if _rec['name'] == _record['name']:\n RECORDS[_index] = _record\n return \"OK\"\n return \"Failed to update record!\", 500", "def save_record(self,\n record: Optional[Record] = None,\n style: Optional[str] = None,\n name: Optional[str] = None,\n model: Union[str, io.IOBase, DM, None] = None,\n overwrite: bool = False,\n verbose: bool = False):\n if record is None:\n record = load_record(style, model, name=name)\n \n try:\n self.local_database.add_record(record=record, verbose=verbose)\n except ValueError as e:\n if overwrite:\n self.local_database.update_record(record=record,\n verbose=verbose)\n else:\n raise ValueError('Matching record already exists: use overwrite=True to change it') from e", "def add_record(self, record: Dict, src_name: SourceName) -> None:\n concept_id = record[\"concept_id\"]\n record[\"src_name\"] = src_name.value\n label_and_type = f\"{concept_id.lower()}##identity\"\n record[\"label_and_type\"] = label_and_type\n record[\"item_type\"] = \"identity\"\n try:\n self.batch.put_item(Item=record)\n except ClientError as e:\n logger.error(\n \"boto3 client error on add_record for \"\n f\"{concept_id}: {e.response['Error']['Message']}\"\n )\n for attr_type, item_type in ITEM_TYPES.items():\n if attr_type in record:\n value = record.get(attr_type)\n if not value:\n continue\n if isinstance(value, str):\n items = [value.lower()]\n else:\n items = {item.lower() for item in value}\n for item in items:\n self._add_ref_record(\n item, record[\"concept_id\"], item_type, src_name\n )", "def save(self, ip='', result='', dt=datetime.datetime.now()):\n self.ping_table.insert({\"host\": ip, \"result\": result, \"datetime\": str(dt)})\n return", "def add_remote_duplicate_entry(self, ip):\n site2 = Session(SITE2_URL, SITE2_LOGIN, SITE2_PASSWORD)\n resp = site2.login()\n self.assertTrue(resp.ok)\n\n tenant = Tenant('intersite-testsuite-remote')\n l3out = OutsideL3('l3out', tenant)\n other_epg = OutsideEPG('other', l3out)\n subnet = OutsideNetwork(ip, other_epg)\n subnet.ip = ip + '/32'\n\n resp = tenant.push_to_apic(site2)\n self.assertTrue(resp.ok)", "def 
add_remote_duplicate_entry(self, ip):\n site2 = Session(SITE2_URL, SITE2_LOGIN, SITE2_PASSWORD)\n resp = site2.login()\n self.assertTrue(resp.ok)\n\n tenant = Tenant('intersite-testsuite-remote')\n l3out = OutsideL3('l3out1', tenant)\n other_epg = OutsideEPG('other', l3out)\n subnet = OutsideNetwork(ip, other_epg)\n subnet.ip = ip + '/32'\n\n resp = tenant.push_to_apic(site2)\n self.assertTrue(resp.ok)", "def add_host(self, name, ip):\n rdataa = dns.rdata.from_text(dns.rdataclass.IN,dns.rdatatype.A,str(ip))\n rdataseta = dns.rdataset.from_rdata(300,rdataa)\n self.update.add(name,rdataseta)\n return dns.query.tcp(self.update,self.server_address)", "def update_dns(self):\n if self.ptr:\n which_zone = None\n zones = dns.models.Zone.objects.all()\n for zone in zones:\n if self.ptr.endswith(zone.name) or self.ptr.endswith(zone.name + '.'):\n which_zone = zone\n break\n\n if which_zone:\n zone_name = which_zone.name\n record_name = self.ptr[:-len(zone_name)] if not self.ptr.endswith('.') else self.ptr[:-len(zone_name) - 1]\n if record_name.endswith('.'):\n record_name = record_name[:-1]\n record_type = 'A' if self.family == 4 else 'AAAA'\n\n dns.models.Record.objects.get_or_create(\n name=record_name,\n record_type=record_type,\n zone=which_zone,\n address=self\n )", "def update_A_record(self, heroku_host_ip, dns_a_record):\n r = self.api.post_update_record(\n record_id = dns_a_record.get('id'),\n prio = dns_a_record.get('prio'),\n content = heroku_host_ip,\n ttl = dns_a_record.get('ttl'))\n dns_a_record = self.extract_A_records(r[\"record\"])\n return dns_a_record", "def create_record(self, zone_id, record, record_type, data, ttl=60):\r\n self.record.createObject({\r\n 'domainId': zone_id,\r\n 'ttl': ttl,\r\n 'host': record,\r\n 'type': record_type,\r\n 'data': data})", "def add_route53_record(emr_internal_ips, cr):\n\n conn = connect_route53(aws_access_key_id = cr.get_config(\"aws_access_key\"), aws_secret_access_key = cr.get_config(\"aws_secret_key\"))\n\n zone = conn.get_zone(\"alpinenow.local\")\n\n print \"Adding DNS Records for: {0}\".format(emr_internal_ips)\n for ip in emr_internal_ips:\n internal_dns = \"ip-\" + ip.replace(\".\", \"-\") + \".alpinenow.local\"\n response = zone.add_a(internal_dns, ip) # TODO: Do something with response", "def test_zone_cant_have_duplicate_records(self):\n zone = Zone('test.example.com')\n recordA = Record(zone, 'test-record', {'type': 'A', 'ttl': 300})\n recordB = Record(zone, 'test-record', {'type': 'A', 'ttl': 300})\n zone.add_record(recordA)\n with self.assertRaises(DuplicateException):\n zone.add_record(recordB)", "def test_6_resource_records_actions(self):\n\n record_type = 'A'\n name = 'example76'\n content = '2.2.2.2'\n ttl = 60\n rdata = {\n 'ip': content\n }\n # create resource record\n resp = self.record.create_resource_record(\n instance_id=self.instance_id, dnszone_id=self.zone_id, type=record_type, ttl=ttl, name=name, rdata=rdata)\n assert resp is not None\n assert resp.status_code == 200\n\n record_type = 'A'\n name = 'example43'\n content = '2.2.2.3'\n ttl = 60\n rdata = {\n 'ip': content\n }\n # create resource record\n resp = self.record.create_resource_record(\n instance_id=self.instance_id, dnszone_id=self.zone_id, type=record_type, ttl=ttl, name=name, rdata=rdata)\n assert resp is not None\n assert resp.status_code == 200\n\n record_type = 'PTR'\n name = '2.2.2.2'\n ttl = 60\n rdata = {\n \"ptrdname\": \"example76.test.example36.com\"\n }\n # create resource record\n resp = self.record.create_resource_record(\n 
instance_id=self.instance_id, dnszone_id=self.zone_id, type=record_type,\n ttl=ttl, rdata=rdata, name=name)\n assert resp is not None\n assert resp.status_code == 200\n record_id = resp.get_result().get(\"id\")\n\n # update resource record\n ttl = 120\n resp = self.record.update_resource_record(\n instance_id=self.instance_id, dnszone_id=self.zone_id, record_id=record_id, ttl=ttl)\n assert resp is not None\n assert resp.status_code == 200\n\n # get resource record\n resp = self.record.get_resource_record(\n instance_id=self.instance_id, dnszone_id=self.zone_id, record_id=record_id)\n assert resp is not None\n assert resp.status_code == 200\n\n # delete resource record\n resp = self.record.delete_resource_record(\n instance_id=self.instance_id, dnszone_id=self.zone_id, record_id=record_id)\n assert resp is not None\n assert resp.status_code == 204", "def post(self, record_type, record_id, record, metadata):\n \n if not self.cache.get(record_type, None):\n self.cache[record_type] = {}\n\n if not self.cache[record_type].get(record_id, None):\n self.cache[record_type][record_id] = {}\n\n \n self.cache[record_type][record_id]['record'] = record\n self.cache[record_type][record_id]['metadata'] = metadata\n\n \n d = Date()\n self.cache[record_type][record_id]['last_updated'] = d.now()\n\n # Check space, remove old items if not enough space", "def test_5_resource_records_actions(self):\n record_type = 'SRV'\n name = 'test.example76.com'\n ttl = 60\n rdata = {\n \"priority\": 100,\n \"weight\": 100,\n \"port\": 8000,\n \"target\": \"test.example76.com\"\n }\n service_name = \"_sip\"\n protocol = \"udp\"\n # create resource record\n resp = self.record.create_resource_record(\n instance_id=self.instance_id, dnszone_id=self.zone_id, type=record_type,\n ttl=ttl, name=name, rdata=rdata, service=service_name, protocol=protocol)\n assert resp is not None\n assert resp.status_code == 200\n record_id = resp.get_result().get(\"id\")\n\n # update resource record\n name = 'test.example43.com'\n ttl = 120\n rdata = {\n \"priority\": 200,\n \"weight\": 200,\n \"port\": 8001,\n \"target\": \"test.example43.com\"\n }\n service_name = \"_sip\"\n protocol = \"tcp\"\n resp = self.record.update_resource_record(\n instance_id=self.instance_id, dnszone_id=self.zone_id, record_id=record_id,\n type=record_type, ttl=ttl, name=name, rdata=rdata, service=service_name, protocol=protocol)\n assert resp is not None\n assert resp.status_code == 200\n\n # get resource record\n resp = self.record.get_resource_record(\n instance_id=self.instance_id, dnszone_id=self.zone_id, record_id=record_id)\n assert resp is not None\n assert resp.status_code == 200\n\n # delete resource record\n resp = self.record.delete_resource_record(\n instance_id=self.instance_id, dnszone_id=self.zone_id, record_id=record_id)\n assert resp is not None\n assert resp.status_code == 204", "def test_add_record_to_zone(self):\n zone = Zone('test.example.com')\n record = Record(zone, 'test-record', {'type': 'A', 'ttl': 300})\n zone.add_record(record)\n self.assertEqual(zone.records.get('test-record'), record)", "def __create_record(self, timestamp, url):\n if not timestamp in self.record_d.keys():\n self.record_d[timestamp] = {}\n\n # increment url count\n if not url in self.record_d[timestamp].keys():\n self.record_d[timestamp][url] = 1\n else:\n self.record_d[timestamp][url] += 1", "def post_virtual_DNS_record_create(self, resource_dict):\n pass", "def _validate_create(context, db_api, create_data, model_name):\n ipaddrlist = 
utils.get_addresses(create_data['ip_address'])\n\n if not ipaddrlist:\n errors = (_(\"Failed to register (%s)\" +\n \". The (%s) IP Address (%s) could not \"\n \"be resolved.\")\n % (model_name, model_name, create_data['ip_address']))\n raise exception.AddressResolutionFailure(reason=errors)\n LOG.info(\"IP/FQDN for the \" + model_name + \" %s is %s\" % (\n create_data['ip_address'],\n ipaddrlist))\n try:\n get_all = getattr(db_api, \"get_all_%ss\" % model_name)\n res_data = get_all(context)\n if not res_data:\n # No registered resources\n LOG.info(\"No registered %s\" % model_name)\n return\n except Exception:\n errors = (_(\"Failed to retrieve data for (%s) %s\")\n % (model_name, create_data.get('ip_address')))\n raise exception.InternalFailure(reason=errors)\n name = create_data.get(\"name\")\n valid_name = _validate_duplicate_names(res_data, name)\n if not valid_name:\n msg = (_(\"Two different (%s) with same \"\n \"name cannot be registered\") % model_name)\n raise exception.ResourceExists(reason=msg)\n registered_data = []\n for data in res_data:\n registered_data.append(data['ip_address'])\n\n if set(ipaddrlist).intersection(set(registered_data)):\n errors = (_(\"(%s) by ip_address (%s) already exists.\")\n % (model_name, create_data['ip_address']))\n raise exception.ResourceExists(reason=errors)", "def insert_record(record, table):\n\n\ttups = [(key, val) for key, val in record.iteritems()]\n\tkeys = [key for key, val in tups]\n\tvals = [val for key, val in tups]\n\n\tconn = get_database_connection(port = 2001)\n\tcursor = conn.cursor()\n\n\tnum_cols = len(keys)\n\tkey_str = ','.join(keys)\n\tval_str = ','.join(['%s'] * num_cols)\n\n\tqry = \"REPLACE INTO %s (%s) VALUES (%s)\" % (table, key_str, val_str)\n\tcursor.execute(qry, vals)\n\n\tconn.commit()\n\tcursor.close()\n\tconn.close()", "def test_3_resource_records_actions(self):\n record_type = 'CNAME'\n name = 'test.example56.com'\n ttl = 60\n rdata = {\n 'cname': 'test.example56.com'\n }\n # create resource record\n resp = self.record.create_resource_record(\n instance_id=self.instance_id, dnszone_id=self.zone_id, type=record_type, ttl=ttl, name=name, rdata=rdata)\n assert resp is not None\n assert resp.status_code == 200\n record_id = resp.get_result().get(\"id\")\n\n # update resource record\n name = 'test.example43.com'\n ttl = 120\n rdata = {\n 'cname': 'test.example43.com'\n }\n resp = self.record.update_resource_record(\n instance_id=self.instance_id, dnszone_id=self.zone_id, record_id=record_id,\n type=record_type, ttl=ttl, name=name, rdata=rdata)\n assert resp is not None\n assert resp.status_code == 200\n\n # get resource record\n resp = self.record.get_resource_record(\n instance_id=self.instance_id, dnszone_id=self.zone_id, record_id=record_id)\n assert resp is not None\n assert resp.status_code == 200\n\n # delete resource record\n resp = self.record.delete_resource_record(\n instance_id=self.instance_id, dnszone_id=self.zone_id, record_id=record_id)\n assert resp is not None\n assert resp.status_code == 204", "def create_record(self, name, zone, type, data, extra=None):\n id = \"id-%s\" % (name)\n\n zone = self.get_zone(zone_id=zone.id)\n\n if id in self._zones[zone.id][\"records\"]:\n raise RecordAlreadyExistsError(record_id=id, value=None, driver=self)\n\n record = Record(id=id, name=name, type=type, data=data, extra=extra, zone=zone, driver=self)\n self._zones[zone.id][\"records\"][id] = record\n return record", "def save_record(record):\n record. 
save_details()", "def add_record(self, record):\n # Store the domain as the key, and the rest as value.\n new_key = \"{0},{1}\".format(record.get_domain(), record.get_record_type())\n self._records[new_key] = record", "def db_add_device_record(db_path, rec_name, rec_info):\n path_exist = os.path.exists(db_path)\n if path_exist is False:\n print '!!!Error, database does not exist.'\n return\n\n try:\n with db.connect(db_path) as conn:\n cursor = conn.cursor()\n # Prepare and execute SQL statement.\n # The REPLACE statement is an alias for the \"INSERT OR REPLACE\"\n # variant of the INSERT statement. This alias is provided for\n # compatibility with other SQL database engines.\n tup = (rec_name, rec_info)\n sql = (\"REPLACE INTO Devices VALUES (?,?)\")\n cursor.execute(sql, tup)\n conn.commit()\n except (db.OperationalError) as e:\n print(\"!!!Error, %s\" % repr(e))", "def add_record(self, record):\n pass", "def add_a_record(self, record):\n '''\n doc = { \"P/N\": record,#record.get_PN(),\n \"supplier\": \"\",\n \"inventory\": \"\",\n \"specification\": \"\",\n \"description\": \"\",\n \"OEM\": \"\",\n \"tags\": [\"mongodb\", \"python\", \"pymongo\"],\n \"date\": datetime.datetime.utcnow()}'''\n self.collection.insert(record)", "def create_access_entry(\n self,\n key: str,\n ip: str,\n path: str,\n already_requested: int = 0,\n max_requests: int = 100,\n ) -> None:\n self.model.objects.create(\n key=key,\n ip=ip,\n path=path,\n already_requested=already_requested,\n max_requests=max_requests,\n )", "def add_record(self, record):\n logging.debug('Adding new entry to table')\n if not self._dbconnect or not self._cursor:\n raise Exception('Invalid call to Context Manager method!')\n\n date = record.get('date', '')\n time = record.get('time', '')\n location = record.get('location', '')\n node_id = record.get('nodeID', '')\n\n if '' in (date, time, node_id, location):\n raise Exception('Invalid SecuritySystemDB record!')\n\n self._cursor.execute(\"insert into {} values(?, ?, ?, ?)\".format(self._name),\n (date, time, location, node_id))", "def test_4_resource_records_actions(self):\n record_type = 'MX'\n name = 'test.example76.com'\n ttl = 60\n rdata = {\n 'exchange': 'mail.test.example76.com',\n 'preference': 256\n }\n # create resource record\n resp = self.record.create_resource_record(\n instance_id=self.instance_id, dnszone_id=self.zone_id, type=record_type, ttl=ttl, name=name, rdata=rdata)\n assert resp is not None\n assert resp.status_code == 200\n record_id = resp.get_result().get(\"id\")\n\n # update resource record\n name = 'test.example43.com'\n ttl = 120\n rdata = {\n 'exchange': 'mail.test.example43.com',\n 'preference': 256\n }\n resp = self.record.update_resource_record(\n instance_id=self.instance_id, dnszone_id=self.zone_id, record_id=record_id,\n type=record_type, ttl=ttl, name=name, rdata=rdata)\n assert resp is not None\n assert resp.status_code == 200\n\n # get resource record\n resp = self.record.get_resource_record(\n instance_id=self.instance_id, dnszone_id=self.zone_id, record_id=record_id)\n assert resp is not None\n assert resp.status_code == 200\n\n # delete resource record\n resp = self.record.delete_resource_record(\n instance_id=self.instance_id, dnszone_id=self.zone_id, record_id=record_id)\n assert resp is not None\n assert resp.status_code == 204", "def addRecord(self):\n\n ## Saving recorded entries to the CRM and Mailings Database\n print(\"Saving entries to the CRM and Mailings database...\")\n db_connection.executeQuery(\"INSERT INTO dbo.CRM (f_name, l_name, 
company, address, city, county, state, zip, primary_phone, secondary_phone, email_address) VALUES ('\" + self.first_name.replace(\"\\'\", \"\\'\\'\").title() + \"', '\" + self.last_name.replace(\"\\'\", \"\\'\\'\").title() + \"', '\" + self.crm_company_name.replace(\"\\'\", \"\\'\\'\").title() + \"', '\" + self.address.title() + \"', '\" + self.city.replace(\"\\'\", \"\\'\\'\").title() + \"', '\" + self.county.replace(\"\\'\", \"\\'\\'\").title() + \"', '\" + self.state_code.upper() + \"', '\" + str(self.zip_code) + \"', '\" + self.phone_number + \"', '\" + self.phone_number_2 + \"' , '\" + self.email_address + \"'); COMMIT\")\n db_connection.executeQuery(\"INSERT INTO dbo.Mailings (name, company, address) VALUES ('\" + self.first_name.replace(\"\\'\", \"\\'\\'\").title() + \" \" + self.last_name.replace(\"\\'\", \"\\'\\'\").title() + \"', '\" + self.company_name.replace(\"\\'\", \"\\'\\'\").title() + \"','\" + self.address + \" \" + self.city.title() + \" \" + self.county.title() + \" \" + self.state_code.upper() + \" \" + str(self.zip_code) + \"'); COMMIT\")", "def upsertToDB(self,table,records):\r\n\t\ttry:\r\n\t\t\t#engine, meta = self.connectToDB(dbName)\r\n\t\t\tconn = engine.connect()\r\n\t\t\t#table = Table(tableName, meta)\r\n\r\n\t\t\t# Check data type of records\r\n\t\t\tif isinstance(records, pd.DataFrame):\r\n\t\t\t\trecords = records.to_dict('records')\r\n\t\t\telif isinstance(records, dict):\r\n\t\t\t\tpass\r\n\t\t\telse:\r\n\t\t\t\traise Exception(\"Record Type {} is wrong\".format(type(records)))\r\n\r\n\t\t\t# Check if there is any column not in table\r\n\t\t\t\"\"\"\r\n\t\t\tselect_stmt = select([table])\r\n\t\t\tresult = conn.execute(select_stmt)\r\n\t\t\t\r\n\t\t\tresult.close()\r\n\t\t\t\"\"\"\r\n\r\n\t\t\t# To do, check if batch upsert is possible\r\n\t\t\tfor record in records:\r\n\t\t\t\tinsert_stmt = insert(table).values(record)\r\n\t\t\t\t#record.pop(pk)\r\n\t\t\t\tupsert_stmt = insert_stmt.on_duplicate_key_update(**record)\r\n\t\t\t\tconn.execute(upsert_stmt)\r\n\r\n\t\t\t#res.close()\r\n\t\t\tconn.close()\r\n\t\t\tself.logger.info(\"{} reocrds have been upsert into table {}\".format(len(records),table.__table__))\r\n\t\texcept Exception as e:\r\n\r\n\t\t\tif re.search('Unconsumed column names',str(e)):\r\n\t\t\t\tself.logger.error('On line {} - {}'.format(sys.exc_info()[2].tb_lineno, e))\r\n\t\t\telse:\r\n\t\t\t\tself.logger.error('On line {} - {}'.format(sys.exc_info()[2].tb_lineno, e))\r\n\t\t\t\texit(1)", "def insert_or_update_records(db_conn, items, db_table, ref_field=None):\n\n if ref_field is not None:\n pk = ref_field\n else:\n pk = DB_TABLE_STRUCTURE[db_table]['primary_key']\n to_update = [item for item in items if item.get('storage_mode', '') == 'update']\n to_insert = [item for item in items if item.get('storage_mode', '') != 'update']\n for item in to_update:\n if item.get('storage_mode'):\n del item['storage_mode']\n for item in to_insert:\n if item.get('storage_mode'):\n del item['storage_mode']\n try:\n if to_insert:\n rows = list()\n columns = [field for field in to_insert[0]]\n for item in to_insert:\n rows.append([item[field] for field in columns])\n db_conn.insertInTable(db_table, columns, rows)\n if to_update:\n for item in to_update:\n fields = [field for field in item if item[field] is not None]\n values = [[item[pk]] + [item[field] for field in fields]]\n db_conn.setField(db_table, pk, fields, values)\n except BaseException as e:\n if 'Duplicate' in str(e):\n print('Duplicate')\n # if recent_data:\n # stored_data = recent_data\n # elif 
db_table != 'texts':\n # db_logger.debug(f'Duplicate entry in table {db_table}. Reloading stored data in memory...')\n # stored_data = {'bids': get_db_bid_info(), 'orgs': get_data_from_table('orgs')}\n # elif db_table == 'bids':\n # items = list()\n # for item in to_insert:\n # if 'deleted_at' in item:\n # if not deleted_bid(item[pk], item, stored_data):\n # items.append(item)\n # else:\n # if is_new_or_update(item[pk], item['last_updated'], item['last_updated_offset'], item,\n # stored_data):\n # items.append(item)\n # elif db_table == 'texts':\n # items = list()\n # for item in to_insert:\n # item['bid_id'] += '_1'\n # items.append(item)\n # data = item_to_database(items, db_table)\n # if data:\n # return data\n # else:\n # return stored_data\n elif 'Data too long' in str(e):\n print('Data too long')\n if db_table == 'texts':\n # Error indicating that the text we are trying to store is way bigger than mysql maximum allowed size.\n # Split item into 2 and try again recursively until text fits\n text = items[0]['texto_original'].split()\n text_1, text_2 = ' '.join(text[:len(text) // 2]), ' '.join(text[len(text) // 2:])\n item_1 = items[0].copy()\n item_2 = items[0].copy()\n item_1['bid_id'] += '_1'\n item_2['bid_id'] += '_2'\n item_1['texto_original'] = text_1\n item_2['texto_original'] = text_2\n insert_or_update_records([item_1], 'texts', )\n insert_or_update_records([item_2], 'texts', )\n else:\n for item in to_insert:\n item['nombre'] = re.sub('\\d{2}\\)', '', item['nombre'])\n if len(item['nombre']) > 250:\n item['nombre'] = item['nombre'][:250]\n insert_or_update_records(to_insert, db_table, )\n elif 'Incorrect string value' in str(e):\n items[0]['pliego_tecnico'] = unidecode(items[0]['pliego_tecnico'])\n insert_or_update_records(items, db_table, )\n else:\n print(str(e))", "def add_data(self, site: str, pid: int, formatted_pid: str, image_url: str) -> bool:\n try:\n self._cursor.execute(f\"INSERT INTO {site} VALUES ({pid}, '{formatted_pid}', '{image_url}', {datetime.now().timestamp()});\")\n self._connection.commit()\n return True\n \n except sqlite3.IntegrityError:\n print(f'Failed to add {pid} ({formatted_pid}) - PID already exists in table \"{site}\".')\n \n return False", "def _associate_floating_ip(self, context, domain_id, extra, floating_ip_id, floating_ip, port_id):\n\n addresses = [{\n 'version': 4,\n 'address': floating_ip,\n }]\n try:\n names = self._create(context=context,\n addresses=addresses,\n name_format=cfg.CONF[self.name].format,\n extra=extra,\n domain_id=domain_id,\n managed_extra='portid:%s' % (port_id),\n resource_type='a:floatingip',\n resource_id=floating_ip_id)\n except (designate.exceptions.DuplicateRecord, CirrusRecordExists):\n LOG.warn('Could not create record for %s using default format, '\n 'trying fallback format' % (extra['instance_name']))\n names = self._create(context=context,\n addresses=addresses,\n name_format=cfg.CONF[self.name].format_fallback,\n extra=extra,\n domain_id=domain_id,\n managed_extra='portid:%s' % (port_id),\n resource_type='a:floatingip',\n resource_id=floating_ip_id)\n LOG.info(\"Created %s to point at %s\" % (','.join(names), floating_ip))", "def new_record(self, name: str = None, new_record: dict = None, notes: str = None, created_by: str = None,\n my_conn: Optional[dict] = None, t_log: Optional[TimeLogger] = None, verbose: bool = False):\n\n if my_conn is None:\n my_conn = self.my_conn\n else:\n self.my_conn = my_conn\n\n table_name = self.table_name\n self.all_records = \\\n generic_new_record_db(\n table_name=table_name, 
name=name, notes=notes, new_record=new_record, my_conn=my_conn, t_log=t_log,\n data_df=self.all_records, created_by=created_by, verbose=verbose\n )", "def case_duplicate(item):\n\n data = item.data\n case_number = data.get(\"case_number\")\n person_id = data.get(\"person_id\")\n\n table = item.table\n if case_number:\n query = (table.case_number == case_number) & \\\n (table.deleted != True)\n else:\n disease_id = data.get(\"disease_id\")\n if person_id and disease_id:\n query = (table.disease_id == disease_id) & \\\n (table.person_id == person_id) & \\\n (table.deleted != True)\n else:\n return\n\n duplicate = current.db(query).select(table.id,\n table.person_id,\n limitby=(0, 1)).first()\n if duplicate:\n item.data.person_id = duplicate.person_id\n item.id = duplicate.id\n item.method = item.METHOD.UPDATE", "def insertData(p):\n try:\n if IP in p and p[IP].dst == RD_ADRRESS and p[Ether].src != GW_MAC_ADRRESS and p[\n Ether].dst == GW_MAC_ADRRESS and TCP in p:\n hash = hashlib.sha256(bytes(p[TCP])).hexdigest()\n key = (p[IP].src, p[TCP].sport)\n mutex.acquire()\n if key in checkIP_dict and hash not in checkIP_dict[key]:\n checkIP_dict[key] = checkIP_dict[key] + [hash]\n elif key not in checkIP_dict:\n checkIP_dict[key] = [hash]\n mutex.release()\n print(len(checkIP_dict[key]), key, \"insert\")\n except Exception as e:\n print(e, \"error in insertData\")\n finally:\n if mutex.locked():\n mutex.release()\n sys.exit()", "def _inmate_record_get_or_create(self):\n raise NotImplementedError('_inmate_record_get_or_create needs to be implemented with the new format')", "def _add_record(self, holder_barcode, plate, holder_img, pins_img):\n guid = str(uuid.uuid4())\n self._store_writer.to_image(pins_img, holder_img, guid)\n img_path = self._store_writer.get_img_path()\n holder_image_path = self._store_writer.get_holder_img_path()\n record = Record.from_plate(holder_barcode, plate, img_path, holder_image_path)\n\n self.records.append(record)\n self._process_change()", "def insert(self, name, address, city, state, zipcode, hour, phone, rating, image):\r\n pass", "def add_record(self, record: Optional[Record] = None, **kwargs):\n\n if record is None:\n record = Record(**kwargs)\n else:\n record.update(**kwargs)\n\n return self.db.insert_record(record=record)", "def put_record(self, record):\r\n row = [record.get(field) for field in self.fields.names()]\r\n\r\n self.put(row)", "def add_to_db(name, email_id):\n conn = None\n try:\n conn = connect_to_db()\n cur = conn.cursor()\n # This is the best way that I found to do an 'upsert' in a database agnostic way.\n # Try to update the data first, and if no records get updated, insert them.\n cur.execute(UPDATE_STMT.format(nm=name, em=email_id))\n if cur.rowcount == 0:\n cur.execute(INSERT_STMT.format(nm=name, em=email_id))\n conn.commit()\n print('Successfully added/updated record!')\n except Exception as e:\n print(str(e))\n disconnect_from_db(conn)\n raise e\n finally:\n disconnect_from_db(conn)", "def create_new_record(account,userName,password):\n new_record = Records(account,userName,password)\n return new_record", "def update_record(self, context, record):\n record = self.dns_manager.update_record(context, record)\n return record", "def test_create_domain_with_a_record(self):\n fake_dns_instance = FakeDnsInstance()\n t = template_format.parse(domain_only_template)\n a_record = [{\n \"type\": \"A\",\n \"name\": \"ftp.example.com\",\n \"data\": \"192.0.2.8\",\n \"ttl\": 3600\n }]\n t['Resources']['domain']['Properties']['records'] = a_record\n instance = 
self._setup_test_cloud_dns_instance('dnsinstance_create', t)\n create_args = self._get_create_args_with_comments(a_record)\n self._stubout_create(instance, fake_dns_instance, **create_args)\n scheduler.TaskRunner(instance.create)()\n self.assertEqual((instance.CREATE, instance.COMPLETE), instance.state)\n self.m.VerifyAll()", "def create(self, key, val):\n if key not in self._datastore:\n self._datastore[key] = val\n return True\n else:\n raise KeyError(\n \"Tried to create a record for an existing key\"\n )", "def add(ts, taddr, ipaddr):\n # Create session\n\ts = Session()\n\n\ttry:\n\t query = s.query(Lookup).filter(\n\t Lookup.timestamp.in_([ts]))\n\t result = query.first()\n\n\t if result:\n\t return -1\n\t else:\n\t af = Lookup(ts, taddr, ipaddr)\n\t s.add(af)\n\n\t # commit the record the database\n\t s.commit()\n\t return 0\n\n\texcept:\n\t s.rollback()\n\t return -1\n\n\tfinally:\n\t\ts.close()", "def test_append_updated_record_to_queue_new_record(small_app):\n sample_hep_record = _IdDict({\n '$schema': 'http://localhost:5000/schemas/records/hep.json',\n 'authors': [{\n 'affiliations': [{'value': 'Copenhagen U.'}],\n 'curated_relation': False,\n 'full_name': 'Glashow, S.L.',\n 'signature_block': 'GLASs',\n 'uuid': '5ece3c81-0a50-481d-8bee-5f78576e9504'\n }],\n 'collections': [\n {'primary': 'CORE'},\n {'primary': 'HEP'}\n ],\n 'control_number': '4328',\n 'self': {'$ref': 'http://localhost:5000/api/literature/4328'},\n 'titles': [{'title': 'Partial Symmetries of Weak Interactions'}]\n })\n\n result = append_updated_record_to_queue(None, sample_hep_record,\n sample_hep_record, \"records-hep\",\n \"hep\")\n\n assert result is None\n assert str(sample_hep_record.id) != \\\n DisambiguationRecord.query.order_by(desc(\"id\")).first().record_id", "def add_unique_record(sprayday_pk, location_pk):\n try:\n sprayday = SprayDay.objects.get(pk=sprayday_pk)\n location = Location.objects.get(pk=location_pk)\n except (SprayDay.DoesNotExist, Location.DoesNotExist):\n pass\n else:\n from mspray.apps.main.utils import add_unique_data\n\n osmid = (\n get_osmid(sprayday.data)\n or get_updated_osm_from_ona(sprayday)\n or sprayday.data.get(\"newstructure/gps\")\n )\n if osmid:\n try:\n osmid = int(osmid)\n except ValueError:\n pass\n else:\n if osmid > 0:\n # see if we have a matching household structure\n try:\n Household.objects.get(hh_id=osmid)\n except Household.DoesNotExist:\n try:\n household = Household.objects.get(\n bgeom=sprayday.bgeom\n )\n except Household.DoesNotExist:\n pass\n else:\n osmid = household.hh_id\n sprayday.osmid = osmid\n sprayday.save()\n sprayday.refresh_from_db()\n add_unique_data(sprayday, HAS_UNIQUE_FIELD, location, osmid)", "def test_append_updated_record_to_queue_same_data(small_app):\n pid = PersistentIdentifier.get(\"literature\", 11883)\n publication_id = str(pid.object_uuid)\n record = Record.get_record(publication_id)\n\n append_updated_record_to_queue(None, record, record, \"records-hep\", \"hep\")\n\n assert str(record.id) != \\\n DisambiguationRecord.query.order_by(desc(\"id\")).first().record_id", "def check_and_add(email, name):\n\n key = ndb.Key(AddressEntry, email)\n model = key.get()\n # we only have a problem if a model for the given email exists AND the name is different\n if not model is None:\n if model.name != name:\n jdict = model.to_json_dict()\n jdict[\"requested_name\"] = name\n return False, jdict\n\n model = AddressEntry(\n id=email,\n email=email,\n name=name\n )\n model.put()\n return True, model.to_json_dict()", "def put_record(uuid):\n\n 
record = collection[uuid]\n record.data.update(request.json or request.form.to_dict())\n record.save()\n\n return get_record(uuid)", "def add_ip(uid, ip_addrs, ip_port, server_user, server_password, description):\r\n session = tables.get_session()\r\n if session is None:\r\n return False\r\n res = False\r\n try:\r\n ip_table = IpAddrs()\r\n if not ip_table.check_exist_by_ip(ip_addrs, uid, session):\r\n res = ip_table.insert_ip_by_user(ip_addrs, ip_port, server_user, server_password, description, uid, session)\r\n session.commit()\r\n except SQLAlchemyError as err:\r\n LOGGER.error('Insert new ip failed: %s', err)\r\n return res\r\n finally:\r\n session.close()\r\n return res", "def _upsert_single_stack(\n *, region: str, logical_name: str, regional_record: Deployment, template_kwargs: Dict[str, str]\n):\n stack_name = getattr(regional_record, logical_name)\n\n if stack_name is None:\n _create_single_stack(\n region=region, logical_name=logical_name, regional_record=regional_record, template_kwargs=template_kwargs\n )\n else:\n _update_single_stack(\n region=region, logical_name=logical_name, stack_name=stack_name, template_kwargs=template_kwargs\n )", "def post_instance_ip_create(self, resource_dict):\n pass", "def db_add_device_record(db_path, device_info):\n path_exist = os.path.exists(db_path)\n if path_exist is False:\n print '!!!Error, database does not exist.'\n return\n\n try:\n with db.connect(db_path) as conn:\n cursor = conn.cursor()\n # Prepare and execute SQL statement.\n # The REPLACE statement is an alias for the \"INSERT OR REPLACE\"\n # variant of the INSERT statement. This alias is provided for\n # compatibility with other SQL database engines.\n tup = (device_info['device_name'],\n device_info['os_type'],\n device_info['ip_address'],\n \", \".join(device_info['interfaces']))\n sql = (\"REPLACE INTO Devices VALUES (?,?,?,?)\")\n cursor.execute(sql, tup)\n conn.commit()\n except (db.OperationalError) as e:\n print(\"!!!Error, %s\" % repr(e))", "def add_record(self, msg_id, rec):\n if msg_id in self._records:\n raise KeyError(\"Already have msg_id %r\" % (msg_id))\n self._check_dates(rec)\n self._records[msg_id] = rec\n self._add_bytes(rec)\n self._maybe_cull()", "def save_record(self, data, rownum=None):\n\n if rownum is not None:\n # This is an update\n records = self.get_all_records()\n records[rownum] = data\n with open(self.filename, 'w', encoding='utf-8') as fh:\n csvwriter = csv.DictWriter(fh, fieldnames=self.fields.keys())\n csvwriter.writeheader()\n csvwriter.writerows(records)\n else:\n # This is a new record\n newfile = not os.path.exists(self.filename)\n\n with open(self.filename, 'a', encoding='utf-8') as fh:\n csvwriter = csv.DictWriter(fh, fieldnames=self.fields.keys())\n if newfile:\n csvwriter.writeheader()\n csvwriter.writerow(data)", "def pre_virtual_DNS_record_create(self, resource_dict):\n pass", "def touch(self, connection=None):\n self.create_marker_table()\n\n if connection is None:\n connection = self.connect()\n connection.autocommit = True # if connection created here, we commit it here\n\n connection.cursor().execute(\n \"\"\"INSERT INTO {marker_table} (update_id, target_table)\n VALUES (%s, %s)\n ON DUPLICATE KEY UPDATE\n update_id = VALUES(update_id)\n \"\"\".format(marker_table=self.marker_table),\n (self.update_id, self.table)\n )\n # make sure update is properly marked\n assert self.exists(connection)", "def update(request):\n from pprint import pformat\n if 'ipv4' not in request.GET and 'ipv6' not in request.GET:\n return HttpResponse(\"Must 
specify one or both of ipv4/ipv6 address\\nParams:%s\" % pformat(request.GET.dict()), status=400)\n if not u'domain' in request.GET:\n return HttpResponse(\"Must specify domain\\nParams:%s\" % pformat(request.GET.dict()), status=400)\n\n for ipvx, record_type in ((u'ipv4', 'A'), (u'ipv6', 'AAAA')):\n if ipvx not in request.GET:\n continue\n record, created = Record.objects.get_or_create(\n name=request.GET['domain'],\n type=record_type,\n )\n record.domain_id = 1\n record.ttl = 1\n record.auth = True\n record.content = request.GET[ipvx]\n record.save()\n\n return HttpResponse(\"Saved record(s)\")", "def new_entry(self,hostname,ip,port): \n\t\tnew_transaction = {\n\t\t'hostname':hostname,\n\t\t'ip':ip,\n\t\t'port':port\n\t\t}\n\t\tbuffer_len = self.blockchain.new_transaction(new_transaction)\n\t\tif buffer_len >= self.BUFFER_MAX_LEN or buffer_len >= self.blockchain.quota-self.BUFFER_MAX_LEN:\n\t\t\tself.mine_block()", "def _do_upsert(self, conn, item, spider):\n query_check = \"select * from %s where url = %%s\" % spider.name\n conn.execute(query_check, (item['url'], ))\n result = conn.fetchone()\n if result:\n query_udpate = \"UPDATE %s SET price=%ss\" % spider.name\n conn.execute(query_udpate, (item['price']))\n log.msg(\"Item updated in db: %s\" % item, level=log.DEBUG)\n else:\n query_insert = \"INSERT INTO %s (title, company, description, price, status, image, url, category) VALUES (%%s, %%s, %%s, %%s, %%s, %%s, %%s, %%s)\" % spider.name\n conn.execute(query_insert,\n (item['title'], item['company'], item['description'], item['price'], item['status'], item['image'], item['url'], item['category']))\n log.msg(\"Item stored in db: %s\" % item, level=log.DEBUG)", "def disease_duplicate(item):\n\n data = item.data\n code = data.get(\"code\")\n name = data.get(\"name\")\n\n table = item.table\n queries = []\n if code:\n queries.append((table.code == code))\n if name:\n queries.append((table.name == name))\n if queries:\n query = reduce(lambda x, y: x | y, queries)\n else:\n return\n\n rows = current.db(query).select(table.id,\n table.code,\n table.name)\n duplicate = None\n for row in rows:\n if code and row.code == code:\n duplicate = row.id\n break\n if name and row.name == name:\n duplicate = row.id\n if duplicate:\n item.id = duplicate\n item.method = item.METHOD.UPDATE", "def insert_record(self):\r\n try:\r\n db.session.add(self)\r\n db.session.commit()\r\n return {\"error\": False, \"id\": self.id}\r\n except exc.SQLAlchemyError as e: # pragma: no cover\r\n # print(e)\r\n # print(sys.exc_info())\r\n db.session.rollback()\r\n return {\"error\": True}\r\n finally:\r\n db.session.close()", "def create_A_record(self, heroku_host_ip, domain, ttl):\n r = self.api.post_create_record(\n domain = domain,\n name = None,\n record_type = \"A\",\n prio = None,\n content = heroku_host_ip,\n ttl = ttl)\n dns_a_record = self.extract_A_records(r[\"record\"])\n return dns_a_record", "def createRecord(self):\n self.dto.getRecord().append(self.controller.createNewObj())\n print(\"Record added.\")", "def test_update_record_only(self):\n fake_dns_instance = FakeDnsInstance()\n t = template_format.parse(domain_only_template)\n instance = self._setup_test_cloud_dns_instance('dnsinstance_update', t)\n instance.resource_id = 4\n update_records = [{'type': 'A',\n 'name': 'ftp.example.com',\n 'data': '192.0.2.8',\n 'ttl': 3600}]\n\n mock_client = self.m.CreateMockAnything()\n self.m.StubOutWithMock(instance, 'cloud_dns')\n instance.cloud_dns().AndReturn(mock_client)\n self.m.StubOutWithMock(mock_client, 
\"get\")\n mock_domain = self.m.CreateMockAnything()\n mock_client.get(fake_dns_instance.resource_id).AndReturn(mock_domain)\n\n # mock_domain.update shouldn't be called in this scenario, so\n # stub it out but don't record a call to it\n self.m.StubOutWithMock(mock_domain, \"update\")\n\n fake_records = list()\n mock_domain.list_records().AndReturn(fake_records)\n mock_domain.add_records([{\n 'comment': None,\n 'priority': None,\n 'type': 'A',\n 'name': 'ftp.example.com',\n 'data': '192.0.2.8',\n 'ttl': 3600}])\n self.m.ReplayAll()\n\n uprops = dict(instance.properties)\n uprops['records'] = update_records\n ut = rsrc_defn.ResourceDefinition(instance.name,\n instance.type(),\n uprops)\n instance.state_set(instance.CREATE, instance.COMPLETE)\n\n scheduler.TaskRunner(instance.update, ut)()\n self.assertEqual((instance.UPDATE, instance.COMPLETE), instance.state)\n self.m.VerifyAll()", "def post_virtual_DNS_record_update(self, resource_id, resource_dict):\n pass", "def dnsUpdate(portId, ipAddr='', action='create'):\n\tzone = 'osdev.skrill.net.'\n\trevZone = '23.32.10.in-addr.arpa'\n\tcname = portId + '.' + zone\n\tttl = 300\n\tnsServer = '10.32.29.99'\n key = 'yw0ADuZjXAhcGgMOYg/Clx1128iUSfhlOHdsY4CzVNIVVVXismrAe+WKMBxocLhbrIVHGvmR94jDC46K18K6oQ=='\n keyRing = dns.tsigkeyring.from_text({zone : key})\n\thostName = genHostname(ipAddr)\n\tdnsUpdate = dns.update.Update(zone, keyring=keyRing)\n\tipAddr = str(ipAddr)\n\thostName = str(hostName)\n\tif action == 'create':\n\t\tdnsUpdate.replace( hostName.split('.')[0], ttl, 'A', ipAddr )\n\t\tdnsResponse = dns.query.tcp(dnsUpdate, nsServer )\n\t\tlogging.info('DNS A record updated for: ' + hostName)\n\t\tdnsUpdate.replace(portId, ttl, 'CNAME', hostName)\n\t\tdnsResponse = dns.query.tcp(dnsUpdate, nsServer )\n\t\tlogging.info('DNS CNAME record updated for: ' + hostName)\n\t\tdnsUpdate = dns.update.Update(revZone, keyring=keyRing)\n\t\tdnsUpdate.replace(ipAddr.split('.')[3], ttl, 'PTR', hostName)\n\t\tdnsResponse = dns.query.tcp(dnsUpdate, nsServer )\n\t\tlogging.info('DNS PTR record updated for: ' + hostName)\n\tif action == 'delete':\n\t\ttry:\n\t\t\thostName = dns.resolver.query(cname, 'CNAME')[0].to_text()\n\t\t\tipAddr = dns.resolver.query(hostName, 'A')[0].to_text()\n\t\texcept Exception, e:\n\t\t\tlogging.exception('DNS query failed for cname and A records: ' + cname + ' ' + hostName)\n\t\t\thostName = ''\n\t\t\treturn hostName\n\t\tdnsUpdate.delete(cname, 'CNAME')\n\t\tdnsResponse = dns.query.tcp(dnsUpdate, nsServer )\n\t\tlogging.info('DNS CNAME record deleted for: ' + portId + ' to ' + hostName)\n\t\tdnsUpdate.delete(hostName.split('.')[0])\n\t\tdnsResponse = dns.query.tcp(dnsUpdate, nsServer )\n\t\tlogging.info('DNS A record deleted for: ' + hostName)\n\t\tdnsUpdate = dns.update.Update(revZone, keyring=keyRing)\n dnsUpdate.delete(ipAddr.split('.')[3])\n\t\tdnsResponse = dns.query.tcp(dnsUpdate, nsServer )\n\t\tlogging.info('DNS PTR record deleted for: ' + hostName)\n\t\treturn hostName", "def _do_upsert(self, conn, item, spider):\n id = self._get_id(item)\n now = datetime.utcnow().replace(microsecond=0).isoformat(' ')\n\n conn.execute(\"\"\"SELECT EXISTS(\n SELECT 1 FROM products WHERE id = %s\n )\"\"\", (id, ))\n ret = conn.fetchone()[0]\n\n if ret:\n conn.execute(\"\"\"\n UPDATE products\n SET url=%s, title=%s, picture=%s, price=%s, brand=%s, store=%s, id_store=%s, updated=%s, tag1=%s, tag2=%s, tag3=%s, tag4=%s, tag5=%s\n WHERE id=%s\n \"\"\", (item['url'], item['title'], item['picture'], item['price'], item['brand'], item['store'], 
item['id_store'], now, item['tag1'], item['tag2'] , item['tag3'], item['tag4'], item['tag5'], id))\n spider.log(\"Item updated in db: %s %r\" % (id, item))\n else:\n conn.execute(\"\"\"\n INSERT INTO products (id, url, title, picture, price, brand, store, id_store, updated, tag1, tag2, tag3, tag4, tag5)\n VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)\n \"\"\", (id, item['url'], item['title'], item['picture'], item['price'], item['brand'], item['store'], item['id_store'], now, item['tag1'], item['tag2'] , item['tag3'], item['tag4'], item['tag5']))\n spider.log(\"Item stored in db: %s %r\" % (id, item))", "def create_record(\n cls, dump, model, pid_provider, legacy_id_key=\"legacy_recid\"\n ):\n import ipdb\n\n ipdb.set_trace()\n\n if legacy_id_key is None:\n legacy_id_key = \"pid\"\n try:\n with db.session.begin_nested():\n record_uuid = uuid.uuid4()\n provider = pid_provider.create(\n object_type=\"rec\",\n object_uuid=record_uuid,\n )\n dump[\"pid\"] = provider.pid.pid_value\n record = model.create(dump, record_uuid)\n record.commit()\n db.session.commit()\n return record\n except IlsValidationError as e:\n click.secho(\"VALIDATION ERROR\", fg=\"blue\")\n click.secho(\n \"RECID {0} did not pass validation. ERROR: \\n {1}\".format(\n dump[legacy_id_key],\n [\n \"{0}: {1}\".format(\n error.res[\"field\"], error.res[\"message\"]\n )\n for error in e.errors\n ],\n ).join(\"\\n\"),\n fg=\"blue\",\n )\n click.secho(e.original_exception.message, fg=\"blue\")\n db.session.rollback()\n raise e", "def save_ehr_records(self, ehr_records, patient_record, skip_existing_duplicated=False):\n self._check_index_service()\n drf = self._get_drivers_factory(self.ehr_repository)\n with drf.get_driver() as driver:\n for r in ehr_records:\n # calculate and set the structure ID for the given record\n self._set_structure_id(r)\n r.bind_to_patient(patient_record)\n if not r.is_persistent:\n r.increase_version()\n encoded_records = [driver.encode_record(r) for r in ehr_records]\n try:\n saved, errors = driver.add_records(encoded_records, skip_existing_duplicated)\n except Exception, exc:\n for ehr in ehr_records:\n self.index_service.check_structure_counter(ehr.structure_id)\n raise exc\n errors = [driver.decode_record(e) for e in errors]\n saved_struct_counter = Counter()\n for rec in ehr_records:\n if rec.record_id in saved:\n saved_struct_counter[rec.structure_id] += 1\n error_struct_counter = set([rec.record_id for rec in errors])\n for struct, counter in saved_struct_counter.iteritems():\n self.index_service.increase_structure_counter(struct, counter)\n for struct in error_struct_counter:\n self.index_service.check_structure_counter(struct)\n saved_ehr_records = [ehr for ehr in ehr_records if ehr.record_id in saved]\n patient_record = self._add_ehr_records(patient_record, saved_ehr_records)\n return saved_ehr_records, patient_record, errors", "def create(self, name, check_duplicate=True, **kwargs):\n return super().create(name, check_duplicate=check_duplicate, **kwargs)", "def test_update_domain_with_a_record(self):\n a_record = [{'type': 'A',\n 'name': 'ftp.example.com',\n 'data': '192.0.2.8',\n 'ttl': 3600}]\n self.test_update(updateRecords=a_record)", "def save_data(self, record):\n self.dbm.addRecord(record)", "def test_same_ip(self):\n self.create_ptr(\n ip_str='128.193.0.2', ip_type='4', fqdn='foo1.oregonstate.edu')\n\n with self.assertRaises(ValidationError):\n self.create_ptr(\n ip_str='128.193.0.2', ip_type='4', fqdn='foo2.oregonstate.edu')", "def file_record(db, user_name, 
original_file_name, target_user=None):\n\n record = db.get_file_record(user_name)\n\n if record is None:\n db.create_file_record(user_name, target_user, original_file_name)\n else:\n db.update_file_record(user_name, target_user, original_file_name)", "def test_create_record(self):\n pass", "def setRecord(self,record):\n idLower = record.getId().lower()\n type = record.name\n typeIds = self.indexed[type]\n if idLower in typeIds:\n oldRecord = typeIds[idLower]\n index = self.records.index(oldRecord)\n self.records[index] = record\n else:\n self.records.append(record)\n typeIds[idLower] = record", "def _add_record(self, datetime_, hash_):\n assert isinstance(datetime_, datetime)\n assert isinstance(hash_, str)\n record = {'datetime': datetime_, 'hash': hash_, 'artifacts': self.artifacts}\n self.logger.debug(f'Adding record: {record}')\n self.db_collection.update_one(self.query, {'$addToSet': {'records': record}})", "def save(self, **kwargs):\n # use request ip if it's not given\n if not self.ip:\n self.ip = self.request_ip\n # map all ipv4 addresses to ipv6\n # (for consistency with node who systematically does it)\n if ip_address(self.ip).version == 4:\n self.ip = \"::ffff:\" + self.ip\n \n super(Hit, self).save()", "async def insert(self, record, collection: str):\n db_record = await self.database[collection].insert_one(record.dict(exclude={'id'})) \n return record" ]
[ "0.72148836", "0.66041684", "0.62076926", "0.6207673", "0.5900262", "0.58394516", "0.58283144", "0.57992715", "0.57729447", "0.5762532", "0.57310665", "0.5682131", "0.5672659", "0.5666608", "0.5655603", "0.5637485", "0.56220573", "0.56179416", "0.560817", "0.56004244", "0.5599704", "0.5577504", "0.5555616", "0.5548527", "0.5533867", "0.55329686", "0.546559", "0.54505575", "0.5434616", "0.5421104", "0.54064876", "0.5398219", "0.5370938", "0.5336876", "0.5329138", "0.53115386", "0.53113335", "0.5311161", "0.5310964", "0.53011936", "0.52785456", "0.52725565", "0.5262811", "0.5262434", "0.52547103", "0.5249709", "0.52436966", "0.5232702", "0.52305114", "0.52238333", "0.52081287", "0.52057594", "0.5198249", "0.5187979", "0.51732004", "0.5141102", "0.51282907", "0.5127616", "0.5124287", "0.5123466", "0.5119206", "0.510896", "0.5107917", "0.5100139", "0.5090782", "0.5089584", "0.5089102", "0.50875515", "0.50816804", "0.5079255", "0.50637794", "0.5061306", "0.5059351", "0.50519973", "0.50514936", "0.5045781", "0.5042535", "0.50424814", "0.50067747", "0.5005403", "0.50038296", "0.50023335", "0.49972847", "0.49915975", "0.4989929", "0.49789083", "0.4975163", "0.49688995", "0.49667972", "0.49662602", "0.49615964", "0.4959126", "0.49578577", "0.49521664", "0.4952029", "0.4948242", "0.494483", "0.49412435", "0.49381337", "0.49357113" ]
0.7590886
0
Terminate an EC2 instance
def terminateInstance(region,zone,instance_id):
    try:
        ec2 = boto.ec2.connect_to_region(region+'-'+zone)
        ec2.terminate_instances(instance_ids=[instance_id])
        return True
    except Exception as e:
        logError(e)
        return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def ec2_stop(resource, metadata):\n instances = resource.instances.filter(\n Filters=[{'Name': 'instance-state-name', 'Values': ['running']},\n {'Name': 'tag:Name', 'Values': [metadata['fqdn']]}, ])\n\n for instance in instances:\n print(\"Terminating vm id {0} name {1}\".format(instance.id, instance.tags[0]['Value']))\n # resource.instances.filter(InstanceIds=[instance.id]).stop()\n resource.instances.filter(InstanceIds=[instance.id]).terminate()", "def terminate_instance(instance_id):\n # Instantiate the service resource object\n ec2_resource = session.resource('ec2', region_name=region)\n try:\n # Terminate an instance\n response = ec2_resource.Instance(instance_id).terminate(DryRun=False)\n print(response)\n print(\"\\nSuccessfully terminating instance: \", instance_id)\n except botocore.exceptions.ClientError as e:\n if e.response['Error']['Code'] == \"InvalidInstanceID.Malformed\":\n print(\"Error: Invalid instance id!!\")\n else:\n raise\n return", "def terminate_instance(self):\n # connect to ec2\n try:\n ec2_region = [r for r in boto.ec2.regions() if r.name == self._region][0]\n except indexerror:\n print >> sys.stderr, 'unknown region: %s' % self._region\n exit(2)\n ec2_connection = ec2_region.connect()\n\n #import code; code.interact(local=locals())\n instances = reduce(list.__add__, [reservation.instances for reservation in ec2_connection.get_all_instances()])\n name_matches = [i for i in instances\n if i.tags.get('Name', None) == self._instance_name and i.state == 'running']\n\n if (not name_matches):\n raise ValueError('No instance found with name %s' % self._instance_name)\n elif len(name_matches) > 1:\n raise ValueError('Multiple instances found with name %s' % self._instance_name)\n\n instance = name_matches[0]\n\n ec2_connection.terminate_instances(instance_ids=[instance.id])", "def stop_instance():\n send_line('stop instance')\n os.system(f'gcloud compute instances stop {os.uname()[1]} --zone us-east1-b')", "def terminate(filter=\".*\"):\n list_instances,list_headers = ec2list(filter=filter)\n if not list_instances:\n print(\"No instance matched the filter\")\n sys.exit(1)\n title = \"Pick the instances to terminate\"\n options = [ '{} ---- {} ---- {} ---- {}'.format(\n x[\"name\"],\n x[\"privateip\"],\n x[\"id\"],\n x[\"launchtime\"],\n x[\"state\"]) for x in list_instances ]\n\n list_selected = pick(options, title, multiselect=True, default_index=len(options)-1)\n del(options[:-1])\n list_ips = []\n if not list_selected:\n print(\"No host selected, exiting\")\n return\n list_ids = []\n for option,index in list_selected:\n list_ids.append(list_instances[index]['id'])\n print(\"Terminating instances {}\".format(list_ids))\n boto3.client(\"ec2\").terminate_instances(InstanceIds=list_ids)", "def terminate_ow_instance(ow, ow_instance_id):\n log.info(\"terminate_ow_instance( %s )\", ow_instance_id)\n try:\n ow.stop_instance(InstanceId=ow_instance_id)\n except Exception, e:\n print(e)\n log.info(e)\n sys.exit()\n while True:\n data = ow.describe_instances(InstanceIds=[ow_instance_id])['Instances']\n raw = json.dumps(data)\n ow_instance_json = json.loads(raw)\n print(ow_instance_json[0]['InstanceId'], ow_instance_json[0]['Status'])\n log.info(\"%s %s\", ow_instance_json[0]['InstanceId'],\n ow_instance_json[0]['Status'])\n if ow_instance_json[0]['Status'] == \"stopped\":\n print(ow_instance_json[0]['InstanceId'],\n ow_instance_json[0]['Status'])\n log.info(\"%s %s\", ow_instance_json[0]['InstanceId'],\n ow_instance_json[0]['Status'])\n response = 
ow.delete_instance(InstanceId=ow_instance_id)\n print(response)\n log.info(\"Delete instance = %s\", response)\n break\n else:\n time.sleep(60)\n continue", "def stop_instance(instance_id):\n # Instantiate the service resource object\n ec2_resource = session.resource('ec2', region_name=region)\n try:\n # Stop an instance\n response = ec2_resource.Instance(instance_id).stop(DryRun=False)\n print(response)\n print(\"\\nSuccessfully stopping instance: \", instance_id)\n except botocore.exceptions.ClientError as e:\n if e.response['Error']['Code'] == \"InvalidInstanceID.Malformed\":\n print(\"Error: Invalid instance id!!\")\n else:\n raise\n return", "def terminate_instance_in_asg(instance_id):\n if not app_config['DRY_RUN']:\n logger.info('Terminating ec2 instance in ASG {}...'.format(instance_id))\n try:\n response = client.terminate_instance_in_auto_scaling_group(\n InstanceId=instance_id,\n ShouldDecrementDesiredCapacity=True\n )\n if response['ResponseMetadata']['HTTPStatusCode'] == requests.codes.ok:\n logger.info('Termination signal for instance is successfully sent.')\n else:\n logger.info('Termination signal for instance has failed. Response code was {}. Exiting.'.format(response['ResponseMetadata']['HTTPStatusCode']))\n raise Exception('Termination of instance failed. Response code was {}. Exiting.'.format(response['ResponseMetadata']['HTTPStatusCode']))\n\n except client.exceptions.ClientError as e:\n if 'DryRunOperation' not in str(e):\n raise", "def stop_ec2_instance(client, instance_id, hibernate=False):\n\n response = client.stop_instances(\n InstanceIds=[instance_id],\n )\n return response", "def stop_instance(self, args):\n message = MessageClass()\n region = args[\"Region\"]\n instanceid = args[\"Instance-ID\"].replace(\",\", \" \").split()\n\n # Boto3 resource creation by providing the access_id and access_secret\n ec2 = boto3.resource(service_name='ec2', region_name=region, api_version=None, use_ssl=True,\n verify=None, endpoint_url=None, aws_access_key_id=self.aws_access_key,\n aws_secret_access_key=self.aws_secret_token, aws_session_token=None,\n config=None)\n ec2.instances.filter(InstanceIds=instanceid).stop()\n\n message.message_text = \"Instance Stopped\"\n return message.to_json()", "def stop_instance(ec2_client, instances):\n # get a list of instance ids\n instances_ids = [i.instance_id for i in instances]\n \n # start the instances\n ec2_client.stop_instances(InstanceIds=instances_ids)\n \n # wait till instance is stopped\n waiter = ec2_client.get_waiter(\"instance_stopped\")\n waiter.wait(InstanceIds=instances_ids)\n print(\"\\n===EC2 instance has stopped!\")", "def stop_instance(tcserver_dir, instance_name=\"instance1\"):\n print(\"Stopping a tcServer instance...\")\n\n pushdir(tcserver_dir)\n subprocess.call([\"./tcruntime-ctl.sh\", instance_name, \"stop\"])\n popdir()", "def main(self, cmd_args):\n reservations = self._ec2_client.describe_instances(Filters=[\n {'Name': \"instance-id\", 'Values': [cmd_args.id]},\n {'Name': \"tag:Name\", 'Values': [cmd_args.name]}\n ])['Reservations']\n if not reservations:\n halt.err(\"No instances matching given parameters found.\")\n instance_state = reservations[0]['Instances'][0]['State']['Name']\n if instance_state in (\"shutting-down\", \"terminated\"):\n halt.err(\"Instance has already been terminated.\")\n\n addresses = self._ec2_client.describe_addresses(Filters=[\n {'Name': \"instance-id\", 'Values': [cmd_args.id]}\n ])['Addresses']\n print(\"\")\n if addresses:\n self._disassociate_addresses(addresses, 
cmd_args.save_ips)\n elif cmd_args.save_ips is True:\n print(\"No elastic IPs associated with instance.\")\n\n self._ec2_client.terminate_instances(InstanceIds=[cmd_args.id])\n print(\"Instance termination process started.\")", "def stop_instance(self):\n instance_id = self._choose_among_running_instances()\n\n # Cancel\n if not instance_id:\n print 'Operation cancelled'\n return\n\n print '# Stopping the instance \"%s\"' % instance_id\n self.compute.stop_instance(instance_id)\n print 'The instance has been stopped'", "def stop(self, aws_tags: List[Dict]) -> None:\n for instance_arn in self.tag_api.get_resources(\"ec2:instance\", aws_tags):\n instance_id = instance_arn.split(\"/\")[-1]\n try:\n if not self.asg.describe_auto_scaling_instances(\n InstanceIds=[instance_id]\n )[\"AutoScalingInstances\"]:\n self.ec2.stop_instances(InstanceIds=[instance_id])\n print(f\"Stop instances {instance_id}\")\n except ClientError as exc:\n ec2_exception(\"instance\", instance_id, exc)", "def asg_restart(session, hostname, timeout, callback=None):\n client = session.client('ec2')\n resource = session.resource('ec2')\n response = client.describe_instances(Filters=[{\"Name\":\"tag:Name\", \"Values\":[hostname]},\n {\"Name\":\"instance-state-name\", \"Values\":[\"running\"]}])\n\n for reservation in response['Reservations']:\n for instance in reservation['Instances']:\n id = instance['InstanceId']\n print(\"Terminating {} instance {}\".format(hostname, id))\n resource.Instance(id).terminate()\n print(\"Sleeping for {} minutes\".format(timeout/60.0))\n time.sleep(timeout)\n\n if callback is not None:\n callback()", "def shutdown_vpc ( ec2_conn, vpc, exceptions = [] ) :\n instances = ec2_conn.get_only_instances( filters = { \"vpc-id\" : vpc.id } )\n exceptions = [ exception.upper( ) for exception in exceptions ]\n for instance in instances :\n if instance.state == 'running' :\n is_exception = False\n instance_name = instance.tags[ 'Name' ].upper( )\n for exception in exceptions :\n if instance_name.find( exception ) != -1 :\n is_exception = True\n break\n\n if not is_exception :\n print \" Stopping instance \" + instance_name\n instance.stop( )\n else :\n print \" Exception found, not stopping \" + instance_name\n\n else :\n print \"WARNING: found instance in non-running state.\"\n print \" name: \" + instance.tags[ 'Name' ]\n print \" id: \" + instance.id\n print \" state: \" + instance.state", "async def stop(self, now=False):\n alive = await self.remote_signal(15)\n\n try:\n self.stop_ec2_instance(self.ec2_instance_id) # function that uses boto3 to stop an instance based on instance_id\n except Exception as e:\n self.log.error(\"Error in terminating instance\") # easy to save the instance id when you start the instance\n self.log.error(str(e)) # this will print the error on our JupyterHub process' output\n\n self.clear_state()", "def terminate(\n instance_id=None,\n name=None,\n region=None,\n key=None,\n keyid=None,\n profile=None,\n filters=None,\n):\n instances = find_instances(\n instance_id=instance_id,\n name=name,\n region=region,\n key=key,\n keyid=keyid,\n profile=profile,\n return_objs=True,\n filters=filters,\n )\n if instances in (False, None, []):\n return instances\n\n if len(instances) == 1:\n instances[0].terminate()\n return True\n else:\n log.warning(\"Refusing to terminate multiple instances at once\")\n return False", "def destroy_VM_instance(settings, ip_address):\n print(\"Terminating VM instance %s\" % ip_address)\n if _is_instance_running(settings, ip_address, ip_given=True):\n 
try:\n if not confirm_teardown():\n return\n instance = get_this_instance(settings, ip_address,\n ip_given=True)\n instance_id = instance.id\n ip_address = get_instance_ip(instance)\n delete_chef_node_client(settings, instance_id, ip_address)\n connection = create_cloud_connection(settings)\n connection.terminate_instances([instance_id])\n _wait_for_instance_to_terminate(settings, ip_address)\n except Exception:\n traceback.print_exc(file=sys.stdout)\n raise\n else:\n print \"VM instance with IP %s doesn't exist\" % ip_address", "def TerminateMachine(instance_id, status):\n # Terminate the EC2 instance.\n ec2 = ec2_manager.EC2Manager()\n\n if TERMINATE_INSTANCES:\n logging.info('Terminating the machine with instance id \"%s\".', instance_id)\n ec2.TerminateInstances([instance_id])\n else:\n logging.info('Stopping the machine with instance id \"%s\".', instance_id)\n ec2.StopInstances([instance_id])\n\n # Update the corresponding client machine model.\n client_machine.SetMachineStatus(instance_id, status)", "def stop_instance(InstanceId=None, Force=None):\n pass", "def destory_instance(InstanceIds):\n client = boto3.client('ec2')\n # TODO: delete an EC2 instance\n # Wait for the instance to enter the running state\n # Reload the instance attributes\n\n try:\n response = client.terminate_instances(\n InstanceIds=InstanceIds,\n DryRun=False\n )\n except ClientError as e:\n print(e)\n\n return", "def teardown(self):\n # Only terminate if instance is running\n if self.instance:\n instance_status = aws.check_instance_status(self.config, self.instance.id)\n if instance_status == \"running\":\n aws.terminate_ec2_instance(self.config, self.instance.id)\n super().teardown()", "def terminate_instances(self, instance_ids):\n response = instance.terminate_instances(self.url, self.verb,\n self.headers, self.version,\n instance_ids)\n if response is not None :\n res = TerminateInstancesResponse.TerminateInstancesResponse()\n parseString(str(response.text), res)\n return res\n else :\n return None", "def delete_instance(InstanceId=None, DeleteElasticIp=None, DeleteVolumes=None):\n pass", "def test_ec2_down_no_instance(runner, ec2):\n result = runner.invoke(cli.cli, ['ec2', 'down', '-i', 'dummy'])\n assert result.exit_code == 2", "def terminate_instances(module, ecs, instance_ids, instance_tags):\n\n changed = False\n instance_dict_array = []\n\n if not isinstance(instance_ids, list) or len(instance_ids) < 1:\n module.fail_json(msg='instance_ids should be a list of instances, aborting')\n filters = {}\n if instance_tags:\n for key, value in instance_tags.items():\n filters[\"tag:\" + key] = value\n\n terminated_instance_ids = []\n region, connect_args = get_acs_connection_info(module)\n for inst in ecs.get_all_instances(instance_ids=instance_ids, filters=filters):\n if inst.state == 'absent':\n terminated_instance_ids.append(inst.id)\n instance_dict_array.append(get_instance_info(inst))\n try:\n inst.terminate(**connect_args)\n except ECSResponseError as e:\n module.fail_json(msg='Unable to terminate instance {0}, error: {1}'.format(inst.id, e))\n changed = True\n\n return (changed, instance_dict_array, terminated_instance_ids)", "def terminate(self, hostname):\n # unique names\n matches = list(filter(lambda x: x.name == hostname, self.instances))\n\n if len(matches) == 0:\n # already terminated\n return\n elif len(matches) == 1:\n instance = matches[0]\n # terminate it\n self.names.append(instance.name)\n self.instances.remove(instance)\n # actual deletion from openstack\n status = 
self.nova.servers.get(instance.id).status\n\n while status == 'BUILD':\n time.sleep(5)\n status = self.nova.servers.get(instance.id).status\n print(\"Waiting for VM to finish BUILD before terminating.\")\n instance.delete()\n print(\"Worker VM \" + hostname + \" deleted.\")\n else:\n # inconsistency in the system\n raise ValueError('More than one of same name in self.instances')", "def restart_ec2_instance(client, instance_id):\n\n response = client.reboot_instances(\n InstanceIds=[instance_id],\n )\n return response", "def Stop_Instances(ids=Get_Running_Instances()):\n ec2 = boto3.client('ec2')\n #call the features client from the boto3 library\n if not ids:\n #if the list of Ec2 instances returned is empty.\n print(\"No Instance in the state Running or pending\")\n else:\n ec2.stop_instances(InstanceIds=ids)\n #stop the instances using their id\n ec2.get_waiter('instance_stopped').wait(InstanceIds=ids)\n #wait for the state of the instances to change to stopped.\n print('instance {} was shutdown'.format(ids))", "def TerminateExpiredMachine(instance_id):\n TerminateMachine(instance_id, enum.MACHINE_STATUS.EXPIRED)", "def send_termination_command(\n instance: Dict[str, str], groups: Dict[str, Dict[str, str]], topic_arn: str, service_role_arn: str\n) -> None:\n try:\n ssm.send_command(\n InstanceIds=[\n instance[\"InstanceId\"],\n ],\n DocumentName=SSM_TERMINATION_DOCUMENT_NAME,\n DocumentVersion=\"$LATEST\",\n Comment=\"Instance is about to be terminated\",\n Parameters={\"commands\": [groups[instance[\"AutoScalingGroupName\"]][\"stop_command\"]]},\n NotificationConfig={\n \"NotificationArn\": topic_arn,\n \"NotificationEvents\": [\"All\"],\n \"NotificationType\": \"Invocation\",\n },\n ServiceRoleArn=service_role_arn,\n )\n except ssm.exceptions.InvalidInstanceId:\n logger = getLogger()\n logger.info(\"Instance is not registered with SSM, skipping\")", "def _stop(self, instance):\n try:\n _, err = utils.execute('sudo', 'vzctl', 'stop', instance['id'])\n if err:\n LOG.error(err)\n except ProcessExecutionError:\n raise exception.Error('Failed to stop %s' % instance['id'])\n\n # Update instance state\n try:\n db.instance_set_state(context.get_admin_context(),\n instance['id'],\n power_state.SHUTDOWN)\n except exception.DBError as err:\n LOG.error(err)\n raise exception.Error('Failed to update db for %s' % instance['id'])\n \n return True", "def terminate_instances(self, instance_ids=None):\r\n params = {}\r\n if instance_ids:\r\n self.build_list_params(params, instance_ids, 'InstanceId')\r\n return self.get_list('TerminateInstances', params,\r\n [('item', Instance)], verb='POST')", "def spin_ec2(self):\n #message = event['message']\n init_script = \"\"\"#!/bin/bash\necho \"sleep 50\" >> /etc/rc.local\necho \"shutdown -H +5 >> /etc/rc.local\"\nsleep 50\nshutdown -H +5\"\"\"\n\n print ('Running script:')\n print (init_script)\n\n instance = EC2.run_instances(\n ImageId=AMI,\n InstanceType=INSTANCE_TYPE,\n MinCount=1, # required by boto, even though it's kinda obvious.\n MaxCount=1,\n InstanceInitiatedShutdownBehavior='stop', # make shutdown in script terminate ec2\n UserData=init_script # file to run on instance init.\n \n )\n\n print (\"New instance created.\")\n instance_id = instance['Instances'][0]['InstanceId']\n print (instance_id)\n print (instance)\n EC2.create_tags(Resources=[instance_id], Tags=[{\"Key\" : \"Name\", 'Value': 'test01',},],)", "def RunStop(self, zone=None):\n if zone is None:\n zone = self.zone\n try:\n self.run_instance_params['image'] = 
self.tester.ec2.get_emi(emi=self.args.emi,\n root_device_type=\"ebs\",\n basic_image=True)\n except Exception, e:\n self.RegisterImage()\n self.run_instance_params['image'] = self.tester.ec2.get_emi(emi=self.args.emi,\n root_device_type=\"ebs\",\n basic_image=True)\n if not self.volume_1:\n self.volume_1 = self.tester.ec2.create_volume(zone=self.zone, size=2)\n if not self.volume_2:\n self.volume_2 = self.tester.ec2.create_volume(zone=self.zone, size=1)\n\n if self.reservation:\n self.tester.ec2.terminate_instances(self.reservation)\n self.reservation = self.tester.ec2.run_image(**self.run_instance_params)\n ## Ensure that we can attach and use a volume\n for instance in self.reservation.instances:\n instance.attach_volume(self.volume_1, self.test_volume_1_path)\n instance.attach_volume(self.volume_2, self.test_volume_2_path)\n self.tester.ec2.stop_instances(self.reservation)\n for instance in self.reservation.instances:\n if instance.ip_address or instance.private_ip_address:\n raise Exception(\"Instance had a public \" + str(instance.ip_address) + \" private \" + str(instance.private_ip_address) )\n if instance.block_device_mapping[self.test_volume_1_path] is None:\n raise Exception(\"DBM path is invalid\")\n if self.volume_1.id != instance.block_device_mapping[self.test_volume_1_path].volume_id:\n raise Exception(\"Volume id does not match\")", "def auto_terminate(cls):\n # Get the ids of the ec2 not_terminated instances\n not_terminated_ids = ReaderNotTerminated.ids()\n if len(not_terminated_ids) > 0:\n # Print the instances found\n print(\"Theses instances are not terminated, so we will terminate them : \")\n for not_terminated_id in not_terminated_ids:\n print(not_terminated_id)\n # Terminate the instances\n Terminate.execute_multiple(ReaderNotTerminated.ids())\n # Verify the termination\n if(len(ReaderNotTerminated.ids()) == 0):\n print(\"Termination order send to everybody : success.\")\n else:\n print(\"Termination order send to everybody : failure.\")\n else:\n print(\"No instances activated : Nothing to do.\")", "def terminate_instances(self, ids):\n self.conn.terminate_instances(instance_ids=ids)", "def test_ungraceful_shutdown_aws(self, resources, instances, aws_obj, force):\n aws_obj.stop_ec2_instances(instances=instances, wait=True, force=force)\n aws_obj.start_ec2_instances(instances=instances, wait=True)\n self.validate_cluster(resources, instances)", "def terminate_instances(self, props):\n return self._vm_async_apply(props, 'delete')", "def destroy_node(self, node):\n params = {'Action': 'TerminateInstances'}\n params.update(self._pathlist('InstanceId', [node.id]))\n res = self.connection.request(self.path, params=params).object\n return self._get_terminate_boolean(res)", "def test_ec2_down(runner, ec2):\n result = runner.invoke(cli.cli, ['ec2', 'down', '-i', ec2['server'].id])\n assert result.exit_code == 0", "def destroy(self, instance):\n # TODO(imsplitbit): This needs to check the state of the VE\n # and if it isn't stopped it needs to stop it first. 
This is\n # an openvz limitation that needs to be worked around.\n # For now we will assume it needs to be stopped prior to destroying it.\n self._stop(instance)\n\n try:\n _, err = utils.execute('sudo', 'vzctl', 'destroy', instance['id'])\n if err:\n LOG.error(err)\n except ProcessExecutionError:\n raise exception.Error('Error destroying %d' % instance['id'])", "def reboot_instance(instance_id):\n # Instantiate the service resource object\n ec2_resource = session.resource('ec2', region_name=region)\n try:\n # Reboot an instance\n response = ec2_resource.Instance(instance_id).reboot(DryRun=False)\n print(response)\n print(\"\\nSuccessfully rebooting instance: \", instance_id)\n except botocore.exceptions.ClientError as e:\n if e.response['Error']['Code'] == \"InvalidInstanceID.Malformed\":\n print(\"Error: Invalid instance id!!\")\n else:\n raise\n return", "def destroy():\n # instance first\n old_vm = _existing.vm\n _destroy_resource('vm')\n if not dry and old_vm is not None:\n # Wait for instance to be fully terminated before carrying on or we will have\n # dependency issues.\n print('Waiting for instance to be terminated before deleting other resources...')\n old_vm.wait_until_terminated()\n time.sleep(1) # One would think that wait for terminated should be enough...\n\n _destroy_resource('disk')\n\n # detach before destroy\n _detach_vpc_igw(vpc=_existing.vpc, igw=_existing.igw)\n _destroy_resource('igw')\n\n # sg and sub before vpc\n _destroy_resource('sg')\n _destroy_resource('sub')\n\n _destroy_resource('vpc')", "def terminate():\n with open (f\"{CLUSTER_FOLDER}/uuid\", \"r\") as f:\n uuid = f.read().strip()\n\n start_time = time.time() \n cluster = delete_cluster(os.environ[\"T2_URL\"], os.environ[\"T2_TOKEN\"], uuid) \n if(not cluster):\n log(\"Failed to terminate cluster via API.\")\n exit(1)\n\n log(f\"Started termination of cluster '{cluster['id']}'. Waiting for cluster to be terminated...\")\n cluster = get_cluster(os.environ[\"T2_URL\"], os.environ[\"T2_TOKEN\"], cluster['id'])\n while(TIMEOUT_SECONDS > (time.time()-start_time) and cluster['status']['state'] != 'TERMINATED' and not cluster['status']['failed']):\n time.sleep(5)\n cluster = get_cluster(os.environ[\"T2_URL\"], os.environ[\"T2_TOKEN\"], cluster['id'])\n\n if(cluster['status']['failed']):\n log(\"Cluster termination failed.\")\n exit(1)\n\n if(TIMEOUT_SECONDS <= (time.time()-start_time)):\n log(\"Timeout while launching cluster.\")\n exit(1)\n\n log(f\"Cluster '{cluster['id']}' is terminated.\")", "def terminate():\n sys.exit()", "def TerminateFailedMachine(instance_id):\n TerminateMachine(instance_id, enum.MACHINE_STATUS.FAILED)", "def stop(instance):\n if instance.state == STOPPED:\n return\n\n Queue.objects.add(function=\"terminate\", instance=instance)", "def stop(self, name=None, **kwargs):\n result = None\n compute_service = self._get_compute_service()\n _operation = None\n if name is None:\n return\n try:\n\n project_id = kwargs.pop('project_id', self.auth[\"project_id\"])\n zone = kwargs.pop('zone', self.default[\"zone\"])\n\n _operation = compute_service.instances().stop(\n project=project_id,\n zone=zone,\n instance=name).execute()\n\n self._wait_for_operation(compute_service,\n _operation,\n project_id,\n zone,\n name)\n\n # Get the instance details to update DB.\n result = self.__info(name, displayType=\"vm\")\n\n except Exception as se:\n print(se)\n if type(se) == HttpError:\n Console.error(\n f'Unable to stop instance {name}. 
Reason: {se._get_reason()}')\n else:\n Console.error(f'Unable to stop instance {name}.')\n\n return result", "def power_off(self, ec2_session, ami_id):\n instance = self.aws_api.get_instance_by_id(ec2_session, ami_id)\n instance.stop()\n self.instance_waiter.wait(instance, self.instance_waiter.STOPPED)\n return True", "def power_off(self, instance, timeout=0, retry_interval=0):\n azure_name = self._get_omni_name_from_instance(instance)\n utils.stop_instance(self.compute_client, drv_conf.resource_group,\n azure_name)", "def TerminateFinishedMachine(instance_id):\n TerminateMachine(instance_id, enum.MACHINE_STATUS.TERMINATED)", "async def terminate(self, restart=False) -> None:\n pass", "def run_instance():\n ami_id = \"ami-04876f29fd3a5e8ba\" # AMI Id\n instance_type = \"t2.micro\" # Instance Type\n tag_specs = [\n {\n 'ResourceType': 'instance',\n 'Tags': [\n {\n 'Key': 'Name',\n 'Value': 'BoB10@ProductDev.sdk.8084'\n }\n ]\n }\n ]\n\n # Instantiate the service resource object\n ec2_resource = session.resource('ec2', region_name=region)\n # Run an instance\n\n instances = ec2_resource.create_instances(ImageId=ami_id, InstanceType=instance_type,\n MaxCount=1, MinCount=1, KeyName='BoB10@ProductDev.8084',\n TagSpecifications=tag_specs, \n SecurityGroupIds=['sg-06b757b4bb272d98f'],\n UserData=assemble_userdata().as_string())\n Instance_id = instances[0].id\n print('\\nInstance Id: ' + Instance_id)\n print('Image Id: ' + instances[0].image_id)\n print('Instance Type: ' + instances[0].instance_type)\n print('State: ' + instances[0].state['Name'])\n return Instance_id", "def reboot_instance(InstanceId=None):\n pass", "def stop(self, ids: list) -> str:\n # If no ids are passed raise Nothing to do\n if 'None' in ids:\n raise EC2Error('Nothing to do. Need IDS! Arrgh!!!')\n\n try:\n status = self.ec2.instances.filter(InstanceIds=ids).stop()\n return status\n except IOError as e:\n raise EC2Error('Error stopping EC2 Instances {}'.format(e))", "def terminate_preemptible_instances(self, context, instances):\n # NOTE(aloga): we should not delete them directly, but probably send\n # them a signal so that the user is able to save her work.\n elevated = context.elevated()\n for instance in instances:\n LOG.info(_LI(\"Deleting %(uuid)s\") % {\"uuid\": instance[\"uuid\"]})\n instance = self.compute_api.get(elevated,\n instance[\"uuid\"],\n want_objects=True)\n self.compute_api.delete(elevated, instance)", "def launch_instance(ec2, ami, itype, kp_name, sec_group_name):\n\n\n instance = ec2.run_instances(\n ami,\n key_name=kp_name,\n instance_type=itype,\n security_groups=[sec_group_name]\n ).instances[0]\n\n while instance.state != 'running':\n sys.stdout.write('Waiting for instance: {}, at DNS: {} to start\\n'.format(instance.id,\n str(instance.dns_name).split('.')[0]))\n time.sleep(5)\n instance.update()\n\n sys.stdout.write('\\nSuccess! 
EC2 Instance Launched \\nInstance_Type: {} in {}'.format(instance.instance_type,\n instance.placement))\n return instance", "def lambda_handler(event, context):\n Stop_Instances()", "def terminate(self, ids: list, exclude: list = None) -> str:\n if exclude is None:\n try:\n\n status = self.ec2.instances.filter(InstanceIds=ids).terminate()\n return \"Terminated status: {0}, instances:\\n {1}\".format(ids, status)\n except IOError as e:\n raise TerminationError('Error terminating EC2 Instances {}'.format(e))\n else:\n #filtered_ids = list(set(ids) - set(exclude))\n try:\n # status = self.ec2.instances.filter(InstanceIds=filtered_ids).terminate()\n status = self.instances = self.ec2.instances.filter(\n Filters=[\n {'Name': 'instance-state-name', 'Values': [state]}\n ]\n )\n return \"Terminated status: {0}, instances:\\n {1}\".format(filtered_ids, status)\n except IOError as e:\n raise TerminationError('Error terminating EC2 Instances {}'.format(e))", "def terminate_instance(self, instance_ids):\n instances_terminated = []\n if (len(instance_ids) > 0):\n self.__validate_instance_id(instance_ids)\n euca_conn = self.__make_connection()\n for instance_id in instance_ids:\n try:\n instance = euca_conn.terminate_instances(instance_id.encode(\"latin-1\"))\n instances_terminated.append(instance)\n except Exception, ex:\n self.euca.display_error_and_exit('%s' % ex)\n\n return instances_terminated\n else:\n return False", "def delete_instance(db_instance):\n rds = boto3.client('rds')\n rds.delete_db_instance(\n DBInstanceIdentifier=db_instance.aws_instance_identifier,\n SkipFinalSnapshot=True\n )", "def test_instance_termination_exception_sets_error(self):\n instance = self._create_fake_instance_obj()\n\n def fake_delete_instance(self, context, instance, bdms):\n raise exception.InstanceTerminationFailure(reason='')\n\n self.stub_out('nova.compute.manager.ComputeManager._delete_instance',\n fake_delete_instance)\n\n self.assertRaises(exception.InstanceTerminationFailure,\n self.compute.terminate_instance,\n self.context,\n instance, [])\n instance = db.instance_get_by_uuid(self.context, instance['uuid'])\n self.assertEqual(instance['vm_state'], vm_states.ERROR)", "def terminate(ctx):\n ctl = ctx.ctl\n jobs = ctl('list-avail', '--partition', 'main', flatten=False)\n\n for job in jobs:\n jobid = job['id']\n click.echo('Terminating {}'.format(jobid))\n ctl('terminate', '--jobid', jobid)", "def deregister_instance(InstanceId=None):\n pass", "def terminate(self):\n\t\tself.raise_exc(SystemExit)", "def kill_instance(py, accelerator, sig_name):\n acc_client = get_accelerator_client(py, accelerator)\n acc_client.kill_instance(sig_name)", "def ssh_to_ec2(instance):\n subprocess.Popen(['ssh', instance.dns_name])", "def test_ec2_up_no_instance(runner, ec2):\n result = runner.invoke(cli.cli, ['ec2', 'up', '-i', 'dummy'])\n assert result.exit_code == 2", "def update_instances_os ( ec2_conn, vpc, base_name, restart = False ) :\n instances = get_all_vpc_instances( ec2_conn, vpc )\n status = ssh_call_vpc( ec2_conn, base_name, instances, \"sudo yum -y update\", True )\n if restart and status == 0 :\n for instance in instances :\n instance.reboot( )", "def vm_stop(self, params: dict) -> Tuple[\"Status\", dict]:", "def terminate(self):\n self._stop_proc(signal.SIGTERM)", "def Stop(self, name, zone):\n project = properties.VALUES.core.project.Get(required=True)\n request = self.messages.ComputeInstancesStopRequest(\n instance=name,\n project=project,\n zone=zone\n )\n operation = self.client.instances.Stop(request)\n 
return self._GetComputeZoneOperationRef(operation)", "def launch_instance_nonvpc ( ec2_conn,\n ami,\n base_name,\n instance_type,\n keypair,\n security_group,\n machine_type = 'm1.small',\n user_data = None,\n wait_for_running = True ) :\n instance_r = ami.run( key_name = keypair,\n instance_type = machine_type,\n security_groups = [ security_group ],\n user_data = user_data )\n instance = instance_r.instances[ 0 ];\n aws_cmd( ec2_conn.create_tags,\n [ instance.id, { \"Name\": get_instance_name( base_name, instance_type ) } ] )\n if wait_for_running :\n running = wait_on_object_state( instance, 'running', failure_state = 'terminated' )\n if not running :\n print \"Deployment instance still not up after long period of time! Exiting...\"\n sys.exit( 3 )\n\n return instance", "def terminate(self):", "def stop_notebook_instance(NotebookInstanceName=None):\n pass", "def amazonEc2_delete(amazonEc2):\n\n\treturn amazonEc2", "def instance_terminated(instance_id, max_retry=app_config['GLOBAL_MAX_RETRY'], wait=app_config['GLOBAL_HEALTH_WAIT'],\n wait_for_stopping=app_config['INSTANCE_WAIT_FOR_STOPPING']):\n retry_count = 1\n is_instance_terminated = False\n while retry_count < max_retry:\n logger.info('Checking instance {} is terminated...'.format(instance_id))\n retry_count += 1\n response = ec2_client.describe_instances(\n InstanceIds=[instance_id]\n )\n state = response['Reservations'][0]['Instances'][0]['State']\n stop_states = ['terminated', 'stopped']\n stopping_states = ['shutting-down', 'stopping']\n\n if state['Name'] in stop_states or (wait_for_stopping and state['Name'] in stopping_states):\n logger.info('Instance {} {}!'.format(instance_id, state['Name']))\n is_instance_terminated = True\n break\n else:\n is_instance_terminated = False\n logger.info('Instance {} is {}, checking again...'.format(instance_id, state['Name']))\n\n time.sleep(wait)\n return is_instance_terminated", "def terminate(self):\n self.send_signal(signal.SIGTERM)", "def terminate(self):\n self.raise_exc(SystemExit)", "def quick_instance(self, name, image, instance_type, env_tag='dev', zone_tag='starwatts', os_tag='debian', sg_id=None,\n private=True, extra_sg_ids=None, extra_tags=None, terminate_on_shutdown=False,\n debug=False):\n # Debug setting\n if debug:\n logging.basicConfig(level=logging.DEBUG)\n\n # Preliminary tests\n try:\n ami = self.get_image(image_id=image)\n except EC2ResponseError:\n logging.error(\"The image {} could not be found. Aborting.\".format(image))\n return\n print(\"Using AMI {} : {}\".format(image, ami.name))\n if len(self.get_only_instances(filters={'tag:name': name, 'instance-state-name': 'running'})) > 0 or \\\n len(self.get_only_instances(filters={'tag:name': name, 'instance-state-name': 'stopped'})) > 0:\n logging.error(\"An instance with the same name ({}) already exists. Aborting.\".format(name))\n return\n logging.debug(\"Test passed : No instance has the same 'name' tag.\")\n if self.keypair_exists(name):\n logging.error(\"A keypair with the same name ({}) already exists. Aborting.\".format(name))\n return\n logging.debug(\"Test passed : No keypair was found with the same name.\")\n if sg_id is None:\n if self.security_group_exists(name=name):\n logging.error(\"A security group with the same name ({}) already exists. 
Aborting.\".format(name))\n return\n logging.debug(\"Test passed : No security group was found with the same name.\")\n\n # Tags generation\n logging.debug(\"Generating tags to apply.\")\n tags = dict(name=name, os=os_tag, env=env_tag, zone=zone_tag, privacy='true' if private else 'false')\n if extra_tags is not None:\n tags.update(extra_tags)\n print(\"Tags : {}\".format(tags))\n\n # Fetching needed security groups (bastion and zabbix)\n standard_sg = self.get_all_security_groups(groupnames=['standard'])\n if len(standard_sg) != 1:\n logging.error(\"Multiple or no security group was found for the 'bastion' search. Aborting.\")\n return\n standard_sg = standard_sg[0]\n logging.debug(\"The following security group was found for 'standard : {} {}\".format(standard_sg.id,\n standard_sg.description))\n\n # Security group creation\n if sg_id is None:\n sg = self.create_security_group(name, \"SG applied to {} VM\".format(name))\n sg_id = sg.id\n\n sg_ids = [sg_id, standard_sg.id, ]\n # Using the extra security groups if any\n if extra_sg_ids is not None:\n logging.debug(\"Extra security groups to add : {}\".format(extra_sg_ids))\n sg_ids.extend(extra_sg_ids)\n logging.debug(\"Security Groups : {}\".format(sg_ids))\n\n user_data = \"-----BEGIN OUTSCALE SECTION-----\\nprivate_only=true\\n-----END OUTSCALE SECTION-----\" if private else \"\"\n logging.debug(\"Creating keypair.\")\n kp = self.create_key_pair(key_name=name)\n fp = os.path.join(os.path.expanduser('~/.ssh'), '%s.pem' % kp.name)\n with open(fp, 'wb') as fd:\n fd.write(bytes(kp.material, \"UTF-8\"))\n logging.debug(\"Keypair written to ~/.ssh/{}.pem\".format(name))\n\n resa = self.run_instances(image_id=image, key_name=name, security_groups=sg_ids, instance_type=instance_type,\n user_data=user_data,\n instance_initiated_shutdown_behavior='terminate' if terminate_on_shutdown else 'stop')\n inst = resa.instances[0]\n logging.debug(\"Adding tags to the newly created machine.\")\n inst.add_tags(tags)\n return inst", "def terminate_instance_on_failure(self) -> pulumi.Input[bool]:\n return pulumi.get(self, \"terminate_instance_on_failure\")", "def stop(instance=\"default\"):\n global logger_ic\n logger_ic = infrasim_log.get_logger(LoggerType.ipmi_console.value, instance)\n\n try:\n file_ipmi_console_pid = \"{}/{}/.ipmi_console.pid\".\\\n format(config.infrasim_home, instance)\n with open(file_ipmi_console_pid, \"r\") as f:\n pid = f.readline().strip()\n\n os.kill(int(pid), signal.SIGTERM)\n logger_ic.info(\"SIGTERM is sent to pid: {}\".format(pid))\n os.remove(file_ipmi_console_pid)\n except IOError:\n # When pid file is missing, by e.g., node destroy,\n # find process id by instance name\n if instance == \"default\":\n process_name = \"ipmi-console start$\"\n else:\n process_name = \"ipmi-console start {}\".format(instance)\n\n ps_cmd = r\"ps ax | grep '{}' | grep Sl | awk '{{print $1}}' | head -n1\".format(process_name)\n logger_ic.warning(\"Fail to find ipmi console pid file, check by:\")\n logger_ic.warning(\"> {}\".format(ps_cmd))\n _, pid = run_command(cmd=ps_cmd)\n logger_ic.warning(\"ipmi console pid got: {}\".format(pid))\n if not pid:\n logger_ic.warning(\"ipmi console for instance {} is not running\".format(instance))\n return\n\n os.kill(int(pid), signal.SIGTERM)\n logger_ic.info(\"SIGTERM is sent to pid: {}\".format(pid))\n except Exception:\n logger_ic.warning(traceback.format_exc())\n pass", "def terminate(self):\n self._running = False", "def test_run_terminate_no_image(self):\n params = {'image_ref': ''}\n instance = 
self._create_fake_instance_obj(params)\n self.compute.build_and_run_instance(self.context, instance, {}, {}, {},\n [], block_device_mapping=[])\n self._assert_state({'vm_state': vm_states.ACTIVE,\n 'task_state': None})\n\n self.compute.terminate_instance(self.context, instance, [])\n instances = db.instance_get_all(self.context)\n self.assertEqual(len(instances), 0)", "def stop_vm(client, resource_group_name, vm_name, stop_mode):\n return client.stop(resource_group_name, vm_name, stop_mode)", "def terminate_volumes(db, context, instance_id):\n volume_api = volume.API()\n for bdm in db.block_device_mapping_get_all_by_instance(context,\n instance_id):\n #LOG.debug(_(\"terminating bdm %s\") % bdm)\n if bdm['volume_id'] and bdm['delete_on_termination']:\n volume_api.delete(context, bdm['volume_id'])\n db.block_device_mapping_destroy(context, bdm['id'])", "def vm_stop(vm_hostname, force=False, retire=False):\n with _get_vm(vm_hostname, allow_retired=True) as vm:\n if vm.dataset_obj['datacenter_type'] == 'aws.dct':\n vm.aws_shutdown()\n elif vm.dataset_obj['datacenter_type'] == 'kvm.dct':\n _check_defined(vm)\n\n if not vm.is_running():\n log.info('\"{}\" is already stopped.'.format(vm.fqdn))\n return\n if force:\n vm.hypervisor.stop_vm_force(vm)\n else:\n vm.shutdown()\n else:\n raise NotImplementedError(\n 'This operation is not yet supported for {}'.format(\n vm.dataset_obj['datacenter_type'])\n )\n\n if retire:\n vm.dataset_obj['state'] = 'retired'\n vm.dataset_obj.commit()\n log.info('\"{}\" is retired.'.format(vm.fqdn))\n\n log.info('\"{}\" is stopped.'.format(vm.fqdn))", "def stop(\n context,\n user=get_local_user(),\n remote=False,\n instance=None,\n stack=None,\n services=None,\n):\n command = \"stop\"\n run_command_with_services(context, user, remote, instance, stack, command, services)", "def shutdown():\n os.kill(os.getpid(), signal.SIGTERM)", "def stop():\n _with_deploy_env(['./bin/paster serve src/remix/oerpub/rhaptoslabs/production.ini --stop-daemon'])", "def launch_instance_vpc ( ec2_conn,\n ami,\n base_name,\n instance_type,\n keypair,\n subnet_id,\n security_group_id,\n machine_type = 'm3.medium',\n user_data = None,\n wait_for_running = True,\n public_ip = False,\n static_ip_address = None,\n monitor_params = None ) :\n interfaces = None\n subnet = None\n security_group_ids = None\n \n if static_ip_address is None:\n spec = boto.ec2.networkinterface.NetworkInterfaceSpecification( subnet_id = subnet_id,\n groups = [ security_group_id ],\n associate_public_ip_address = public_ip )\n interfaces = boto.ec2.networkinterface.NetworkInterfaceCollection( spec )\n else:\n subnet = subnet_id\n security_group_ids = [security_group_id]\n\n instance_r = ec2_conn.run_instances( image_id = ami.id,\n key_name = keypair,\n instance_type = machine_type,\n monitoring_enabled = True,\n network_interfaces = interfaces,\n subnet_id = subnet, \n user_data = user_data,\n security_group_ids = security_group_ids,\n private_ip_address = static_ip_address )\n instance = instance_r.instances[ 0 ];\n aws_cmd( ec2_conn.create_tags,\n [ instance.id, { \"Name\": get_instance_name( base_name, instance_type ) } ] )\n \n print \"Waiting for instance to be ready\"\n \n if wait_for_running :\n running = wait_on_object_state( instance, 'running', max_wait = 600, failure_state = 'terminated' )\n if not running :\n print \"Deployment instance still not up after long period of time! 
Exiting...\"\n sys.exit( 3 )\n\n if monitor_params :\n print \"Adding monitoring to the instance.\"\n\n return instance", "def terminate_controller(cls, args, config):\n logging.debug(\"MOLNSController.terminate_controller(args={0})\".format(args))\n controller_obj = cls._get_controllerobj(args, config)\n if controller_obj is None:\n return\n instance_list = config.get_all_instances(controller_id=controller_obj.id)\n logging.debug(\"\\tinstance_list={0}\".format([str(i) for i in instance_list]))\n print(\"\\tinstance_list={0}\".format([str(i) for i in instance_list]))\n # Check if they are running or stopped\n if len(instance_list) > 0:\n for i in instance_list:\n if i.worker_group_id is None:\n status = controller_obj.get_instance_status(i)\n if status == controller_obj.STATUS_RUNNING or status == controller_obj.STATUS_STOPPED:\n print \"Terminating controller running at {0}\".format(i.ip_address)\n controller_obj.terminate_instance(i)\n else:\n worker_name = config.get_object_by_id(i.worker_group_id, 'WorkerGroup').name\n worker_obj = cls._get_workerobj([worker_name], config)\n status = worker_obj.get_instance_status(i)\n if status == worker_obj.STATUS_RUNNING or status == worker_obj.STATUS_STOPPED:\n print \"Terminating worker '{1}' running at {0}\".format(i.ip_address, worker_name)\n worker_obj.terminate_instance(i)\n else:\n print \"No instance running for this controller\"", "def kill_vrouter_instance(self):\n # Stop vrouter\n if (self.vr_args['vtest_only']):\n self.logger.info(\"Stopping vrouter pid=\" + str(self.pid))\n if (self.pid > 0):\n try:\n os.kill(self.pid, signal.SIGTERM)\n time.sleep(1)\n except OSError as e:\n self.logger.error(e)", "def launch_instance(tag, key_name, group_name, inst_type, ami_name, user_data,\n wait=True, returninfo=None):\n ec2 = boto.ec2.connect_to_region(AWS_REGION)\n failures = 0\n max_failures = 10\n while True:\n try:\n reservation = ec2.run_instances(ami_name,\n key_name=key_name,\n security_groups=[group_name],\n instance_type=inst_type,\n user_data=None)\n break\n except Exception, err:\n # Failed to get instance; wait 15 seconds and then try again (up to\n # 10 total times)\n errortext = str(err)\n if errortext.find(\"Not authorized for images\") >= 0:\n print \"**************************************\"\n print \"* Error from AWS suggests that the AMI code in\"\n print \"* CloudSetup.py is deprecated. Please go to\"\n print \"* https://aws.amazon.com/marketplace/ and search for\"\n print \"* \\\"Ubuntu server lts hvm\\\", selecting the most recent\"\n print \"* version. Click \\\"Continue\\\", \\\"Manual Launch\\\",\"\n print \"* and then copy the AMI ID for the US East region.\"\n print \"* Copy that to the AMI_NAME value in CloudSetup.py\"\n print \"* and re-run.\"\n print \"***************************************\"\n print \"* (Full text of error):\"\n print errortext\n print \"***************************************\"\n return None\n elif errortext.find(\"accept terms and subscribe\") >= 0:\n print \"**************************************\"\n print \"* Error from AWS suggests that you have never used this\"\n print \"* AMI before and need to accept its terms and\"\n print \"* subscribe to it. Please follow the link in the below\"\n print \"* error text. Click \\\"Continue\\\", \\\"Manual Launch\\\",\"\n print \"* and \\\"Accept Terms\\\". 
After receiving email\"\n print \"* confirmation, you can re-run the code.\"\n print \"**************************************\"\n print \"* (Full text of error):\"\n print errortext\n print \"**************************************\"\n return None\n failures += 1\n if failures == max_failures:\n print \"**************************************\"\n print \"* Maximum number of instance launch failures reached.\"\n print \"* (Full text of error):\"\n print errortext\n print \"**************************************\"\n return None\n print \" ** ec2.run_instances failed for tag\", tag, \"; waiting 15\"\n print \" ** seconds and then trying again...\"\n time.sleep(15)\n\n time.sleep(5) # Slow things down -- they're never running super fast anyway\n instance = reservation.instances[0]\n time.sleep(5) # Slow things down -- they're never running super fast anyway\n instance.add_tag(\"tag\", tag)\n time.sleep(5) # Slow things down -- they're never running super fast anyway\n\n if wait:\n print \" Instance requested, waiting for 'running' for tag\", tag\n while instance.state != \"running\":\n print \" %s ...\" % tag\n time.sleep(5)\n try:\n instance.update()\n except boto.exception.EC2ResponseError as e:\n print \"******************\"\n print \"Error caught in instance.update():\"\n print e.strerror\n print \"******************\"\n print \" %s done!\" % tag\n if returninfo:\n returninfo.put(tag)\n return instance", "def stop_instances(self, instance_ids):\n response = instance.stop_instances(self.url, self.verb,\n self.headers, self.version, instance_ids)\n if response is not None :\n res = StopInstancesResponse.StopInstancesResponse()\n parseString(str(response.text), res)\n return res\n else :\n return None", "def swap_elb_instances ( elb_conn,\n elb,\n new_instance_ids,\n swap_smoothly = True,\n terminate_old_instances = False,\n ec2_conn = None,\n cloudwatch_conn = None ) :\n old_instance_ids = []\n \n if elb.instances:\n old_instance_ids = [ elb_i.id for elb_i in elb.instances ]\n \n \"Sometimes aws does not return the existing instances\"\n if len( old_instance_ids ) == 0 :\n old_instance_ids = [ elb_i.instance_id for elb_i in elb.get_instance_health() ]\n \n print 'old instances'\n print old_instance_ids\n \n \n elb_conn.register_instances( elb.name, new_instance_ids )\n new_instances_started = True\n if swap_smoothly :\n for new_instance_id in new_instance_ids :\n inservice = wait_on_elb_instance( elb_conn, elb.name, new_instance_id )\n if not inservice :\n new_instances_started = False\n\n if len( old_instance_ids ) > 0 :\n elb_conn.deregister_instances( elb.name, old_instance_ids )\n\n if new_instances_started and terminate_old_instances :\n print \"terminating old instances\"\n remove_alarms_on_instances( cloudwatch_conn, old_instance_ids )\n ec2_conn.terminate_instances( old_instance_ids )\n\n return new_instances_started", "def launch_example_ec2_cmd(*args, **kwargs):\n return launch_example_ec2(*args, **kwargs)" ]
[ "0.80912846", "0.80731577", "0.79928845", "0.7745165", "0.7609396", "0.7455729", "0.74132353", "0.7362064", "0.7344869", "0.7302483", "0.72819716", "0.71319824", "0.711395", "0.7096879", "0.7086053", "0.7067986", "0.6984061", "0.6978721", "0.68962944", "0.6884447", "0.6829472", "0.6827937", "0.6809962", "0.67803675", "0.67529845", "0.67111695", "0.67004204", "0.6682894", "0.66789716", "0.66476434", "0.6576389", "0.6554469", "0.6541763", "0.65123653", "0.64727706", "0.64479786", "0.63951194", "0.6394504", "0.6383286", "0.6371322", "0.6357109", "0.63340247", "0.6323922", "0.6317273", "0.62912714", "0.62878805", "0.6247083", "0.6239334", "0.62242013", "0.6222601", "0.619165", "0.6182661", "0.6169283", "0.61675894", "0.61239636", "0.6117902", "0.61176795", "0.60691893", "0.6066028", "0.6049735", "0.6047992", "0.60351545", "0.5984911", "0.59776914", "0.5943546", "0.5935726", "0.59241307", "0.5916691", "0.5900754", "0.5891841", "0.58789337", "0.5867396", "0.5867187", "0.5861501", "0.5856165", "0.5851927", "0.58401716", "0.5830088", "0.5816846", "0.5812065", "0.5805558", "0.5797626", "0.5796673", "0.5795923", "0.5789537", "0.5749864", "0.5727123", "0.57126033", "0.57074213", "0.570725", "0.57055897", "0.5704585", "0.5693847", "0.5668492", "0.5667833", "0.56653595", "0.5656826", "0.5652418", "0.5633559", "0.5629638" ]
0.7719454
4
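The terminateInstance document above relies on the legacy boto 2 boto.ec2 API. For reference, a minimal sketch of the same operation with the current boto3 SDK is given below; the helper name terminate_instance, the plain region string, and the boolean return mirroring the original snippet are illustrative assumptions, while boto3.client('ec2').terminate_instances(InstanceIds=[...]) is the standard boto3 call.

import boto3
from botocore.exceptions import ClientError

def terminate_instance(region, instance_id):
    # Illustrative sketch: terminate a single EC2 instance and report success
    # as a boolean, mirroring the boto 2 snippet above.
    ec2 = boto3.client('ec2', region_name=region)
    try:
        ec2.terminate_instances(InstanceIds=[instance_id])
        return True
    except ClientError as exc:
        print(exc)
        return False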
Create a new EC2 instance with specific parameters. SecurityGroup (sg) and KeyPair (key) have to be previously created (see cassandgo initSG and cassandgo initKP).
def createInstance(ec2,ami,nb_nodes,placement,instance_type,key,sg,user_data=None):
    reservation = ec2.run_instances(ami,min_count=nb_nodes,max_count=nb_nodes,placement=placement,key_name=key,security_groups=[sg],instance_type=instance_type,user_data=user_data)
    instance = reservation.instances[0]
    return instance
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_instance(ami, sg_name):\n instance = None\n ec2 = boto3.resource('ec2',region_name=\"us-east-1\")\n # TODO: Create an EC2 instance\n # Wait for the instance to enter the running state\n # Reload the instance attributes\n\n try:\n instance = ec2.create_instances(\n ImageId=ami,\n InstanceType=INSTANCE_TYPE,\n KeyName=KEY_NAME,\n MaxCount=1,\n MinCount=1,\n SecurityGroupIds=[\n sg_name,\n ],\n TagSpecifications=[{\n 'ResourceType': 'instance',\n 'Tags': TAGS\n }, {\n 'ResourceType': 'volume',\n 'Tags': TAGS\n }]\n )[0]\n instance.wait_until_running()\n instance.reload()\n print(instance.state)\n except ClientError as e:\n print(e)\n\n return instance", "def create_sec_group(ec2, sec_group_name):\n sec = ec2.create_security_group(sec_group_name, 'Jvivian Boto SecGroup')\n port = 22\n sec.authorize('tcp', port, port, '0.0.0.0/0')", "def quick_instance(self, name, image, instance_type, env_tag='dev', zone_tag='starwatts', os_tag='debian', sg_id=None,\n private=True, extra_sg_ids=None, extra_tags=None, terminate_on_shutdown=False,\n debug=False):\n # Debug setting\n if debug:\n logging.basicConfig(level=logging.DEBUG)\n\n # Preliminary tests\n try:\n ami = self.get_image(image_id=image)\n except EC2ResponseError:\n logging.error(\"The image {} could not be found. Aborting.\".format(image))\n return\n print(\"Using AMI {} : {}\".format(image, ami.name))\n if len(self.get_only_instances(filters={'tag:name': name, 'instance-state-name': 'running'})) > 0 or \\\n len(self.get_only_instances(filters={'tag:name': name, 'instance-state-name': 'stopped'})) > 0:\n logging.error(\"An instance with the same name ({}) already exists. Aborting.\".format(name))\n return\n logging.debug(\"Test passed : No instance has the same 'name' tag.\")\n if self.keypair_exists(name):\n logging.error(\"A keypair with the same name ({}) already exists. Aborting.\".format(name))\n return\n logging.debug(\"Test passed : No keypair was found with the same name.\")\n if sg_id is None:\n if self.security_group_exists(name=name):\n logging.error(\"A security group with the same name ({}) already exists. Aborting.\".format(name))\n return\n logging.debug(\"Test passed : No security group was found with the same name.\")\n\n # Tags generation\n logging.debug(\"Generating tags to apply.\")\n tags = dict(name=name, os=os_tag, env=env_tag, zone=zone_tag, privacy='true' if private else 'false')\n if extra_tags is not None:\n tags.update(extra_tags)\n print(\"Tags : {}\".format(tags))\n\n # Fetching needed security groups (bastion and zabbix)\n standard_sg = self.get_all_security_groups(groupnames=['standard'])\n if len(standard_sg) != 1:\n logging.error(\"Multiple or no security group was found for the 'bastion' search. 
Aborting.\")\n return\n standard_sg = standard_sg[0]\n logging.debug(\"The following security group was found for 'standard : {} {}\".format(standard_sg.id,\n standard_sg.description))\n\n # Security group creation\n if sg_id is None:\n sg = self.create_security_group(name, \"SG applied to {} VM\".format(name))\n sg_id = sg.id\n\n sg_ids = [sg_id, standard_sg.id, ]\n # Using the extra security groups if any\n if extra_sg_ids is not None:\n logging.debug(\"Extra security groups to add : {}\".format(extra_sg_ids))\n sg_ids.extend(extra_sg_ids)\n logging.debug(\"Security Groups : {}\".format(sg_ids))\n\n user_data = \"-----BEGIN OUTSCALE SECTION-----\\nprivate_only=true\\n-----END OUTSCALE SECTION-----\" if private else \"\"\n logging.debug(\"Creating keypair.\")\n kp = self.create_key_pair(key_name=name)\n fp = os.path.join(os.path.expanduser('~/.ssh'), '%s.pem' % kp.name)\n with open(fp, 'wb') as fd:\n fd.write(bytes(kp.material, \"UTF-8\"))\n logging.debug(\"Keypair written to ~/.ssh/{}.pem\".format(name))\n\n resa = self.run_instances(image_id=image, key_name=name, security_groups=sg_ids, instance_type=instance_type,\n user_data=user_data,\n instance_initiated_shutdown_behavior='terminate' if terminate_on_shutdown else 'stop')\n inst = resa.instances[0]\n logging.debug(\"Adding tags to the newly created machine.\")\n inst.add_tags(tags)\n return inst", "def launch(\n *,\n key_name: Optional[str],\n instance_type: str,\n ami: str,\n ami_user: str,\n tags: Dict[str, str],\n display_name: Optional[str] = None,\n size_gb: int,\n security_group_name: str,\n instance_profile: Optional[str],\n nonce: str,\n delete_after: datetime.datetime,\n) -> Instance:\n\n if display_name:\n tags[\"Name\"] = display_name\n tags[\"scratch-delete-after\"] = str(delete_after.timestamp())\n tags[\"nonce\"] = nonce\n tags[\"git_ref\"] = git.describe()\n tags[\"ami-user\"] = ami_user\n\n ec2 = boto3.client(\"ec2\")\n groups = ec2.describe_security_groups()\n security_group_id = None\n for group in groups[\"SecurityGroups\"]:\n if group[\"GroupName\"] == security_group_name:\n security_group_id = group[\"GroupId\"]\n break\n\n if security_group_id is None:\n vpcs = ec2.describe_vpcs()\n vpc_id = None\n for vpc in vpcs[\"Vpcs\"]:\n if vpc[\"IsDefault\"] == True:\n vpc_id = vpc[\"VpcId\"]\n break\n if vpc_id is None:\n default_vpc = ec2.create_default_vpc()\n vpc_id = default_vpc[\"Vpc\"][\"VpcId\"]\n securitygroup = ec2.create_security_group(\n GroupName=security_group_name,\n Description=\"Allows all.\",\n VpcId=vpc_id,\n )\n security_group_id = securitygroup[\"GroupId\"]\n ec2.authorize_security_group_ingress(\n GroupId=security_group_id,\n CidrIp=\"0.0.0.0/0\",\n IpProtocol=\"tcp\",\n FromPort=22,\n ToPort=22,\n )\n\n network_interface: InstanceNetworkInterfaceSpecificationTypeDef = {\n \"AssociatePublicIpAddress\": True,\n \"DeviceIndex\": 0,\n \"Groups\": [security_group_id],\n }\n\n say(f\"launching instance {display_name or '(unnamed)'}\")\n with open(ROOT / \"misc\" / \"scratch\" / \"provision.bash\") as f:\n provisioning_script = f.read()\n kwargs: RunInstancesRequestRequestTypeDef = {\n \"MinCount\": 1,\n \"MaxCount\": 1,\n \"ImageId\": ami,\n \"InstanceType\": cast(InstanceTypeType, instance_type),\n \"UserData\": provisioning_script,\n \"TagSpecifications\": [\n {\n \"ResourceType\": \"instance\",\n \"Tags\": [{\"Key\": k, \"Value\": v} for (k, v) in tags.items()],\n }\n ],\n \"NetworkInterfaces\": [network_interface],\n \"BlockDeviceMappings\": [\n {\n \"DeviceName\": \"/dev/sda1\",\n \"Ebs\": {\n 
\"VolumeSize\": size_gb,\n \"VolumeType\": \"gp3\",\n },\n }\n ],\n \"MetadataOptions\": {\n # Allow Docker containers to access IMDSv2.\n \"HttpPutResponseHopLimit\": 2,\n },\n }\n if key_name:\n kwargs[\"KeyName\"] = key_name\n if instance_profile:\n kwargs[\"IamInstanceProfile\"] = {\"Name\": instance_profile}\n i = boto3.resource(\"ec2\").create_instances(**kwargs)[0]\n\n return i", "def create_sg(vpc_id, description, group_name):\n client = boto3.client('ec2')\n security_group = str(group_name + \"_sg\")\n\n # get the security groups\n idle_sg = get_sg()\n\n print(idle_sg)\n print(security_group)\n\n # if security group doesnt exist, create it\n if security_group not in idle_sg:\n print(\"Creating SG\")\n return client.create_security_group(\n Description=description,\n GroupName=security_group,\n VpcId=vpc_id\n )\n return get_sg_id(security_group)", "def create():\n\n # remember what is created or not\n vpc = False\n igw = False\n sg = False\n sub = False\n vm = False\n\n vpc = _create_resource('vpc', CidrBlock=args.cidr, InstanceTenancy='default')\n igw = _create_resource('igw')\n\n if vpc and igw:\n _attach_vpc_igw(vpc=_existing.vpc, igw=_existing.igw)\n else:\n print('Cannot attach an igw to a vpc as at least one of them could not be created.')\n\n if vpc:\n sg = _create_resource(\n 'sg',\n GroupName=args.role,\n Description='SG for ' + args.role,\n VpcId=getattr(_existing.vpc, 'id', None)\n )\n else:\n print('Cannot create a sg as the vpc to attach it to could not be created.')\n\n if sg:\n _add_ingress_rules()\n else:\n print('Cannot create ingress rule as the sg could not be created.')\n\n if vpc:\n sub = _create_resource(\n 'sub',\n VpcId=getattr(_existing.vpc, 'id', None),\n CidrBlock=args.cidr\n )\n else:\n print('Cannot create a subnet as the vpc to attach it to could not be created.')\n\n if vpc and sub:\n _link_route_table()\n else:\n print('Cannot link subnet and VPC in the route table as vpc or sub not created.')\n\n if sub and sg:\n vm = _create_resource(\n 'vm',\n ImageId=args.ami,\n MinCount=1,\n MaxCount=1,\n KeyName=args.keypair,\n InstanceType=args.instance,\n # Note that there will be no internal name.\n # To get one, create first a DHCP options set and associate it with the VPC.\n NetworkInterfaces=[{\n 'AssociatePublicIpAddress': True,\n 'DeviceIndex': 0, # needs to be 0 to get a public IP\n 'SubnetId': getattr(_existing.sub, 'id', None),\n 'Groups': [getattr(_existing.sg, 'id', None)],\n }],\n )\n else:\n print('Cannot create an instance as the sub or sg to use could not be created.')\n\n if vm:\n if not dry:\n print('Waiting for the instance to be up and running, usually done in less than 45 seconds...')\n _existing.vm.wait_until_running()\n _tag_volume()\n print('you can reach your VM at ' + _existing.vm.public_ip_address)\n\n else:\n print('VM not created for some reason.')", "def create_instance(self, image='ami-660c3023', key_name='linuxonEC2', instance_type='t1.micro', security_groups=['default']):\n return self.conn.run_instances(image,\n key_name=key_name,\n instance_type=instance_type,\n security_groups=security_groups).instances[0]", "def init_region ( aws, region_name, aws_account_type, init_params ) :\n ec2_conn = aws.ec2_conn( )\n keypair_savedir = os.environ[ 'PWD' ]\n print \"Creating new keypairs for region \" + region_name\n for keytype in init_params.get( 'keypairs', [] ) :\n keypair_name = get_keypair_name( aws_account_type, region_name, keytype )\n keypair = ec2_conn.get_key_pair( keypair_name )\n if keypair :\n print 'Keypair ' + 
keypair_name + ' already exists. Skipping.'\n else :\n keypair = ec2_conn.create_key_pair( keypair_name )\n keypair.save( keypair_savedir )\n keypair_filename = keypair_savedir + '/' + keypair_name + '.pem'\n print 'Created keypair ' + keypair_filename\n store_keypair( s3_infra_conn = aws.s3_infrastructure_conn( ),\n region_name = region_name,\n aws_account_type = aws_account_type,\n keypair_name = get_keypair_keypath( aws_account_type ) + keypair_name,\n keypair_filename = keypair_filename )\n print 'Stored keypair in S3 at: ' + get_keypair_keypath( aws_account_type )\n os.remove( keypair_filename )\n\n if init_params.get( 'init-deployment', 'YES' ) == 'YES' :\n print \"Creating Deployment security group.\"\n deploy_secgrp = ec2_conn.create_security_group( get_deployment_secgrp_name( ),\n \"Used by the deployment server.\" )\n deploy_secgrp.authorize( ip_protocol = \"tcp\",\n from_port = 22,\n to_port = 22,\n cidr_ip = hbo_cidr_list ) \n deploy_secgrp.authorize( ip_protocol = \"tcp\",\n from_port = 8080,\n to_port = 8080,\n cidr_ip = hbo_cidr_list ) \n\n deploy_secgrp.authorize( ip_protocol = \"tcp\",\n from_port = 22,\n to_port = 22,\n cidr_ip = build_server_cidr ) \n deploy_secgrp.authorize( ip_protocol = \"tcp\",\n from_port = 8080,\n to_port = 8080,\n cidr_ip = build_server_cidr ) \n\n if init_params.get( 'init-ami-update', 'YES' ) == 'YES' :\n print \"Creating ami-update security group.\"\n amiupdate_secgrp = ec2_conn.create_security_group( get_amiupdate_secgrp_name( ),\n \"Used by the ami update instances.\" )\n amiupdate_secgrp.authorize( ip_protocol = \"tcp\",\n from_port = 22,\n to_port = 22,\n cidr_ip = hbo_cidr_list ) \n amiupdate_secgrp.authorize( ip_protocol = \"tcp\",\n from_port = 8080,\n to_port = 8080,\n cidr_ip = hbo_cidr_list )", "def amazonEc2_create(amazonEc2):\n\treturn amazonEc2", "def create_ec2_with_eip(ec2, ec2_client, subnet_pub_ec2):\n ## create EC2 instance\n print(\"\\n===Creating an EC2 instance\")\n instances = ec2.create_instances(\n ImageId=AMI_ID,\n MinCount=1,\n MaxCount=1,\n InstanceType=EC2_TYPE,\n KeyName=KEY_PAIR_NAME,\n NetworkInterfaces=[{\n \"DeviceIndex\":0,\n \"SubnetId\": subnet_pub_ec2.id}],\n TagSpecifications=[{\n \"ResourceType\":\"instance\",\n \"Tags\":[{\"Key\": \"Name\", \"Value\": EC2_NAME}]\n }]\n )\n \n ## get instance ids\n instances_ids = [i.instance_id for i in instances]\n\n ## wait till instance is ready\n waiter = ec2_client.get_waiter(\"instance_running\")\n waiter.wait(InstanceIds=instances_ids)\n print(\"An EC2 instance is ready.\")\n\n ## create new EIP and attach it to existing EC2 instance\n instance_id = instances[0].instance_id\n try:\n allocation = ec2_client.allocate_address(Domain=\"vpc\")\n response = ec2_client.associate_address(AllocationId=allocation[\"AllocationId\"],\n InstanceId=instance_id)\n print(response)\n except ClientError as e:\n print(e)\n print(f\"===EIP {allocation['PublicIp']} has been assigned to the EC2 instance!\")\n return instances, allocation[\"PublicIp\"]", "def create_secgroup(self, args):\n message = MessageClass()\n region = args[\"Region\"]\n sgid = args[\"Group-Name\"]\n desc = args[\"Description\"]\n\n # Boto3 client creation by providing the access_id and access_secret\n ec2 = boto3.client(service_name='ec2', region_name=region, api_version=None, use_ssl=True,\n verify=None, endpoint_url=None, aws_access_key_id=self.aws_access_key,\n aws_secret_access_key=self.aws_secret_token, aws_session_token=None,\n config=None)\n response = ec2.describe_vpcs()\n vpc_id = response.get('Vpcs', 
[{}])[0].get('VpcId', '')\n\n response = ec2.create_security_group(GroupName=sgid,\n Description=desc,\n VpcId=vpc_id)\n attachment = MessageAttachmentsClass()\n d = response[\"GroupId\"]\n attachment.title = d\n message.message_text = \"Security group created:\"\n message.attach(attachment)\n\n return message.to_json()", "def create_asg(AvailabilityZone):\n lc_name= lib.get_lc_name(stackname, ELBTargetGroupName, AvailabilityZone)\n\n logger.info('Creating launch-config for a new ASG: ' + lc_name)\n userdata='vmseries-bootstrap-aws-s3bucket=' + s3master\n \n try:\n response=asg.create_launch_configuration(LaunchConfigurationName=lc_name, \n ImageId=imageID, KeyName=keyname, SecurityGroups=[sg_untrust], InstanceType=instanceType,\n AssociatePublicIpAddress=False, EbsOptimized=True,\n IamInstanceProfile=iamprofilebs,\n BlockDeviceMappings=[\n {'DeviceName': \"/dev/xvda\", \n 'Ebs': \n {'DeleteOnTermination': True,\n 'VolumeType': 'gp2'\n }\n }\n ],\n UserData=userdata)\n except Exception as e:\n logger.error(\"[ASG LC error]: {}\".format(e))\n return False\n #Get ELB ARN\n tgtGrp = elbv2.describe_target_groups(Names=[ELBTargetGroupName])\n if tgtGrp == None:\n tgtGrp_arn = None\n logger.info('ELB target group is not found!')\n else:\n tgtGrp_d = tgtGrp['TargetGroups']\n tgtGrp_arn = tgtGrp_d[0].get('TargetGroupArn')\n print(\"targetgroup arn: \" + tgtGrp_arn)\n print( \"ELBTargetGroupName: \" +ELBTargetGroupName)\n \n asg_name = lib.get_asg_name(stackname, ELBTargetGroupName, AvailabilityZone)\n logger.info('Creating Auto-Scaling Group with name: ' + asg_name)\n tags={'ResourceId': asg_name, 'ResourceType': 'auto-scaling-group', 'Key': 'Name', 'Value': asg_name, 'PropagateAtLaunch':True}\n \n subnet=lib.choose_subnet(subnetuntrust, AvailabilityZone)\n try:\n response=asg.create_auto_scaling_group(AutoScalingGroupName=asg_name, LaunchConfigurationName=lc_name,\n MinSize=MinInstancesASG, MaxSize=MaximumInstancesASG, DesiredCapacity=MinInstancesASG,\n DefaultCooldown=ScalingPeriod, TargetGroupARNs=[tgtGrp_arn],\n VPCZoneIdentifier=subnet,\n Tags=[tags],\n HealthCheckGracePeriod=900)\n except Exception as e:\n logger.error(\"[ASG create error]: {}\".format(e))\n return False\n \n if create_asg_life_cycle(asg_name, AvailabilityZone) == False:\n return False\n \n scalein=asg_name + '-scalein'\n try:\n response = asg.put_scaling_policy(AutoScalingGroupName=asg_name, PolicyName=scalein, AdjustmentType='ChangeInCapacity',\n ScalingAdjustment=-1, Cooldown=600)\n arn_scalein=response['PolicyARN']\n except Exception as e:\n logger.error(\"[ASG ScaleIn12 Policy]: {}\".format(e))\n return False\n \n scaleout=asg_name + '-scaleout'\n try:\n response = asg.put_scaling_policy(AutoScalingGroupName=asg_name, PolicyName=scaleout, AdjustmentType='ChangeInCapacity',\n ScalingAdjustment=1, Cooldown=600)\n arn_scaleout=response['PolicyARN']\n except Exception as e:\n logger.info(\"[ASG ScaleOut123]: {}\".format(e))\n return False\n \n logger.info('ARN of Scale In and Scale Out: ' + arn_scalein + ' ' + arn_scaleout)\n logger.info('Adding Cloud Watch Alarm : ' + ScalingParameter + ' for ASG: ' + asg_name)\n if cw_func_add_alarms[ScalingParameter](asg_name, arn_scalein, arn_scaleout) == False:\n return False\n \n return True", "def test_deploy_instance_with_new_network_and_sec_group(self):\n suffix = datetime.utcnow().strftime('%Y%m%d%H%M%S')\n instance_name = TEST_SERVER_PREFIX + \"_network_sec_group_\" + suffix\n sec_group_name = TEST_SEC_GROUP_PREFIX + \"_\" + suffix\n network_name = TEST_NETWORK_PREFIX + \"_\" + 
suffix\n network_cidr = TEST_CIDR_PATTERN % 249\n self.__deploy_instance_helper__(instance_name=instance_name,\n network_name=network_name,\n network_cidr=network_cidr,\n sec_group_name=sec_group_name)", "def prepareInstance(username, sshId):\n print os.environ['EC2_KEYPAIR_PATH']\n with settings(user='ubuntu',\n key_filename=os.environ['EC2_KEYPAIR_PATH']):\n password = getpass('Enter a new password for user %s:' % username)\n password2 = getpass('Enter the password a again:')\n if password != password2:\n raise RuntimeError(\"Passwords don't match\")\n sudo('adduser --disabled-password --gecos \",,,\" %s' % username)\n cryptedPassword = _hashPassword(password)\n sudo('usermod --password %s %s' % (cryptedPassword, username))\n sudo('gpasswd --add %s admin' % username)\n authorizeSshKey(username, sshId)\n sudo('apt-get update')\n sudo('DEBIAN_FRONTEND=noninteractive apt-get dist-upgrade -y')\n if exists('/var/run/reboot-required'):\n reboot()", "def launch_instance_vpc ( ec2_conn,\n ami,\n base_name,\n instance_type,\n keypair,\n subnet_id,\n security_group_id,\n machine_type = 'm3.medium',\n user_data = None,\n wait_for_running = True,\n public_ip = False,\n static_ip_address = None,\n monitor_params = None ) :\n interfaces = None\n subnet = None\n security_group_ids = None\n \n if static_ip_address is None:\n spec = boto.ec2.networkinterface.NetworkInterfaceSpecification( subnet_id = subnet_id,\n groups = [ security_group_id ],\n associate_public_ip_address = public_ip )\n interfaces = boto.ec2.networkinterface.NetworkInterfaceCollection( spec )\n else:\n subnet = subnet_id\n security_group_ids = [security_group_id]\n\n instance_r = ec2_conn.run_instances( image_id = ami.id,\n key_name = keypair,\n instance_type = machine_type,\n monitoring_enabled = True,\n network_interfaces = interfaces,\n subnet_id = subnet, \n user_data = user_data,\n security_group_ids = security_group_ids,\n private_ip_address = static_ip_address )\n instance = instance_r.instances[ 0 ];\n aws_cmd( ec2_conn.create_tags,\n [ instance.id, { \"Name\": get_instance_name( base_name, instance_type ) } ] )\n \n print \"Waiting for instance to be ready\"\n \n if wait_for_running :\n running = wait_on_object_state( instance, 'running', max_wait = 600, failure_state = 'terminated' )\n if not running :\n print \"Deployment instance still not up after long period of time! 
Exiting...\"\n sys.exit( 3 )\n\n if monitor_params :\n print \"Adding monitoring to the instance.\"\n\n return instance", "def create_instance(StackId=None, LayerIds=None, InstanceType=None, AutoScalingType=None, Hostname=None, Os=None, AmiId=None, SshKeyName=None, AvailabilityZone=None, VirtualizationType=None, SubnetId=None, Architecture=None, RootDeviceType=None, BlockDeviceMappings=None, InstallUpdatesOnBoot=None, EbsOptimized=None, AgentVersion=None, Tenancy=None):\n pass", "def create_keypair(econfig_file=None, region=None, keyname=\"bcbio\"):\n import boto\n import boto.ec2\n if econfig_file:\n keypair_dir = os.path.dirname(econfig_file).replace(\"elasticluster\", \"aws_keypairs\")\n else:\n keypair_dir = os.path.join(os.getcwd(), \"aws_keypairs\")\n if not os.path.exists(keypair_dir):\n os.makedirs(keypair_dir)\n private_key = os.path.join(os.path.join(keypair_dir, keyname))\n new_key = not os.path.exists(private_key)\n if new_key:\n cmd = [\"ssh-keygen\", \"-t\", \"rsa\", \"-N\", \"\", \"-f\", private_key, \"-C\", \"bcbio_aws_keypair\"]\n subprocess.check_call(cmd)\n public_key = private_key + \".pub\"\n if region:\n ec2 = boto.ec2.connect_to_region(region)\n else:\n ec2 = boto.connect_ec2()\n key = ec2.get_key_pair(keyname)\n if key and new_key:\n print(\"Non matching key %s found in AWS, removing.\" % keyname)\n ec2.delete_key_pair(keyname)\n key = None\n if not key:\n print(\"Key %s not found in AWS, importing created key\" % keyname)\n with open(public_key) as in_handle:\n body = in_handle.read()\n try:\n ec2.import_key_pair(keyname, body)\n except TypeError as e:\n body = body.encode('utf-8')\n ec2.import_key_pair(keyname, body)\n return {\"user_key_name\": keyname, \"user_key_private\": private_key,\n \"user_key_public\": public_key}", "def create_instances(region_name, app_name, image_name,\n storage_enckey=None,\n s3_logs_bucket=None,\n identities_url=None,\n ssh_key_name=None,\n company_domain=None,\n ldap_host=None,\n instance_type=None,\n security_group_ids=None,\n instance_profile_arn=None,\n subnet_type=SUBNET_PRIVATE,\n subnet_id=None,\n vpc_id=None,\n vpc_cidr=None,\n tag_prefix=None,\n dry_run=False,\n template_name=None,\n ec2_client=None,\n **kwargs):\n if not instance_type:\n instance_type = 't3a.micro'\n if not template_name:\n template_name = \"%s-cloud-init-script.j2\" % app_name\n if not ec2_client:\n ec2_client = boto3.client('ec2', region_name=region_name)\n resp = ec2_client.describe_instances(\n Filters=[\n {'Name': 'tag:Name', 'Values': [\"*%s*\" % app_name]},\n {'Name': 'instance-state-name',\n 'Values': [EC2_RUNNING, EC2_STOPPED, EC2_PENDING]}])\n\n instances = None\n instance_ids = []\n stopped_instance_ids = []\n for reserv in resp['Reservations']:\n instances = reserv['Instances']\n for instance in reserv['Instances']:\n names = []\n for tag in instance['Tags']:\n if tag['Key'] == 'Name':\n names = [name.strip() for name in tag['Value'].split(',')]\n break\n if app_name not in names:\n continue\n instance_ids += [instance['InstanceId']]\n if instance['State']['Name'] == EC2_STOPPED:\n stopped_instance_ids += [instance['InstanceId']]\n if stopped_instance_ids:\n ec2_client.start_instances(\n InstanceIds=stopped_instance_ids,\n DryRun=dry_run)\n LOGGER.info(\"%s restarted instances %s for '%s'\",\n tag_prefix, stopped_instance_ids, app_name)\n if instance_ids:\n LOGGER.info(\"%s found instances %s for '%s'\",\n tag_prefix, instance_ids, app_name)\n # If instances are running and there is a message queue,\n # we assume the infrastructure for this app 
is ready to accept\n # containers.\n return instances\n\n search_path = os.path.join(\n os.path.dirname(os.path.abspath(__file__)), 'templates')\n template_loader = jinja2.FileSystemLoader(searchpath=search_path)\n template_env = jinja2.Environment(loader=template_loader)\n template = template_env.get_template(template_name)\n user_data = template.render(\n logs_storage_location=\"s3://%s\" % s3_logs_bucket,\n identities_url=identities_url,\n remote_drop_repo=\"https://github.com/djaodjin/drop.git\",\n company_domain=company_domain,\n ldap_host=ldap_host,\n **kwargs)\n\n # Find the ImageId\n image_id = _get_image_id(\n image_name, instance_profile_arn=instance_profile_arn,\n ec2_client=ec2_client, region_name=region_name)\n\n if not storage_enckey:\n # Always make sure the EBS storage is encrypted.\n storage_enckey = _get_or_create_storage_enckey(\n region_name, tag_prefix, dry_run=dry_run)\n\n block_devices = [\n {\n # `DeviceName` is required and must match expected name otherwise\n # an extra disk is created.\n 'DeviceName': '/dev/xvda', # XXX '/dev/sda1',\n #'VirtualName': 'string',\n # https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-volume-types.html\n 'Ebs': {\n 'DeleteOnTermination': False,\n #'Iops': 100, # 'not supported for gp2'\n #'SnapshotId': 'string',\n 'VolumeSize': 8,\n 'VolumeType': 'gp2'\n },\n #'NoDevice': 'string'\n },\n ]\n if storage_enckey:\n # XXX Haven't been able to use the key we created but the default\n # aws/ebs is OK...\n for block_device in block_devices:\n block_device['Ebs'].update({\n 'KmsKeyId': storage_enckey,\n 'Encrypted': True\n })\n\n network_interfaces = [{\n 'DeviceIndex': 0,\n 'SubnetId': subnet_id,\n # Cannot use `SecurityGroups` with `SubnetId`\n # but can use `SecurityGroupIds`.\n 'Groups': security_group_ids\n }]\n if not subnet_id:\n if not vpc_id:\n vpc_id, _ = _get_vpc_id(tag_prefix, ec2_client=ec2_client,\n region_name=region_name)\n web_subnet_cidrs, dbs_subnet_cidrs, app_subnet_cidrs = _split_cidrs(\n vpc_cidr, ec2_client=ec2_client, region_name=region_name)\n if subnet_type == SUBNET_PRIVATE:\n app_subnet_by_cidrs = _get_subnet_by_cidrs(\n app_subnet_cidrs, tag_prefix,\n vpc_id=vpc_id, ec2_client=ec2_client)\n # Use first valid subnet.\n subnet_id = next(iter(app_subnet_by_cidrs.values()))['SubnetId']\n elif subnet_type == SUBNET_DBS:\n dbs_subnet_by_cidrs = _get_subnet_by_cidrs(\n dbs_subnet_cidrs, tag_prefix,\n vpc_id=vpc_id, ec2_client=ec2_client)\n # Use first valid subnet.\n subnet_id = next(iter(dbs_subnet_by_cidrs.values()))['SubnetId']\n elif subnet_type in [SUBNET_PUBLIC_READY, SUBNET_PUBLIC]:\n web_subnet_by_cidrs = _get_subnet_by_cidrs(\n web_subnet_cidrs, tag_prefix,\n vpc_id=vpc_id, ec2_client=ec2_client)\n # Use first valid subnet.\n subnet_id = next(iter(web_subnet_by_cidrs.values()))['SubnetId']\n if subnet_type == SUBNET_PUBLIC:\n network_interfaces = [{\n 'AssociatePublicIpAddress': True,\n 'DeviceIndex': 0,\n 'SubnetId': subnet_id,\n # Cannot use `SecurityGroups` with `SubnetId`\n # but can use `SecurityGroupIds`.\n 'Groups': security_group_ids\n }]\n\n if not instances or not instance_ids:\n for _ in range(0, NB_RETRIES):\n # The IAM instance profile take some time to be visible.\n try:\n # Cannot use `SecurityGroups` with `SubnetId`\n # but can use `SecurityGroupIds`.\n resp = ec2_client.run_instances(\n BlockDeviceMappings=block_devices,\n ImageId=image_id,\n KeyName=ssh_key_name,\n InstanceType=instance_type,\n MinCount=1,\n MaxCount=1,\n#botocore.exceptions.ClientError: An error occurred 
(InvalidParameterCombination) when calling the RunInstances operation: Network interfaces and an instance-level subnet ID may not be specified on the same request\n# SubnetId=subnet_id,\n# SecurityGroupIds=security_group_ids,\n IamInstanceProfile={'Arn': instance_profile_arn},\n NetworkInterfaces=network_interfaces,\n TagSpecifications=[{\n 'ResourceType': \"instance\",\n 'Tags': [{\n 'Key': 'Name',\n 'Value': app_name\n }, {\n 'Key': 'Prefix',\n 'Value': tag_prefix\n }]\n }],\n UserData=user_data,\n DryRun=dry_run)\n instances = resp['Instances']\n instance_ids = [\n instance['InstanceId'] for instance in instances]\n break\n except botocore.exceptions.ClientError as err:\n if not err.response.get('Error', {}).get(\n 'Code', 'Unknown') == 'InvalidParameterValue':\n raise\n LOGGER.info(\"%s waiting for IAM instance profile %s to be\"\\\n \" operational ...\", tag_prefix, instance_profile_arn)\n time.sleep(RETRY_WAIT_DELAY)\n LOGGER.info(\"%s started instances %s for '%s'\",\n tag_prefix, instance_ids, app_name)\n for _ in range(0, NB_RETRIES):\n # It can take some time before the instances will appear\n # in a `describe_instances` call. We want to make sure\n # not to get errors later on if we execute too fast.\n try:\n resp = ec2_client.describe_instances(InstanceIds=instance_ids)\n break\n except botocore.exceptions.ClientError as err:\n err_code = err.response.get('Error', {}).get('Code', 'Unknown')\n LOGGER.error(\"XXX err_code=%s\", err_code)\n if not err_code == 'InvalidInstanceID.NotFound':\n raise\n LOGGER.info(\"%s waiting for EC2 instances %s to be\"\\\n \" operational ...\", tag_prefix, instance_ids)\n time.sleep(RETRY_WAIT_DELAY)\n\n return instances", "def createSG(ec2,name,rules):\n\t# check if the security group exists\n\tgroup = None\n\tsgGroups = [sg for sg in ec2.get_all_security_groups() if sg.name == name]\n\tif sgGroups:\n\t\tgroup = sgGroups[0]\n\t\tec2.delete_security_group(name=name, group_id=group)\t\n\tprint \"Creating %s Security Group\" % name\n\tgroup = ec2.create_security_group(name, 'group for %s' % name)\n\tif group:\n\t\t# Set the inbound rules\n\t\tfor rule in rules:\n\t\t\tif rule.src_group_name:\n\t\t\t\tgroup.authorize(ip_protocol=rule.ip_protocol,from_port=rule.from_port,to_port=rule.to_port,cidr_ip=rule.cidr_ip,src_group=group)\n\t\t\telse:\n\t\t\t\tgroup.authorize(ip_protocol=rule.ip_protocol,from_port=rule.from_port,to_port=rule.to_port,cidr_ip=rule.cidr_ip,src_group=None)\n\t\treturn True\n\telse:\n\t\tlogError('Error during '+name+' Security Group update')\n\t\treturn False", "def ec2_start(resource, metadata):\n\n # do minimal provisioning of machine through cloud-init\n # this installs git and bootstraps puppet to provision the rest\n # requires recent ubuntu (14.04/16.04) or RHEL/CentOS 7\n userdata = \"\"\"#cloud-config\npackage_update: true\nhostname: {hostname}\nfqdn: {fqdn}\nmanage_etc_hosts: true\npackages:\n - git\nwrite_files:\n - path: /etc/facter/facts.d/hostgroup.txt\n content: hostgroup=aws\n - path: /etc/facter/facts.d/role.txt\n content: role={role}\nruncmd:\n - git clone {repo} /etc/puppet\n - /etc/puppet/support_scripts/bootstrap-puppet.sh\"\"\".format(\n hostname=metadata['hostname'], fqdn=metadata['fqdn'],\n role=metadata['role'], repo=metadata['repo'])\n\n instances = resource.create_instances(\n ImageId=metadata['ami'],\n MinCount=1,\n MaxCount=1,\n InstanceType=metadata['type'],\n SubnetId=metadata['subnet'],\n SecurityGroupIds=[metadata['secgroup']],\n KeyName=metadata['keypair'],\n UserData=userdata,\n 
BlockDeviceMappings=[\n {\n 'DeviceName': '/dev/sda1', # root so far, sometimes /dev/xvdh ?\n 'Ebs': {\n 'VolumeSize': 20,\n 'DeleteOnTermination': True,\n 'VolumeType': 'gp2'\n },\n },\n ]\n )\n\n # not sure if we really need to sleep before tagging but\n # we wait until running anyway which takes much longer than 1 second\n time.sleep(1)\n for instance in instances:\n # first set tags, Name and Role\n instance.create_tags(\n Resources=[instance.id],\n Tags=[\n {\n 'Key': 'Role',\n 'Value': metadata['role']\n },\n {\n 'Key': 'Name',\n 'Value': metadata['fqdn']\n },\n ]\n )\n\n # ensure system is running before we print address to connect to\n instance.wait_until_running()\n # instance.load()\n ec2_status(resource, metadata)", "def create_instance(\r\n image_id, instance_type, key_name, security_group_names=None):\r\n try:\r\n instance_params = {\r\n 'ImageId': image_id, 'InstanceType': instance_type, 'KeyName': key_name\r\n }\r\n if security_group_names is not None:\r\n instance_params['SecurityGroups'] = security_group_names\r\n instance = ec2.create_instances(**instance_params, MinCount=1, MaxCount=1)[0]\r\n logger.info(\"Created instance %s.\", instance.id)\r\n except ClientError:\r\n logging.exception(\r\n \"Couldn't create instance with image %s, instance type %s, and key %s.\",\r\n image_id, instance_type, key_name)\r\n raise\r\n else:\r\n return instance", "def createaws() -> my_aws_api_library.MyAws:\r\n aws_cred_file_path = os.environ['AWS_CRED_FILE']\r\n comp_pubkey = os.environ['COMPANY_PUBKEY']\r\n my_aws = my_aws_api_library.MyAws(aws_cred_file_path, comp_pubkey)\r\n return my_aws", "def new_instance(self, args):\n message = MessageClass()\n region = args[\"Region\"]\n image_id = args[\"Image-ID\"]\n\n # Boto3 resource creation by providing the access_id and access_secret\n ec2 = boto3.resource(service_name='ec2', region_name=region, api_version=None, use_ssl=True,\n verify=None, endpoint_url=None, aws_access_key_id=self.aws_access_key,\n aws_secret_access_key=self.aws_secret_token, aws_session_token=None,\n config=None)\n ec2.create_instances(ImageId=image_id, MinCount=1, MaxCount=5)\n\n message.message_text = \"New Instance Created\"\n return message.to_json()", "def create(self):\n self.initialize()\n\n if not self.__keypair:\n logger.info('Creating keypair %s...' 
% self.keypair_settings.name)\n\n if self.keypair_settings.public_filepath and os.path.isfile(\n self.keypair_settings.public_filepath):\n logger.info(\"Uploading existing keypair\")\n self.__keypair = nova_utils.upload_keypair_file(\n self._nova, self.keypair_settings.name,\n self.keypair_settings.public_filepath)\n\n if self.keypair_settings.delete_on_clean is not None:\n delete_on_clean = self.keypair_settings.delete_on_clean\n self.__delete_keys_on_clean = delete_on_clean\n else:\n self.__delete_keys_on_clean = False\n else:\n logger.info(\"Creating new keypair\")\n keys = nova_utils.create_keys(self.keypair_settings.key_size)\n self.__keypair = nova_utils.upload_keypair(\n self._nova, self.keypair_settings.name,\n nova_utils.public_key_openssh(keys))\n file_utils.save_keys_to_files(\n keys, self.keypair_settings.public_filepath,\n self.keypair_settings.private_filepath)\n\n if self.keypair_settings.delete_on_clean is not None:\n delete_on_clean = self.keypair_settings.delete_on_clean\n self.__delete_keys_on_clean = delete_on_clean\n else:\n self.__delete_keys_on_clean = True\n elif self.__keypair and not os.path.isfile(\n self.keypair_settings.private_filepath):\n logger.warn(\"The public key already exist in OpenStack \\\n but the private key file is not found ..\")\n\n return self.__keypair", "def generate(cls, params = None, quiet = False):\n\n if params is None:\n if not quiet:\n logger.debug(\"Generating new ECDSA key parameters\")\n params = KeyParams.generateEC()\n\n assert isinstance(params, KeyParams)\n\n if not quiet:\n logger.debug(\"Generating new ECDSA key\")\n\n return cls(POW = rpki.POW.Asymmetric.generateFromParams(params.get_POW()))", "def spin_ec2(self):\n #message = event['message']\n init_script = \"\"\"#!/bin/bash\necho \"sleep 50\" >> /etc/rc.local\necho \"shutdown -H +5 >> /etc/rc.local\"\nsleep 50\nshutdown -H +5\"\"\"\n\n print ('Running script:')\n print (init_script)\n\n instance = EC2.run_instances(\n ImageId=AMI,\n InstanceType=INSTANCE_TYPE,\n MinCount=1, # required by boto, even though it's kinda obvious.\n MaxCount=1,\n InstanceInitiatedShutdownBehavior='stop', # make shutdown in script terminate ec2\n UserData=init_script # file to run on instance init.\n \n )\n\n print (\"New instance created.\")\n instance_id = instance['Instances'][0]['InstanceId']\n print (instance_id)\n print (instance)\n EC2.create_tags(Resources=[instance_id], Tags=[{\"Key\" : \"Name\", 'Value': 'test01',},],)", "def process_security_group ( ec2_conn, vpc, base_name, params, secgrp_type = None, secgrp_description = None ) :\n if not secgrp_type :\n secgrp_type = params[ 'type' ]\n if not secgrp_description :\n secgrp_description = params[ 'description' ]\n\n secgrp = find_group( ec2_conn, base_name, secgrp_type )\n if not secgrp :\n if params.get( 'create', 'NO' ) == 'YES' :\n secgrp_name = get_secgrp_name( base_name, secgrp_type )\n print \"Creating security group with name: \" + secgrp_name\n secgrp = create_secgrp( ec2_conn, vpc, secgrp_name, secgrp_description )\n else :\n print \"ERROR: Could not find group with name \" + get_secgrp_name( base_name, secgrp_type )\n sys.exit( 1 )\n\n print \"Prepping rules for security group \" + secgrp.name\n remove_all_rules( ec2_conn, [ secgrp ], True, base_name )\n\n # Reload group to retrieve new object with no rules.\n secgrp = find_group( ec2_conn, base_name, secgrp_type )\n\n is_public = params.get( 'public' ) == 'YES'\n if is_public :\n nat_secgrp = None\n if params.get( 'os-update' ) == 'YES' :\n 
ec2_conn.authorize_security_group_egress( group_id = secgrp.id,\n ip_protocol = \"tcp\",\n from_port = 80,\n to_port = 80,\n cidr_ip = all_ip_cidr )\n if params.get( 'public-tcp-ports' ) :\n public_ports = params[ 'public-tcp-ports' ]\n for port in public_ports :\n secgrp.authorize( ip_protocol = \"tcp\",\n from_port = port,\n to_port = port,\n cidr_ip = all_ip_cidr )\n\n if params.get( 'incoming-cidr-rules' ) :\n for incoming_rule in params[ 'incoming-cidr-rules' ] :\n start_port = incoming_rule.get( 'port' )\n end_port = start_port\n\n protocol = get_secgrp_protocol_param( incoming_rule )\n cidr_list = get_cidr_param( incoming_rule[ 'cidr' ] )\n\n secgrp.authorize( ip_protocol = protocol,\n from_port = start_port,\n to_port = end_port,\n cidr_ip = cidr_list )\n\n else :\n nat_secgrp = find_group( ec2_conn, base_name, 'NAT' )\n # Grant NAT access to login to the machine\n if not nat_secgrp :\n print \"ERROR: Could not find NAT security group!\"\n sys.exit( 1 )\n grant_ssh_access( ec2_conn, [ secgrp ], nat_secgrp )\n if params.get( 'os-update' ) == 'YES' :\n grant_cidr_access( ec2_conn, all_ip_cidr, [ secgrp ], 80, nat_secgrp )\n # Need to reload secgrp so it contains latest rules\n secgrp = find_group( ec2_conn, base_name, secgrp_type )\n # Need to reload NAT secgrp so it contains latest rules\n nat_secgrp = find_group( ec2_conn, base_name, 'NAT' )\n\n if params.get( 'outgoing-cidr-rules' ) :\n for outgoing_rule in params[ 'outgoing-cidr-rules' ] :\n start_port = outgoing_rule.get( 'port' )\n end_port = start_port\n protocol = get_secgrp_protocol_param( outgoing_rule )\n cidr_list = get_cidr_param( outgoing_rule[ 'cidr' ] )\n\n for cidr in cidr_list :\n grant_cidr_access( ec2_conn, cidr, [ secgrp ], start_port, nat_secgrp, protocol )\n\n if params.get( 'outgoing-group-rules' ) :\n for outgoing_rule in params[ 'outgoing-group-rules' ] :\n start_port = outgoing_rule.get( 'port' )\n end_port = start_port\n protocol = get_secgrp_protocol_param( outgoing_rule )\n target_secgrp_type = outgoing_rule[ 'group-type' ]\n target_secgrp = find_group( ec2_conn, base_name, target_secgrp_type )\n grant_grp_access( ec2_conn, [ secgrp ], target_secgrp, start_port, protocol )\n \n if params.get( 'incoming-group-rules' ) :\n for incoming_rule in params[ 'incoming-group-rules' ] :\n start_port = incoming_rule.get( 'port' )\n end_port = start_port\n protocol = get_secgrp_protocol_param( incoming_rule )\n incoming_secgrp_type = incoming_rule[ 'group-type' ]\n incoming_secgrp = find_group( ec2_conn, base_name, incoming_secgrp_type )\n grant_grp_access( ec2_conn, [ incoming_secgrp ], secgrp, start_port, protocol )\n\n if params.get( 'self-rules' ) :\n for self_rule in params[ 'self-rules' ] :\n start_port = self_rule.get( 'port' )\n end_port = start_port\n\n if not start_port :\n start_port = self_rule[ 'port-range' ][ 'start' ]\n end_port = self_rule[ 'port-range' ][ 'end' ]\n\n protocol = get_secgrp_protocol_param( self_rule )\n\n grant_grp_self_access( ec2_conn, secgrp, start_port, end_port, protocol )\n\n # Reload the security group so it contains all the latest rules\n secgrp = find_group( ec2_conn, base_name, secgrp_type )\n return ( secgrp_type, secgrp )", "def _generate_ec2_instance_and_sg(resource):\n for instance in resource.instances.all():\n for security_group in instance.security_groups:\n yield instance, security_group", "def launch_instance_nonvpc ( ec2_conn,\n ami,\n base_name,\n instance_type,\n keypair,\n security_group,\n machine_type = 'm1.small',\n user_data = None,\n wait_for_running = True 
) :\n instance_r = ami.run( key_name = keypair,\n instance_type = machine_type,\n security_groups = [ security_group ],\n user_data = user_data )\n instance = instance_r.instances[ 0 ];\n aws_cmd( ec2_conn.create_tags,\n [ instance.id, { \"Name\": get_instance_name( base_name, instance_type ) } ] )\n if wait_for_running :\n running = wait_on_object_state( instance, 'running', failure_state = 'terminated' )\n if not running :\n print \"Deployment instance still not up after long period of time! Exiting...\"\n sys.exit( 3 )\n\n return instance", "def __deploy_instance_helper__(self, instance_name,\n network_name=None, network_cidr=None, is_network_new=True,\n keypair_name=None, is_keypair_new=True,\n sec_group_name=None, metadata=None):\n\n flavor_id = self.nova_operations.get_any_flavor_id()\n self.assertIsNotNone(flavor_id, \"Problems retrieving a flavor\")\n\n image_id = self.nova_operations.find_image_id_by_name(image_name=BASE_IMAGE_NAME)\n self.assertIsNotNone(image_id, \"Problems retrieving the image '{}'\".format(BASE_IMAGE_NAME))\n\n # instance prerequisites\n try:\n network_id_list = None\n if network_name:\n if is_network_new:\n # Create the given network\n cidr = network_cidr or TEST_CIDR_DEFAULT\n network = self.neutron_operations.create_network_and_subnet(network_name, cidr=cidr)\n self.test_world['networks'].append(network['id'])\n network_id_list = [{'net-id': network['id']}]\n else:\n # Look for the network id\n net_list = self.neutron_operations.find_networks(name=network_name)\n self.assertTrue(len(net_list) != 0, \"Required network '%s' could not be found\" % network_name)\n network_id_list = [{'net-id': net_list[0]['id']}]\n\n except NeutronClientException as e:\n self.logger.debug(\"Required network could not be created: %s\", e)\n self.fail(e)\n\n try:\n if keypair_name:\n if is_keypair_new:\n self.nova_operations.create_keypair(keypair_name)\n self.test_world['keypair_names'].append(keypair_name)\n else:\n keypair_found = self.nova_operations.find_keypair(name=keypair_name)\n self.assertIsNotNone(keypair_found, \"Required Keypair '%s' could not be found\" % keypair_name)\n except NovaClientException as e:\n self.logger.debug(\"Required keypair could not be created: %s\", e)\n self.fail(e)\n\n try:\n security_group_name_list = None\n if sec_group_name:\n sec_group_id = self.nova_operations.create_security_group_and_rules(sec_group_name)\n self.test_world['sec_groups'].append(sec_group_id)\n security_group_name_list = [sec_group_name]\n except NovaClientException as e:\n self.logger.debug(\"Required security group could not be created: %s\", e)\n self.fail(e)\n\n # create new instance\n try:\n server_data = self.nova_operations.launch_instance(instance_name=instance_name,\n flavor_id=flavor_id,\n image_id=image_id,\n metadata=metadata,\n keypair_name=keypair_name,\n security_group_name_list=security_group_name_list,\n network_id_list=network_id_list)\n except Forbidden as e:\n self.logger.debug(\"Quota exceeded when launching a new instance\")\n self.fail(e)\n except OverLimit as e:\n self.logger.debug(\"Not enough resources to launch new instance: %s\", e)\n self.fail(e)\n else:\n self.test_world['servers'].append(server_data['id'])\n\n # Wait for status=ACTIVE\n status, detail = self.nova_operations.wait_for_task_status(server_data['id'], 'ACTIVE')\n self.assertEqual(status, 'ACTIVE', \"{detail}. 
Current status is {status}\".format(detail=detail, status=status))\n\n return server_data['id']", "def create_instances(ec2_resource: ServiceResource,\n image_id: str,\n key_name: str,\n instance_type: str,\n num_instances: int = 1,\n security_group_ids: Optional[List] = None,\n user_data: Optional[Union[str, bytes]] = None,\n block_device_map: Optional[List[Dict]] = None,\n instance_profile_arn: Optional[str] = None,\n placement_az: Optional[str] = None,\n subnet_id: str = None,\n tags: Optional[Dict[str, str]] = None) -> List[dict]:\n logger.info('Creating %s instance(s) ... ', instance_type)\n\n if isinstance(user_data, str):\n user_data = user_data.encode('utf-8')\n\n request = {'ImageId': image_id,\n 'MinCount': num_instances,\n 'MaxCount': num_instances,\n 'KeyName': key_name,\n 'SecurityGroupIds': security_group_ids,\n 'InstanceType': instance_type,\n 'UserData': user_data,\n 'BlockDeviceMappings': block_device_map,\n 'SubnetId': subnet_id}\n\n if instance_profile_arn:\n # We could just retry when we get an error because the ARN doesn't\n # exist, but we might as well wait for it.\n wait_until_instance_profile_arn_exists(instance_profile_arn)\n\n # Add it to the request\n request['IamInstanceProfile'] = {'Arn': instance_profile_arn}\n\n if placement_az:\n request['Placement'] = {'AvailabilityZone': placement_az}\n\n if tags:\n # Tag everything when we make it.\n flat_tags = flatten_tags(tags)\n request['TagSpecifications'] = [{'ResourceType': 'instance', 'Tags': flat_tags},\n {'ResourceType': 'volume', 'Tags': flat_tags}]\n\n return ec2_resource.create_instances(**prune(request))", "def create_instance(module, ecs, zone_id, image_id, instance_type, group_id, io_optimized,\n vswitch_id, instance_name, description, internet_data, host_name, password,\n system_disk, volumes, count, allocate_public_ip, bind_eip, instance_tags,\n instance_charge_type, period, auto_renew, ids):\n\n changed = False\n try:\n # call to CreateInstance method in Footmark\n result = ecs.create_instance(zone_id=zone_id, image_id=image_id, instance_type=instance_type, group_id=group_id,\n io_optimized=io_optimized, vswitch_id=vswitch_id, instance_name=instance_name,\n description=description, internet_data=internet_data, host_name=host_name,\n password=password, system_disk=system_disk, volumes=volumes, count=count,\n allocate_public_ip=allocate_public_ip, bind_eip=bind_eip,\n instance_tags=instance_tags, instance_charge_type=instance_charge_type,\n period=period, auto_renew=auto_renew, ids=ids)\n\n if 'error' in (''.join(str(result))).lower():\n module.fail_json(msg=result)\n changed = True\n\n except ECSResponseError as e:\n module.fail_json(msg='Unable to create instance, error: {0}'.format(e))\n\n return changed, result", "def create_instance(name, config, region, secrets, key_name, instance_data,\n deploypass):\n conn = connect_to_region(\n region,\n aws_access_key_id=secrets['aws_access_key_id'],\n aws_secret_access_key=secrets['aws_secret_access_key']\n )\n vpc = VPCConnection(\n region=conn.region,\n aws_access_key_id=secrets['aws_access_key_id'],\n aws_secret_access_key=secrets['aws_secret_access_key'])\n\n # Make sure we don't request the same things twice\n token = str(uuid.uuid4())[:16]\n\n instance_data = instance_data.copy()\n instance_data['name'] = name\n instance_data['hostname'] = '{name}.{domain}'.format(\n name=name, domain=config['domain'])\n\n bdm = None\n if 'device_map' in config:\n bdm = BlockDeviceMapping()\n for device, device_info in config['device_map'].items():\n bdm[device] = 
BlockDeviceType(size=device_info['size'],\n delete_on_termination=True)\n\n ip_address = get_ip(instance_data['hostname'])\n subnet_id = None\n\n if ip_address:\n s_id = get_subnet_id(vpc, ip_address)\n if s_id in config['subnet_ids']:\n if ip_available(conn, ip_address):\n subnet_id = s_id\n else:\n log.warning(\"%s already assigned\" % ip_address)\n\n # TODO: fail if no IP assigned\n if not ip_address or not subnet_id:\n ip_address = None\n subnet_id = choice(config.get('subnet_ids'))\n\n while True:\n try:\n reservation = conn.run_instances(\n image_id=config['ami'],\n key_name=key_name,\n instance_type=config['instance_type'],\n block_device_map=bdm,\n client_token=token,\n subnet_id=subnet_id,\n private_ip_address=ip_address,\n disable_api_termination=bool(config.get('disable_api_termination')),\n security_group_ids=config.get('security_group_ids', []),\n )\n break\n except boto.exception.BotoServerError:\n log.exception(\"Cannot start an instance\")\n time.sleep(10)\n\n instance = reservation.instances[0]\n log.info(\"instance %s created, waiting to come up\", instance)\n # Wait for the instance to come up\n while True:\n try:\n instance.update()\n if instance.state == 'running':\n break\n except Exception:\n log.exception(\"hit error waiting for instance to come up\")\n time.sleep(10)\n\n instance.add_tag('Name', name)\n instance.add_tag('FQDN', instance_data['hostname'])\n instance.add_tag('created', time.strftime(\"%Y-%m-%d %H:%M:%S %Z\",\n time.gmtime()))\n instance.add_tag('moz-type', config['type'])\n\n log.info(\"assimilating %s\", instance)\n instance.add_tag('moz-state', 'pending')\n while True:\n try:\n assimilate(instance.private_ip_address, config, instance_data, deploypass)\n break\n except:\n log.exception(\"problem assimilating %s\", instance)\n time.sleep(10)\n instance.add_tag('moz-state', 'ready')", "def test_temp_create_sg_multinode(iam_client_stub, ec2_client_stub):\n\n # Generate a config of the desired form.\n subnet_id = DEFAULT_SUBNET[\"SubnetId\"]\n # head and worker stuff:\n head_and_worker_kludge = {\n \"head_node\": {\n \"SubnetIds\": [subnet_id]\n },\n \"worker_nodes\": {\n \"SubnetIds\": [subnet_id]\n }\n }\n # security group info to go in provider field\n provider_data = helpers.load_aws_example_config_file(\n \"example-security-group.yaml\")[\"provider\"]\n\n # a multi-node-type config -- will add head/worker stuff and security group\n # info to this.\n base_config = helpers.load_aws_example_config_file(\"example-full.yaml\")\n\n config = copy.deepcopy(base_config)\n # Add security group data\n config[\"provider\"] = provider_data\n # Add head and worker fields.\n config.update(head_and_worker_kludge)\n\n # Generate stubs\n stubs.configure_iam_role_default(iam_client_stub)\n stubs.configure_key_pair_default(ec2_client_stub)\n\n # Only one of these (the one specified in head_node / worker_nodes)\n # is in the correct vpc.\n # This list of subnets is generated by the ec2.subnets.all() call\n # and then ignored, since head_node and worker_nodes already specify\n # subnet_ids.\n stubs.describe_a_thousand_subnets_in_different_vpcs(ec2_client_stub)\n\n # The rest of the stubbing logic is copied from\n # test_create_sg_with_custom_inbound_rules_and_name.\n\n # expect to describe the head subnet ID\n stubs.describe_subnets_echo(ec2_client_stub, DEFAULT_SUBNET)\n # given no existing security groups within the VPC...\n stubs.describe_no_security_groups(ec2_client_stub)\n # expect to create a security group on the head node VPC\n 
stubs.create_sg_echo(ec2_client_stub, DEFAULT_SG_WITH_NAME)\n # expect new head security group details to be retrieved after creation\n stubs.describe_sgs_on_vpc(\n ec2_client_stub,\n [DEFAULT_SUBNET[\"VpcId\"]],\n [DEFAULT_SG_WITH_NAME],\n )\n\n # given custom existing default head security group inbound rules...\n # expect to authorize both default and custom inbound rules\n stubs.authorize_sg_ingress(\n ec2_client_stub,\n DEFAULT_SG_WITH_NAME_AND_RULES,\n )\n\n # given the prior modification to the head security group...\n # expect the next read of a head security group property to reload it\n stubs.describe_sg_echo(ec2_client_stub, DEFAULT_SG_WITH_NAME_AND_RULES)\n\n _get_vpc_id_or_die.cache_clear()\n\n # given our mocks and the config as input...\n # expect the config to be validated and bootstrapped successfully\n bootstrapped_config = helpers.bootstrap_aws_config(config)\n\n # expect the bootstrapped config to have the custom security group...\n # name and in bound rules\n assert bootstrapped_config[\"provider\"][\"security_group\"][\n \"GroupName\"] == DEFAULT_SG_WITH_NAME_AND_RULES[\"GroupName\"]\n assert config[\"provider\"][\"security_group\"][\n \"IpPermissions\"] == CUSTOM_IN_BOUND_RULES\n\n # Confirming boostrap config does not currently touch available node types.\n assert bootstrapped_config[\"available_node_types\"] == config[\n \"available_node_types\"]\n\n # Confirming head and worker subnet_ids are untouched\n assert bootstrapped_config[\"head_node\"][\"SubnetIds\"] ==\\\n config[\"head_node\"][\"SubnetIds\"] ==\\\n config[\"worker_nodes\"][\"SubnetIds\"] ==\\\n bootstrapped_config[\"worker_nodes\"][\"SubnetIds\"] ==\\\n [DEFAULT_SUBNET[\"SubnetId\"]]\n\n # Confirming correct security group got filled for head and workers\n sg_id = DEFAULT_SG[\"GroupId\"]\n assert bootstrapped_config[\"head_node\"][\"SecurityGroupIds\"] == [sg_id]\n assert bootstrapped_config[\"worker_nodes\"][\"SecurityGroupIds\"] == [sg_id]\n\n # Confirm security group is in the right VPC.\n # (Doesn't really confirm anything except for the structure of this test\n # data.)\n assert DEFAULT_SG[\"VpcId\"] == DEFAULT_SUBNET[\"VpcId\"]\n assert DEFAULT_SUBNET[\"SubnetId\"] ==\\\n bootstrapped_config[\"head_node\"][\"SubnetIds\"][0]\n\n # expect no pending responses left in IAM or EC2 client stub queues\n iam_client_stub.assert_no_pending_responses()\n ec2_client_stub.assert_no_pending_responses()", "def create(self, body: CloudSecurityGroup) -> Dict:\n\t\treturn self._post(route=AWSSecurityGroupConsts.CLOUD_SECURITY_GROUP.value, body=body)", "def create_keypair(address_type, addresses_path, address_prefix, name):\n vkey_file = get_vkey_file(addresses_path, address_prefix, name)\n skey_file = get_skey_file(addresses_path, address_prefix, name)\n\n if(path.exists(vkey_file)) :\n print(address_prefix, \"key pair already exists for\", name)\n return\n \n makedirs(path.dirname(vkey_file), mode=0o777, exist_ok=True)\n\n run_params = ['cardano-cli', address_type, 'key-gen', '--verification-key-file', vkey_file, '--signing-key-file', skey_file]\n subprocess_run(run_params, capture_output=False, text=True)\n return", "def launch_instance(ec2, ami, itype, kp_name, sec_group_name):\n\n\n instance = ec2.run_instances(\n ami,\n key_name=kp_name,\n instance_type=itype,\n security_groups=[sec_group_name]\n ).instances[0]\n\n while instance.state != 'running':\n sys.stdout.write('Waiting for instance: {}, at DNS: {} to start\\n'.format(instance.id,\n str(instance.dns_name).split('.')[0]))\n time.sleep(5)\n 
instance.update()\n\n sys.stdout.write('\\nSuccess! EC2 Instance Launched \\nInstance_Type: {} in {}'.format(instance.instance_type,\n instance.placement))\n return instance", "def add_extra_args(self):\n super(AwsCreateInstancesMethod, self).add_extra_args()\n self.parser.add_argument(\"--key_pair_name\", default=os.environ.get(\"YB_EC2_KEY_PAIR_NAME\"),\n help=\"AWS Key Pair name\")\n self.parser.add_argument(\"--security_group_id\", default=None,\n help=\"AWS comma delimited security group IDs.\")\n self.parser.add_argument(\"--volume_type\", choices=[\"gp3\", \"gp2\", \"io1\"], default=\"gp2\",\n help=\"Volume type for volumes on EBS-backed instances.\")\n self.parser.add_argument(\"--spot_price\", default=None,\n help=\"Spot price for each instance (if desired)\")\n self.parser.add_argument(\"--cmk_res_name\", help=\"CMK arn to enable encrypted EBS volumes.\")\n self.parser.add_argument(\"--iam_profile_arn\", help=\"ARN string for IAM instance profile\")\n self.parser.add_argument(\"--disk_iops\", type=int, default=1000,\n help=\"desired iops for aws v4 instance volumes\")\n self.parser.add_argument(\"--disk_throughput\", type=int, default=125,\n help=\"desired throughput for aws gp3 instance volumes\")", "async def buildInstance(self, instanceDef, env):\n # Create tag specifications which we use to pass variables to the instance\n tags = self.env_to_tags(env)\n \n import EC2Spawner as ec2spawnerModule\n bootstrapPath = os.path.dirname(ec2spawnerModule.__file__) + '/data/bootstrap.sh'\n\n with open(bootstrapPath, 'r') as myfile:\n UserData = myfile.read()\n \n instance = self.ec2.create_instances(\n ImageId=instanceDef['AWS_AMI_ID'],\n KeyName=instanceDef['AWS_KEYNAME'], \n InstanceType='t2.medium',\n MinCount=1, MaxCount=1,\n DryRun=instanceDef['DryRun'],\n SubnetId=instanceDef['AWS_SUBNET'], \n SecurityGroupIds=[instanceDef['AWS_SECURITY_GROUP']],\n TagSpecifications=tags,\n IamInstanceProfile={'Arn': instanceDef['AWS_IAM_ARN']},\n UserData=UserData\n )\n\n # 
InstanceType='t1.micro'|'t2.nano'|'t2.micro'|'t2.small'|'t2.medium'|'t2.large'|'t2.xlarge'|'t2.2xlarge'|'t3.nano'|'t3.micro'|'t3.small'|'t3.medium'|'t3.large'|'t3.xlarge'|'t3.2xlarge'|'m1.small'|'m1.medium'|'m1.large'|'m1.xlarge'|'m3.medium'|'m3.large'|'m3.xlarge'|'m3.2xlarge'|'m4.large'|'m4.xlarge'|'m4.2xlarge'|'m4.4xlarge'|'m4.10xlarge'|'m4.16xlarge'|'m2.xlarge'|'m2.2xlarge'|'m2.4xlarge'|'cr1.8xlarge'|'r3.large'|'r3.xlarge'|'r3.2xlarge'|'r3.4xlarge'|'r3.8xlarge'|'r4.large'|'r4.xlarge'|'r4.2xlarge'|'r4.4xlarge'|'r4.8xlarge'|'r4.16xlarge'|'r5.large'|'r5.xlarge'|'r5.2xlarge'|'r5.4xlarge'|'r5.8xlarge'|'r5.12xlarge'|'r5.16xlarge'|'r5.24xlarge'|'r5.metal'|'r5a.large'|'r5a.xlarge'|'r5a.2xlarge'|'r5a.4xlarge'|'r5a.12xlarge'|'r5a.24xlarge'|'r5d.large'|'r5d.xlarge'|'r5d.2xlarge'|'r5d.4xlarge'|'r5d.8xlarge'|'r5d.12xlarge'|'r5d.16xlarge'|'r5d.24xlarge'|'r5d.metal'|'x1.16xlarge'|'x1.32xlarge'|'x1e.xlarge'|'x1e.2xlarge'|'x1e.4xlarge'|'x1e.8xlarge'|'x1e.16xlarge'|'x1e.32xlarge'|'i2.xlarge'|'i2.2xlarge'|'i2.4xlarge'|'i2.8xlarge'|'i3.large'|'i3.xlarge'|'i3.2xlarge'|'i3.4xlarge'|'i3.8xlarge'|'i3.16xlarge'|'i3.metal'|'hi1.4xlarge'|'hs1.8xlarge'|'c1.medium'|'c1.xlarge'|'c3.large'|'c3.xlarge'|'c3.2xlarge'|'c3.4xlarge'|'c3.8xlarge'|'c4.large'|'c4.xlarge'|'c4.2xlarge'|'c4.4xlarge'|'c4.8xlarge'|'c5.large'|'c5.xlarge'|'c5.2xlarge'|'c5.4xlarge'|'c5.9xlarge'|'c5.18xlarge'|'c5d.large'|'c5d.xlarge'|'c5d.2xlarge'|'c5d.4xlarge'|'c5d.9xlarge'|'c5d.18xlarge'|'cc1.4xlarge'|'cc2.8xlarge'|'g2.2xlarge'|'g2.8xlarge'|'g3.4xlarge'|'g3.8xlarge'|'g3.16xlarge'|'g3s.xlarge'|'cg1.4xlarge'|'p2.xlarge'|'p2.8xlarge'|'p2.16xlarge'|'p3.2xlarge'|'p3.8xlarge'|'p3.16xlarge'|'d2.xlarge'|'d2.2xlarge'|'d2.4xlarge'|'d2.8xlarge'|'f1.2xlarge'|'f1.4xlarge'|'f1.16xlarge'|'m5.large'|'m5.xlarge'|'m5.2xlarge'|'m5.4xlarge'|'m5.12xlarge'|'m5.24xlarge'|'m5a.large'|'m5a.xlarge'|'m5a.2xlarge'|'m5a.4xlarge'|'m5a.12xlarge'|'m5a.24xlarge'|'m5d.large'|'m5d.xlarge'|'m5d.2xlarge'|'m5d.4xlarge'|'m5d.12xlarge'|'m5d.24xlarge'|'h1.2xlarge'|'h1.4xlarge'|'h1.8xlarge'|'h1.16xlarge'|'z1d.large'|'z1d.xlarge'|'z1d.2xlarge'|'z1d.3xlarge'|'z1d.6xlarge'|'z1d.12xlarge'|'u-6tb1.metal'|'u-9tb1.metal'|'u-12tb1.metal'\n \n self.log.debug(\"AWS Instance ID: {}\".format(instance[0].id))\n waiter = self.client.get_waiter('instance_running')\n \n self.log.debug('Waiting...')\n await waiter.wait(InstanceIds=[instance[0].id])\n description = self.client.describe_instances(InstanceIds=[instance[0].id])\n instanceIP = description['Reservations'][0]['Instances'][0]['NetworkInterfaces'][0]['Association']['PublicIp']\n\n self.ec2_instance_ip = instanceIP\n self.log.debug(\"AWS Instance IP: {}\".format(self.ec2_instance_ip))\n self.ec2_instance_id = instance[0].id\n return instanceIP", "def create_security_group(group_name):\n ec2 = boto.ec2.connect_to_region(AWS_REGION)\n for g in ec2.get_all_security_groups():\n if g.name == group_name:\n return # We already have this group setup\n group = ec2.create_security_group(group_name,\n \"%s SSH access group\" % group_name)\n group.authorize(\"tcp\", 22, 22, \"0.0.0.0/0\") # SSH is on port 22, all IPs\n group.authorize(\"tcp\", 80, 80, \"0.0.0.0/0\")\n group.authorize(\"tcp\", 61000, 65000, \"0.0.0.0/0\")\n print \"Created new security group\"", "def test_deploy_instance_with_new_network_and_keypair(self):\n suffix = datetime.utcnow().strftime('%Y%m%d%H%M%S')\n instance_name = TEST_SERVER_PREFIX + \"_network_keypair_\" + suffix\n keypair_name = TEST_KEYPAIR_PREFIX + \"_\" + suffix\n network_name = TEST_NETWORK_PREFIX + \"_\" + suffix\n 
network_cidr = TEST_CIDR_PATTERN % 250\n self.__deploy_instance_helper__(instance_name=instance_name,\n network_name=network_name,\n network_cidr=network_cidr,\n keypair_name=keypair_name)", "def create_network(region_name, vpc_cidr, tag_prefix,\n tls_priv_key=None, tls_fullchain_cert=None,\n ssh_key_name=None, ssh_key_content=None, sally_ip=None,\n s3_logs_bucket=None, s3_identities_bucket=None,\n storage_enckey=None,\n dry_run=False):\n sg_tag_prefix = tag_prefix\n\n LOGGER.info(\"Provisions network ...\")\n ec2_client = boto3.client('ec2', region_name=region_name)\n\n # Create a VPC\n vpc_id, vpc_cidr_read = _get_vpc_id(tag_prefix, ec2_client=ec2_client,\n region_name=region_name)\n if vpc_id:\n if vpc_cidr != vpc_cidr_read:\n raise RuntimeError(\n \"%s cidr block for VPC is %s while it was expected to be %s\" %\n (tag_prefix, vpc_cidr_read, vpc_cidr))\n else:\n if not vpc_cidr:\n raise RuntimeError(\n \"%s could not find VPC and no cidr block is specified\"\\\n \" to create one.\" % tag_prefix)\n resp = ec2_client.create_vpc(\n DryRun=dry_run,\n CidrBlock=vpc_cidr,\n AmazonProvidedIpv6CidrBlock=False,\n InstanceTenancy='default')\n vpc_id = resp['Vpc']['VpcId']\n ec2_client.create_tags(\n DryRun=dry_run,\n Resources=[vpc_id],\n Tags=[\n {'Key': \"Prefix\", 'Value': tag_prefix},\n {'Key': \"Name\", 'Value': \"%s-vpc\" % tag_prefix}])\n LOGGER.info(\"%s created VPC %s\", tag_prefix, vpc_id)\n\n # Create subnets for app, dbs and web services\n # ELB will require that there is at least one subnet per availability zones.\n # RDS will require that there is at least two subnets for databases.\n resp = ec2_client.describe_availability_zones()\n zones = {(zone['ZoneId'], zone['ZoneName'])\n for zone in resp['AvailabilityZones']}\n web_subnet_cidrs, dbs_subnet_cidrs, app_subnet_cidrs = _split_cidrs(\n vpc_cidr, zones=zones, region_name=region_name)\n\n LOGGER.info(\"%s provisioning web subnets...\", tag_prefix)\n web_zones = set([])\n web_subnet_by_cidrs = _get_subnet_by_cidrs(\n web_subnet_cidrs, tag_prefix, vpc_id=vpc_id, ec2_client=ec2_client)\n for cidr_block, subnet in web_subnet_by_cidrs.items():\n if subnet:\n web_zones |= {\n (subnet['AvailabilityZoneId'], subnet['AvailabilityZone'])}\n for cidr_block, subnet in web_subnet_by_cidrs.items():\n if not subnet:\n available_zones = zones - web_zones\n zone_id, zone_name = available_zones.pop()\n try:\n resp = ec2_client.create_subnet(\n AvailabilityZoneId=zone_id,\n CidrBlock=cidr_block,\n VpcId=vpc_id,\n TagSpecifications=[{\n 'ResourceType': 'subnet',\n 'Tags': [\n {'Key': \"Prefix\", 'Value': tag_prefix},\n {'Key': \"Name\",\n 'Value': \"%s %s web\" % (tag_prefix, zone_name)}]}],\n DryRun=dry_run)\n subnet = resp['Subnet']\n web_subnet_by_cidrs[cidr_block] = subnet\n web_zones |= set([(zone_id, zone_name)])\n subnet_id = subnet['SubnetId']\n LOGGER.info(\"%s created subnet %s in zone %s for cidr %s\",\n tag_prefix, subnet_id, zone_name, cidr_block)\n except botocore.exceptions.ClientError as err:\n if not err.response.get('Error', {}).get(\n 'Code', 'Unknown') == 'InvalidSubnet.Conflict':\n raise\n # We have a conflict, let's just skip over it.\n LOGGER.warning(\n \"%s (skip) created subnet in zone %s because '%s'\",\n tag_prefix, zone_name, err)\n if subnet and not subnet['MapPublicIpOnLaunch']:\n subnet_id = subnet['SubnetId']\n if not dry_run:\n resp = ec2_client.modify_subnet_attribute(\n SubnetId=subnet_id,\n MapPublicIpOnLaunch={'Value': True})\n LOGGER.info(\"%s modify web subnet %s so instance can receive\"\\\n \" a public IP by 
default\", tag_prefix, subnet_id)\n\n LOGGER.info(\"%s provisioning dbs subnets...\", tag_prefix)\n dbs_zones = set([])\n dbs_subnet_by_cidrs = _get_subnet_by_cidrs(\n dbs_subnet_cidrs, tag_prefix, vpc_id=vpc_id, ec2_client=ec2_client)\n for cidr_block, subnet in dbs_subnet_by_cidrs.items():\n if subnet:\n dbs_zones |= {\n (subnet['AvailabilityZoneId'], subnet['AvailabilityZone'])}\n for cidr_block, subnet in dbs_subnet_by_cidrs.items():\n if not subnet:\n available_zones = zones - dbs_zones\n zone_id, zone_name = available_zones.pop()\n resp = ec2_client.create_subnet(\n AvailabilityZoneId=zone_id,\n CidrBlock=cidr_block,\n VpcId=vpc_id,\n TagSpecifications=[{\n 'ResourceType': 'subnet',\n 'Tags': [\n {'Key': \"Prefix\", 'Value': tag_prefix},\n {'Key': \"Name\",\n 'Value': \"%s %s dbs\" % (tag_prefix, zone_name)}]}],\n DryRun=dry_run)\n subnet = resp['Subnet']\n dbs_subnet_by_cidrs[cidr_block] = subnet\n dbs_zones |= set([(zone_id, zone_name)])\n subnet_id = subnet['SubnetId']\n LOGGER.info(\"%s created subnet %s in zone %s for cidr %s\",\n tag_prefix, subnet_id, zone_name, cidr_block)\n if subnet['MapPublicIpOnLaunch']:\n subnet_id = subnet['SubnetId']\n if not dry_run:\n resp = ec2_client.modify_subnet_attribute(\n SubnetId=subnet_id,\n MapPublicIpOnLaunch={'Value': False})\n LOGGER.info(\"%s modify dbs subnet %s so instance do not receive\"\\\n \" a public IP by default\", tag_prefix, subnet_id)\n\n LOGGER.info(\"%s provisioning apps subnets...\", tag_prefix)\n app_zones = set([])\n app_subnet_by_cidrs = _get_subnet_by_cidrs(\n app_subnet_cidrs, tag_prefix, vpc_id=vpc_id, ec2_client=ec2_client)\n for cidr_block, subnet in app_subnet_by_cidrs.items():\n if subnet:\n app_zones |= {\n (subnet['AvailabilityZoneId'], subnet['AvailabilityZone'])}\n for cidr_block, subnet in app_subnet_by_cidrs.items():\n if not subnet:\n available_zones = zones - app_zones\n zone_id, zone_name = available_zones.pop()\n resp = ec2_client.create_subnet(\n AvailabilityZoneId=zone_id,\n CidrBlock=cidr_block,\n VpcId=vpc_id,\n # COMMIT MSG:\n # this requires boto3>=1.14, using `createTag` might fail\n # because the subnet is not fully created yet.\n TagSpecifications=[{\n 'ResourceType': 'subnet',\n 'Tags': [\n {'Key': \"Prefix\", 'Value': tag_prefix},\n {'Key': \"Name\",\n 'Value': \"%s %s app\" % (tag_prefix, zone_name)}]}],\n DryRun=dry_run)\n subnet = resp['Subnet']\n app_subnet_by_cidrs[cidr_block] = subnet\n app_zones |= set([(zone_id, zone_name)])\n subnet_id = subnet['SubnetId']\n LOGGER.info(\"%s created subnet %s in %s for cidr %s\",\n tag_prefix, subnet_id, zone_name, cidr_block)\n if subnet['MapPublicIpOnLaunch']:\n subnet_id = subnet['SubnetId']\n if not dry_run:\n resp = ec2_client.modify_subnet_attribute(\n SubnetId=subnet_id,\n MapPublicIpOnLaunch={'Value': False})\n LOGGER.info(\"%s modify app subnet %s so instance do not receive\"\\\n \" a public IP by default\", tag_prefix, subnet_id)\n\n # Ensure that the VPC has an Internet Gateway.\n resp = ec2_client.describe_internet_gateways(\n Filters=[{'Name': 'attachment.vpc-id', 'Values': [vpc_id]}])\n if resp['InternetGateways']:\n igw_id = resp['InternetGateways'][0]['InternetGatewayId']\n LOGGER.info(\"%s found Internet Gateway %s\", tag_prefix, igw_id)\n else:\n resp = ec2_client.describe_internet_gateways(\n Filters=[{'Name': 'tag:Prefix', 'Values': [tag_prefix]}])\n if resp['InternetGateways']:\n igw_id = resp['InternetGateways'][0]['InternetGatewayId']\n LOGGER.info(\"%s found Internet Gateway %s\", tag_prefix, igw_id)\n else:\n resp = 
ec2_client.create_internet_gateway(DryRun=dry_run)\n igw_id = resp['InternetGateway']['InternetGatewayId']\n ec2_client.create_tags(\n DryRun=dry_run,\n Resources=[igw_id],\n Tags=[{'Key': \"Prefix\", 'Value': tag_prefix},\n {'Key': \"Name\",\n 'Value': \"%s internet gateway\" % tag_prefix}])\n LOGGER.info(\"%s created Internet Gateway %s\", tag_prefix, igw_id)\n resp = ec2_client.attach_internet_gateway(\n DryRun=dry_run,\n InternetGatewayId=igw_id,\n VpcId=vpc_id)\n\n # Create the NAT gateway by which private subnets connect to Internet\n # XXX Why do we have a Network interface eni-****?\n nat_elastic_ip = None\n web_elastic_ip = None\n resp = ec2_client.describe_addresses(\n Filters=[{'Name': 'tag:Prefix', 'Values': [tag_prefix]}])\n if resp['Addresses']:\n for resp_address in resp['Addresses']:\n for resp_tag in resp_address['Tags']:\n if resp_tag['Key'] == 'Name':\n if 'NAT gateway' in resp_tag['Value']:\n nat_elastic_ip = resp_address['AllocationId']\n break\n if 'Sally' in resp_tag['Value']:\n web_elastic_ip = resp_address['AllocationId']\n break\n\n if nat_elastic_ip:\n LOGGER.info(\"%s found NAT gateway public IP %s\",\n tag_prefix, nat_elastic_ip)\n else:\n resp = ec2_client.allocate_address(\n DryRun=dry_run,\n Domain='vpc')\n nat_elastic_ip = resp['AllocationId']\n ec2_client.create_tags(\n DryRun=dry_run,\n Resources=[nat_elastic_ip],\n Tags=[{'Key': \"Prefix\", 'Value': tag_prefix},\n {'Key': \"Name\",\n 'Value': \"%s NAT gateway public IP\" % tag_prefix}])\n LOGGER.info(\"%s created NAT gateway public IP %s\",\n tag_prefix, nat_elastic_ip)\n if web_elastic_ip:\n LOGGER.info(\"%s found Sally public IP %s\",\n tag_prefix, web_elastic_ip)\n else:\n resp = ec2_client.allocate_address(\n DryRun=dry_run,\n Domain='vpc')\n web_elastic_ip = resp['AllocationId']\n ec2_client.create_tags(\n DryRun=dry_run,\n Resources=[web_elastic_ip],\n Tags=[{'Key': \"Prefix\", 'Value': tag_prefix},\n {'Key': \"Name\",\n 'Value': \"%s Sally public IP\" % tag_prefix}])\n LOGGER.info(\"%s created Sally public IP %s\",\n tag_prefix, web_elastic_ip)\n\n # We have 2 EIP addresses. 
They need to be connected to machines\n # running in an Internet facing subnet.\n client_token = tag_prefix\n # XXX shouldn't it be the first web subnet instead?\n resp = ec2_client.describe_nat_gateways(Filters=[\n {'Name': \"vpc-id\", 'Values': [vpc_id]},\n {'Name': \"state\", 'Values': ['pending', 'available']}])\n if resp['NatGateways']:\n if len(resp['NatGateways']) > 1:\n LOGGER.warning(\"%s found more than one NAT gateway.\"\\\n \" Using first one in the list.\", tag_prefix)\n nat_gateway = resp['NatGateways'][0]\n nat_gateway_id = nat_gateway['NatGatewayId']\n nat_gateway_subnet_id = nat_gateway['SubnetId']\n LOGGER.info(\"%s found NAT gateway %s\", tag_prefix, nat_gateway_id)\n else:\n nat_gateway_subnet_id = next(web_subnet_by_cidrs.values())['SubnetId']\n resp = ec2_client.create_nat_gateway(\n AllocationId=nat_elastic_ip,\n ClientToken=client_token,\n SubnetId=nat_gateway_subnet_id)\n nat_gateway_id = resp['NatGateway']['NatGatewayId']\n ec2_client.create_tags(\n DryRun=dry_run,\n Resources=[nat_gateway_id],\n Tags=[{'Key': \"Prefix\", 'Value': tag_prefix},\n {'Key': \"Name\",\n 'Value': \"%s NAT gateway\" % tag_prefix}])\n LOGGER.info(\"%s created NAT gateway %s\",\n tag_prefix, nat_gateway_id)\n\n # Set up public and NAT-protected route tables\n resp = ec2_client.describe_route_tables(\n Filters=[{'Name': \"vpc-id\", 'Values': [vpc_id]}])\n public_route_table_id = None\n private_route_table_id = None\n for route_table in resp['RouteTables']:\n for route in route_table['Routes']:\n if 'GatewayId' in route and route['GatewayId'] == igw_id:\n public_route_table_id = route_table['RouteTableId']\n LOGGER.info(\"%s found public route table %s\",\n tag_prefix, public_route_table_id)\n break\n if ('NatGatewayId' in route and\n route['NatGatewayId'] == nat_gateway_id):\n private_route_table_id = route_table['RouteTableId']\n LOGGER.info(\"%s found private route table %s\",\n tag_prefix, private_route_table_id)\n\n if not public_route_table_id:\n resp = ec2_client.create_route_table(\n DryRun=dry_run,\n VpcId=vpc_id)\n public_route_table_id = resp['RouteTable']['RouteTableId']\n ec2_client.create_tags(\n DryRun=dry_run,\n Resources=[public_route_table_id],\n Tags=[\n {'Key': \"Prefix\", 'Value': tag_prefix},\n {'Key': \"Name\", 'Value': \"%s public\" % tag_prefix}])\n LOGGER.info(\"%s created public subnet route table %s\",\n tag_prefix, public_route_table_id)\n resp = ec2_client.create_route(\n DryRun=dry_run,\n DestinationCidrBlock='0.0.0.0/0',\n GatewayId=igw_id,\n RouteTableId=public_route_table_id)\n\n if not private_route_table_id:\n resp = ec2_client.create_route_table(\n DryRun=dry_run,\n VpcId=vpc_id)\n private_route_table_id = resp['RouteTable']['RouteTableId']\n ec2_client.create_tags(\n DryRun=dry_run,\n Resources=[private_route_table_id],\n Tags=[\n {'Key': \"Prefix\", 'Value': tag_prefix},\n {'Key': \"Name\", 'Value': \"%s internal\" % tag_prefix}])\n private_route_table_id = resp['RouteTable']['RouteTableId']\n LOGGER.info(\"%s created private route table %s\",\n tag_prefix, private_route_table_id)\n for _ in range(0, NB_RETRIES):\n # The NAT Gateway takes some time to be fully operational.\n try:\n resp = ec2_client.create_route(\n DryRun=dry_run,\n DestinationCidrBlock='0.0.0.0/0',\n NatGatewayId=nat_gateway_id,\n RouteTableId=private_route_table_id)\n except botocore.exceptions.ClientError as err:\n if not err.response.get('Error', {}).get(\n 'Code', 'Unknown') == 'InvalidNatGatewayID.NotFound':\n raise\n time.sleep(RETRY_WAIT_DELAY)\n\n resp = 
ec2_client.describe_route_tables(\n DryRun=dry_run,\n RouteTableIds=[public_route_table_id])\n assocs = resp['RouteTables'][0]['Associations']\n if len(assocs) > 1:\n LOGGER.warning(\"%s found more than one route table association for\"\\\n \" public route table. Using first one in the list.\", tag_prefix)\n if not assocs[0]['Main']:\n LOGGER.warning(\"%s public route table is not the main one for the VPC.\",\n tag_prefix)\n\n for cidr_block, subnet in web_subnet_by_cidrs.items():\n if not subnet:\n # Maybe there was a conflict and we skipped this cidr_block.\n continue\n subnet_id = subnet['SubnetId']\n resp = ec2_client.describe_route_tables(\n DryRun=dry_run,\n Filters=[{\n 'Name': 'association.subnet-id',\n 'Values': [subnet_id]\n }])\n # The Main route table does not show as an explicit association.\n found_association = not bool(resp['RouteTables'])\n if found_association:\n LOGGER.info(\n \"%s found public route table %s associated to web subnet %s\",\n tag_prefix, public_route_table_id, subnet_id)\n else:\n resp = ec2_client.associate_route_table(\n DryRun=dry_run,\n RouteTableId=public_route_table_id,\n SubnetId=subnet_id)\n LOGGER.info(\n \"%s associate public route table %s to web subnet %s\",\n tag_prefix, public_route_table_id, subnet_id)\n\n for cidr_block, subnet in dbs_subnet_by_cidrs.items():\n subnet_id = subnet['SubnetId']\n resp = ec2_client.describe_route_tables(\n DryRun=dry_run,\n Filters=[{\n 'Name': 'association.subnet-id',\n 'Values': [subnet_id]\n }])\n # The Main route table does not show as an explicit association.\n found_association = False\n if resp['RouteTables']:\n found_association = (\n resp['RouteTables'][0]['Associations'][0]['RouteTableId'] ==\n private_route_table_id\n )\n if found_association:\n LOGGER.info(\n \"%s found private route table %s associated to dbs subnet %s\",\n tag_prefix, private_route_table_id, subnet_id)\n else:\n resp = ec2_client.associate_route_table(\n DryRun=dry_run,\n RouteTableId=private_route_table_id,\n SubnetId=subnet_id)\n LOGGER.info(\n \"%s associate private route table %s to dbs subnet %s\",\n tag_prefix, private_route_table_id, subnet_id)\n\n for cidr_block, subnet in app_subnet_by_cidrs.items():\n subnet_id = subnet['SubnetId']\n resp = ec2_client.describe_route_tables(\n DryRun=dry_run,\n Filters=[{\n 'Name': 'association.subnet-id',\n 'Values': [subnet_id]\n }])\n # The Main route table does not show as an explicit association.\n found_association = False\n if resp['RouteTables']:\n found_association = (\n resp['RouteTables'][0]['Associations'][0]['RouteTableId'] ==\n private_route_table_id\n )\n if found_association:\n LOGGER.info(\n \"%s found private route table %s associated to app subnet %s\",\n tag_prefix, private_route_table_id, subnet_id)\n else:\n resp = ec2_client.associate_route_table(\n DryRun=dry_run,\n RouteTableId=private_route_table_id,\n SubnetId=subnet_id)\n LOGGER.info(\n \"%s associate private route table %s to app subnet %s\",\n tag_prefix, private_route_table_id, subnet_id)\n\n # Create the ELB, proxies and databases security groups\n # The app security group (as the instance role) will be specific\n # to the application.\n #pylint:disable=unbalanced-tuple-unpacking\n moat_name, vault_name, gate_name, kitchen_door_name = \\\n _get_security_group_names([\n 'moat', 'vault', 'castle-gate', 'kitchen-door'],\n tag_prefix=sg_tag_prefix)\n moat_sg_id, vault_sg_id, gate_sg_id, kitchen_door_sg_id = \\\n _get_security_group_ids(\n [moat_name, vault_name, gate_name, kitchen_door_name],\n tag_prefix, 
vpc_id=vpc_id, ec2_client=ec2_client)\n\n update_moat_rules = (not moat_sg_id)\n update_gate_rules = (not gate_sg_id)\n update_vault_rules = (not vault_sg_id)\n update_kitchen_door_rules = (not kitchen_door_sg_id)\n\n if not moat_sg_id:\n resp = ec2_client.create_security_group(\n Description='%s ELB' % tag_prefix,\n GroupName=moat_name,\n VpcId=vpc_id,\n DryRun=dry_run)\n moat_sg_id = resp['GroupId']\n LOGGER.info(\"%s created %s security group %s\",\n tag_prefix, moat_name, moat_sg_id)\n if not gate_sg_id:\n resp = ec2_client.create_security_group(\n Description='%s session managers' % tag_prefix,\n GroupName=gate_name,\n VpcId=vpc_id,\n DryRun=dry_run)\n gate_sg_id = resp['GroupId']\n LOGGER.info(\"%s created %s security group %s\",\n tag_prefix, gate_name, gate_sg_id)\n if not vault_sg_id:\n resp = ec2_client.create_security_group(\n Description='%s databases' % tag_prefix,\n GroupName=vault_name,\n VpcId=vpc_id,\n DryRun=dry_run)\n vault_sg_id = resp['GroupId']\n LOGGER.info(\"%s created %s security group %s\",\n tag_prefix, vault_name, vault_sg_id)\n # kitchen_door_sg_id: Kitchen door security group is created later on\n # if we have ssh keys.\n\n resp = ec2_client.describe_security_groups(\n DryRun=dry_run,\n GroupIds=[moat_sg_id, vault_sg_id, gate_sg_id])\n for security_group in resp['SecurityGroups']:\n if security_group['GroupId'] == moat_sg_id:\n # moat rules\n LOGGER.info(\"%s check ingress rules for %s\", tag_prefix, moat_name)\n check_security_group_ingress(security_group, expected_rules=[\n {'port': 80, 'source': '0.0.0.0/0'},\n {'port': 80, 'source': '::/0'},\n {'port': 443, 'source': '0.0.0.0/0'},\n {'port': 443, 'source': '::/0'},\n ],\n tag_prefix=tag_prefix)\n elif security_group['GroupId'] == gate_sg_id:\n # castle-gate rules\n LOGGER.info(\"%s check ingress rules for %s\", tag_prefix, gate_name)\n check_security_group_ingress(security_group, expected_rules=[\n {'port': 80, 'source': moat_sg_id},\n {'port': 443, 'source': moat_sg_id}\n ],\n tag_prefix=tag_prefix)\n elif security_group['GroupId'] == vault_sg_id:\n # vault rules\n LOGGER.info(\"%s check ingress rules for %s\", tag_prefix, vault_name)\n check_security_group_ingress(security_group, expected_rules=[\n {'port': 5432, 'source': gate_sg_id}\n ],\n tag_prefix=tag_prefix)\n\n # moat allow rules\n if update_moat_rules:\n try:\n resp = ec2_client.authorize_security_group_ingress(\n DryRun=dry_run,\n GroupId=moat_sg_id,\n IpPermissions=[{\n 'FromPort': 80,\n 'IpProtocol': 'tcp',\n 'IpRanges': [{\n 'CidrIp': '0.0.0.0/0'\n }],\n 'ToPort': 80\n }, {\n 'FromPort': 80,\n 'IpProtocol': 'tcp',\n 'Ipv6Ranges': [{\n 'CidrIpv6': '::/0',\n }],\n 'ToPort': 80\n }, {\n 'FromPort': 443,\n 'IpProtocol': 'tcp',\n 'IpRanges': [{\n 'CidrIp': '0.0.0.0/0'\n }],\n 'ToPort': 443\n }, {\n 'FromPort': 443,\n 'IpProtocol': 'tcp',\n 'Ipv6Ranges': [{\n 'CidrIpv6': '::/0',\n }],\n 'ToPort': 443\n }])\n except botocore.exceptions.ClientError as err:\n if not err.response.get('Error', {}).get(\n 'Code', 'Unknown') == 'InvalidPermission.Duplicate':\n raise\n\n if update_gate_rules:\n # castle-gate allow rules\n try:\n resp = ec2_client.authorize_security_group_ingress(\n DryRun=dry_run,\n GroupId=gate_sg_id,\n IpPermissions=[{\n 'IpProtocol': 'tcp',\n 'FromPort': 80,\n 'ToPort': 80,\n 'UserIdGroupPairs': [{'GroupId': moat_sg_id}]\n }])\n except botocore.exceptions.ClientError as err:\n if not err.response.get('Error', {}).get(\n 'Code', 'Unknown') == 'InvalidPermission.Duplicate':\n raise\n try:\n resp = 
ec2_client.authorize_security_group_ingress(\n DryRun=dry_run,\n GroupId=gate_sg_id,\n IpPermissions=[{\n 'IpProtocol': 'tcp',\n 'FromPort': 443,\n 'ToPort': 443,\n 'UserIdGroupPairs': [{'GroupId': moat_sg_id}]\n }])\n except botocore.exceptions.ClientError as err:\n if not err.response.get('Error', {}).get(\n 'Code', 'Unknown') == 'InvalidPermission.Duplicate':\n raise\n try:\n resp = ec2_client.authorize_security_group_egress(\n DryRun=dry_run,\n GroupId=gate_sg_id,\n IpPermissions=[{\n 'IpProtocol': '-1',\n 'IpRanges': [{\n 'CidrIp': '0.0.0.0/0',\n }]}])\n except botocore.exceptions.ClientError as err:\n if not err.response.get('Error', {}).get(\n 'Code', 'Unknown') == 'InvalidPermission.Duplicate':\n raise\n # vault allow rules\n if update_vault_rules:\n try:\n resp = ec2_client.authorize_security_group_ingress(\n DryRun=dry_run,\n GroupId=vault_sg_id,\n IpPermissions=[{\n 'IpProtocol': 'tcp',\n 'FromPort': 5432,\n 'ToPort': 5432,\n 'UserIdGroupPairs': [{'GroupId': gate_sg_id}]\n }])\n except botocore.exceptions.ClientError as err:\n if not err.response.get('Error', {}).get(\n 'Code', 'Unknown') == 'InvalidPermission.Duplicate':\n raise\n try:\n resp = ec2_client.authorize_security_group_egress(\n DryRun=dry_run,\n GroupId=vault_sg_id,\n IpPermissions=[{\n 'IpProtocol': '-1',\n 'IpRanges': [{\n 'CidrIp': '0.0.0.0/0',\n }]}])\n except botocore.exceptions.ClientError as err:\n if not err.response.get('Error', {}).get(\n 'Code', 'Unknown') == 'InvalidPermission.Duplicate':\n raise\n\n # Create uploads and logs S3 buckets\n # XXX create the identities bucket?\n # XXX need to force private.\n if not s3_identities_bucket:\n s3_identities_bucket = '%s-identities' % tag_prefix\n s3_uploads_bucket = tag_prefix\n s3_client = boto3.client('s3')\n if s3_logs_bucket:\n try:\n resp = s3_client.create_bucket(\n ACL='private',\n Bucket=s3_logs_bucket,\n CreateBucketConfiguration={\n 'LocationConstraint': region_name\n })\n LOGGER.info(\"%s created S3 bucket for logs %s\",\n tag_prefix, s3_logs_bucket)\n except botocore.exceptions.ClientError as err:\n LOGGER.info(\"%s found S3 bucket for logs %s\",\n tag_prefix, s3_logs_bucket)\n if not err.response.get('Error', {}).get(\n 'Code', 'Unknown') == 'BucketAlreadyOwnedByYou':\n raise\n # Apply bucket encryption by default\n found_encryption = False\n try:\n resp = s3_client.get_bucket_encryption(\n Bucket=s3_logs_bucket)\n if resp['ServerSideEncryptionConfiguration']['Rules'][0][\n 'ApplyServerSideEncryptionByDefault'][\n 'SSEAlgorithm'] == 'AES256':\n found_encryption = True\n LOGGER.info(\"%s found encryption AES256 enabled on %s bucket\",\n tag_prefix, s3_logs_bucket)\n except botocore.exceptions.ClientError as err:\n LOGGER.info(\"%s found S3 bucket for logs %s\",\n tag_prefix, s3_logs_bucket)\n if not err.response.get('Error', {}).get('Code', 'Unknown') == \\\n 'ServerSideEncryptionConfigurationNotFoundError':\n raise\n if not found_encryption:\n s3_client.put_bucket_encryption(\n Bucket=s3_logs_bucket,\n ServerSideEncryptionConfiguration={\n 'Rules': [{\n 'ApplyServerSideEncryptionByDefault': {\n 'SSEAlgorithm': 'AES256',\n }\n }]\n })\n LOGGER.info(\"%s enable encryption on %s bucket\",\n tag_prefix, s3_logs_bucket)\n\n # Set versioning and lifecycle policies\n resp = s3_client.get_bucket_versioning(\n Bucket=s3_logs_bucket)\n if 'Status' in resp and resp['Status'] == 'Enabled':\n LOGGER.info(\"%s found versioning enabled on %s bucket\",\n tag_prefix, s3_logs_bucket)\n else:\n s3_client.put_bucket_versioning(\n Bucket=s3_logs_bucket,\n 
VersioningConfiguration={\n 'MFADelete': 'Disabled',\n 'Status': 'Enabled'\n })\n LOGGER.info(\"%s enable versioning on %s bucket\",\n tag_prefix, s3_logs_bucket)\n found_policy = False\n #pylint:disable=too-many-nested-blocks\n try:\n resp = s3_client.get_bucket_lifecycle_configuration(\n Bucket=s3_logs_bucket)\n for rule in resp['Rules']:\n if rule['Status'] == 'Enabled':\n found_rule = True\n for transition in rule['Transitions']:\n if transition['StorageClass'] == 'GLACIER':\n if transition.get('Days', 0) < 90:\n found_rule = False\n LOGGER.warning(\"%s lifecycle for 'GLACIER'\"\\\n \" is less than 90 days.\", tag_prefix)\n break\n if rule['Expiration'].get('Days', 0) < 365:\n found_rule = False\n LOGGER.warning(\n \"%s lifecycle expiration is less than 365 days.\",\n tag_prefix)\n for transition in rule['NoncurrentVersionTransitions']:\n if transition['StorageClass'] == 'GLACIER':\n if transition.get('NoncurrentDays', 0) < 90:\n found_rule = False\n LOGGER.warning(\n \"%s version lifecycle for 'GLACIER'\"\\\n \" is less than 90 days.\", tag_prefix)\n break\n if rule['NoncurrentVersionExpiration'].get(\n 'NoncurrentDays', 0) < 365:\n found_rule = False\n LOGGER.warning(\"%s lifecycle version expiration is\"\\\n \" less than 365 days.\", tag_prefix)\n if found_rule:\n found_policy = True\n except botocore.exceptions.ClientError as err:\n if not err.response.get('Error', {}).get(\n 'Code', 'Unknown') == 'NoSuchLifecycleConfiguration':\n raise\n if found_policy:\n LOGGER.info(\"%s found lifecycle policy on %s bucket\",\n tag_prefix, s3_logs_bucket)\n else:\n s3_client.put_bucket_lifecycle_configuration(\n Bucket=s3_logs_bucket,\n LifecycleConfiguration={\n \"Rules\": [{\n \"Status\": \"Enabled\",\n \"ID\": \"expire-logs\",\n \"Filter\": {\n \"Prefix\": \"\", # This is required.\n },\n \"Transitions\": [{\n \"Days\": 90,\n \"StorageClass\": \"GLACIER\"\n }],\n \"Expiration\" : {\n \"Days\": 365\n },\n \"NoncurrentVersionTransitions\": [{\n \"NoncurrentDays\": 90,\n \"StorageClass\": \"GLACIER\"\n }],\n 'NoncurrentVersionExpiration': {\n 'NoncurrentDays': 365\n },\n }]})\n LOGGER.info(\"%s update lifecycle policy on %s bucket\",\n tag_prefix, s3_logs_bucket)\n\n # https://docs.aws.amazon.com/elasticloadbalancing/latest/classic/enable-access-logs.html#attach-bucket-policy\n elb_account_ids_per_region = {\n 'us-east-1': '127311923021',\n 'us-east-2': '033677994240',\n 'us-west-1': '027434742980',\n 'us-west-2': '797873946194',\n 'af-south-1': '098369216593',\n 'ca-central-1': '985666609251',\n 'eu-central-1': '054676820928',\n 'eu-west-1': '156460612806',\n 'eu-west-2': '652711504416',\n 'eu-south-1': '635631232127',\n 'eu-west-3': '009996457667',\n 'eu-north-1': '897822967062',\n 'ap-east-1': '754344448648',\n 'ap-northeast-1': '582318560864',\n 'ap-northeast-2': '600734575887',\n 'ap-northeast-3': '383597477331',\n 'ap-southeast-1': '114774131450',\n 'ap-southeast-2': '783225319266',\n 'ap-south-1': '718504428378',\n 'me-south-1': '076674570225',\n 'sa-east-1': '507241528517'\n }\n elb_account_id = elb_account_ids_per_region[region_name]\n s3_client.put_bucket_policy(\n Bucket=s3_logs_bucket,\n Policy=json.dumps({\n \"Version\": \"2008-10-17\",\n \"Id\": \"WriteLogs\",\n \"Statement\": [{\n # billing reports\n \"Effect\": \"Allow\",\n \"Principal\": {\n \"Service\": \"billingreports.amazonaws.com\"\n },\n \"Action\": [\n \"s3:GetBucketAcl\",\n \"s3:GetBucketPolicy\"\n ],\n \"Resource\": \"arn:aws:s3:::%s\" % s3_logs_bucket\n }, {\n \"Effect\": \"Allow\",\n \"Principal\": {\n \"Service\": 
\"billingreports.amazonaws.com\"\n },\n \"Action\": \"s3:PutObject\",\n \"Resource\": \"arn:aws:s3:::%s/*\" % s3_logs_bucket\n }, {\n # ELB access logs\n \"Effect\": \"Allow\",\n \"Principal\": {\n \"AWS\": \"arn:aws:iam::%s:root\" % elb_account_id\n },\n \"Action\": \"s3:PutObject\",\n \"Resource\":\n \"arn:aws:s3:::%s/var/log/elb/*\" % s3_logs_bucket\n }, {\n \"Effect\": \"Allow\",\n \"Principal\": {\n \"Service\": \"delivery.logs.amazonaws.com\"\n },\n \"Action\": \"s3:PutObject\",\n \"Resource\":\n (\"arn:aws:s3:::%s/var/log/elb/*\" % s3_logs_bucket),\n \"Condition\": {\n \"StringEquals\": {\n \"s3:x-amz-acl\": \"bucket-owner-full-control\"\n }\n }\n }, {\n \"Effect\": \"Allow\",\n \"Principal\": {\n \"Service\": \"delivery.logs.amazonaws.com\"\n },\n \"Action\": \"s3:GetBucketAcl\",\n \"Resource\": \"arn:aws:s3:::%s\" % s3_logs_bucket\n }]\n }))\n\n if s3_uploads_bucket:\n try:\n resp = s3_client.create_bucket(\n ACL='private',\n Bucket=s3_uploads_bucket,\n CreateBucketConfiguration={\n 'LocationConstraint': region_name\n })\n LOGGER.info(\"%s created S3 bucket for uploads %s\",\n tag_prefix, s3_uploads_bucket)\n except botocore.exceptions.ClientError as err:\n LOGGER.info(\"%s found S3 bucket for uploads %s\",\n tag_prefix, s3_uploads_bucket)\n if not err.response.get('Error', {}).get(\n 'Code', 'Unknown') == 'BucketAlreadyOwnedByYou':\n raise\n\n # Create instance profiles ...\n iam_client = boto3.client('iam')\n # ... for webfront instances\n create_instance_profile(\n create_gate_role(gate_name,\n s3_logs_bucket=s3_logs_bucket, s3_uploads_bucket=s3_uploads_bucket,\n iam_client=iam_client, tag_prefix=tag_prefix),\n iam_client=iam_client, region_name=region_name,\n tag_prefix=tag_prefix, dry_run=dry_run)\n # ... for databases instances\n create_instance_profile(\n create_vault_role(vault_name,\n s3_logs_bucket=s3_logs_bucket, s3_uploads_bucket=s3_uploads_bucket,\n iam_client=iam_client, tag_prefix=tag_prefix),\n iam_client=iam_client, region_name=region_name,\n tag_prefix=tag_prefix, dry_run=dry_run)\n\n if ssh_key_name:\n if not ssh_key_content:\n ssh_key_path = os.path.join(os.getenv('HOME'),\n '.ssh', '%s.pub' % ssh_key_name)\n if os.path.exists(ssh_key_path):\n with open(ssh_key_path, 'rb') as ssh_key_obj:\n ssh_key_content = ssh_key_obj.read()\n else:\n LOGGER.warning(\"%s no content for SSH key %s\",\n tag_prefix, ssh_key_name)\n # import SSH keys\n try:\n resp = ec2_client.import_key_pair(\n DryRun=dry_run,\n KeyName=ssh_key_name,\n PublicKeyMaterial=ssh_key_content)\n LOGGER.info(\"%s imported SSH key %s\", tag_prefix, ssh_key_name)\n except botocore.exceptions.ClientError as err:\n if not err.response.get('Error', {}).get(\n 'Code', 'Unknown') == 'InvalidKeyPair.Duplicate':\n raise\n LOGGER.info(\"%s found SSH key %s\", tag_prefix, ssh_key_name)\n\n # ... 
for sally instances\n create_instance_profile(\n create_logs_role(kitchen_door_name,\n s3_logs_bucket=s3_logs_bucket,\n iam_client=iam_client, tag_prefix=tag_prefix),\n iam_client=iam_client, region_name=region_name,\n tag_prefix=tag_prefix, dry_run=dry_run)\n\n # allows SSH connection to instances for debugging\n update_kitchen_door_rules = (not kitchen_door_sg_id)\n if not kitchen_door_sg_id:\n resp = ec2_client.create_security_group(\n Description='%s SSH access' % tag_prefix,\n GroupName=kitchen_door_name,\n VpcId=vpc_id,\n DryRun=dry_run)\n kitchen_door_sg_id = resp['GroupId']\n LOGGER.info(\"%s created %s security group %s\",\n tag_prefix, kitchen_door_name, kitchen_door_sg_id)\n\n if update_kitchen_door_rules:\n try:\n if sally_ip:\n cidr_block = '%s/32' % sally_ip\n else:\n LOGGER.warning(\"no IP range was specified to restrict\"\\\n \" access to SSH port\")\n cidr_block = '0.0.0.0/0'\n resp = ec2_client.authorize_security_group_ingress(\n DryRun=dry_run,\n GroupId=kitchen_door_sg_id,\n CidrIp=cidr_block,\n IpProtocol='tcp',\n FromPort=22,\n ToPort=22)\n except botocore.exceptions.ClientError as err:\n if not err.response.get('Error', {}).get(\n 'Code', 'Unknown') == 'InvalidPermission.Duplicate':\n raise\n try:\n resp = ec2_client.authorize_security_group_egress(\n DryRun=dry_run,\n GroupId=kitchen_door_sg_id,\n IpPermissions=[{\n 'IpProtocol': '-1',\n 'IpRanges': [{\n 'CidrIp': '0.0.0.0/0',\n }]}])\n except botocore.exceptions.ClientError as err:\n if not err.response.get('Error', {}).get(\n 'Code', 'Unknown') == 'InvalidPermission.Duplicate':\n raise\n try:\n resp = ec2_client.authorize_security_group_ingress(\n DryRun=dry_run,\n GroupId=gate_sg_id,\n IpPermissions=[{\n 'IpProtocol': 'tcp',\n 'FromPort': 22,\n 'ToPort': 22,\n 'UserIdGroupPairs': [{'GroupId': kitchen_door_sg_id}]\n }])\n except botocore.exceptions.ClientError as err:\n if not err.response.get('Error', {}).get(\n 'Code', 'Unknown') == 'InvalidPermission.Duplicate':\n raise\n try:\n resp = ec2_client.authorize_security_group_ingress(\n DryRun=dry_run,\n GroupId=vault_sg_id,\n IpPermissions=[{\n 'IpProtocol': 'tcp',\n 'FromPort': 22,\n 'ToPort': 22,\n 'UserIdGroupPairs': [{'GroupId': kitchen_door_sg_id}]\n }])\n except botocore.exceptions.ClientError as err:\n if not err.response.get('Error', {}).get(\n 'Code', 'Unknown') == 'InvalidPermission.Duplicate':\n raise\n\n # Creates encryption keys (KMS) in region\n if not storage_enckey:\n storage_enckey = _get_or_create_storage_enckey(\n region_name, tag_prefix, dry_run=dry_run)\n\n # Create an Application ELB and WAF\n load_balancer_arn = create_elb(\n tag_prefix, web_subnet_by_cidrs, moat_sg_id,\n s3_logs_bucket=s3_logs_bucket,\n tls_priv_key=tls_priv_key, tls_fullchain_cert=tls_fullchain_cert,\n region_name=region_name)\n create_waf(\n tag_prefix,\n elb_arn=load_balancer_arn,\n s3_logs_bucket=s3_logs_bucket,\n region_name=region_name,\n dry_run=dry_run)", "def quickie():\n #info = { \"instance_type\": { default = \"t2.micro\", all = [ \"t2.micro\" ] }, \"image_id\" : { default = \"\", all = [] }, \"security_groups\" : { default = [], all = [] }, \"key_name\": { default = \"\", all = [] }}\n client = boto3.client(\"EC2\")\n data = client.describe_images()\n info[\"image_id\"][\"all\"]\n args = {}\n for attr in info:\n print(\"Available values for \"+attr+\":\\n\"+\" \".join(info[attr]))\n default = info[attr][0]\n var = raw_input(\"Choose \"+attr+\"[\"+default+\"]:\")\n if var == \"\":\n var = default\n if re.match(\"^.+\\s\", attr):\n args[attr] = [var]\n else:\n 
args[attr] = args\n reservation = client.run_instances(**args)", "def run_instance():\n ami_id = \"ami-04876f29fd3a5e8ba\" # AMI Id\n instance_type = \"t2.micro\" # Instance Type\n tag_specs = [\n {\n 'ResourceType': 'instance',\n 'Tags': [\n {\n 'Key': 'Name',\n 'Value': 'BoB10@ProductDev.sdk.8084'\n }\n ]\n }\n ]\n\n # Instantiate the service resource object\n ec2_resource = session.resource('ec2', region_name=region)\n # Run an instance\n\n instances = ec2_resource.create_instances(ImageId=ami_id, InstanceType=instance_type,\n MaxCount=1, MinCount=1, KeyName='BoB10@ProductDev.8084',\n TagSpecifications=tag_specs, \n SecurityGroupIds=['sg-06b757b4bb272d98f'],\n UserData=assemble_userdata().as_string())\n Instance_id = instances[0].id\n print('\\nInstance Id: ' + Instance_id)\n print('Image Id: ' + instances[0].image_id)\n print('Instance Type: ' + instances[0].instance_type)\n print('State: ' + instances[0].state['Name'])\n return Instance_id", "def create(self, name, public_key=None):\n data = {\n \"keypair\": {\n \"name\": name\n }\n }\n if public_key is not None:\n data['keypair']['public_key'] = public_key\n \n path = '/os-keypairs'\n res = self.client.call(path, 'POST', data=json.dumps(data), \n token=self.manager.identity.token)\n self.logger.debug('Create/import openstack keypair: %s' % truncate(res))\n return res[0]['keypair']", "async def start_ec2_instance(self, env):\n instanceDef= {\n 'AWS_AMI_ID': os.getenv(\"AWS_AMI_ID\"),\n 'AWS_KEYNAME': os.getenv(\"AWS_KEYNAME\"),\n 'AWS_SECURITY_GROUP': os.getenv('AWS_SECURITY_GROUP'),\n 'AWS_SUBNET': os.getenv(\"AWS_SUBNET\"),\n 'DryRun':False,\n 'AWS_INSTANCE_NAME': 'Jupyter',\n 'AWS_IAM_ARN': os.getenv('AWS_IAM_ARN')\n }\n \n self.log.debug('building instance')\n ip = await self.buildInstance(instanceDef, env)\n return ip", "def create(profile, name, application, cname=None, version=None,\n tier=\"web\", key_pair=None, instance_type=\"t1.micro\",\n instance_profile=None, service_role=None,\n healthcheck_url=None, security_groups=None,\n max_instances=1, min_instances=1, tags=None,\n vpc_id=None, subnets=None, db_subnets=None,\n elb_subnets=None, elb_scheme=None,\n public_ip=None, root_volume_size=None):\n client = boto3client.get(\"elasticbeanstalk\", profile)\n params = {}\n params[\"ApplicationName\"] = application\n params[\"EnvironmentName\"] = name\n if cname:\n params[\"CNAMEPrefix\"] = cname\n if version:\n params[\"VersionLabel\"] = version\n stack = utils.get_multicontainer_docker_solution_stack(profile)\n params[\"SolutionStackName\"] = stack \n if tier == \"web\":\n tier_definition = {\n \"Name\": \"WebServer\",\n \"Type\": \"Standard\",\n \"Version\": \"1.0\",\n }\n elif tier == \"worker\":\n tier_definition = {\n \"Name\": \"Worker\",\n \"Type\": \"SQS/HTTP\",\n \"Version\": \"1.0\",\n }\n else:\n raise Exception(\"tier must be 'web' or 'worker'\")\n params[\"Tier\"] = tier_definition\n if tags:\n params[\"Tags\"] = tags\n options = []\n if key_pair:\n key_pair_option = {\n \"Namespace\": \"aws:autoscaling:launchconfiguration\",\n \"OptionName\": \"EC2KeyName\",\n \"Value\": key_pair,\n }\n options.append(key_pair_option)\n if instance_type:\n instance_type_option = {\n \"Namespace\": \"aws:autoscaling:launchconfiguration\",\n \"OptionName\": \"InstanceType\",\n \"Value\": instance_type,\n }\n options.append(instance_type_option)\n if instance_profile:\n profile_option = {\n \"Namespace\": \"aws:autoscaling:launchconfiguration\",\n \"OptionName\": \"IamInstanceProfile\",\n \"Value\": instance_profile,\n }\n 
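# Sketch: a minimal boto3 resource-API launch in the spirit of run_instance()
# above. The AMI id, key pair name and security group id are placeholder
# assumptions passed in by the caller, not values taken from the snippets.
import boto3

def launch_tagged_instance(ami_id, key_name, security_group_id,
                           region_name="us-east-1"):
    """Launch a single t2.micro and tag it with a Name at creation time."""
    ec2 = boto3.resource("ec2", region_name=region_name)
    instances = ec2.create_instances(
        ImageId=ami_id,
        InstanceType="t2.micro",
        MinCount=1,
        MaxCount=1,
        KeyName=key_name,
        SecurityGroupIds=[security_group_id],
        TagSpecifications=[{
            "ResourceType": "instance",
            "Tags": [{"Key": "Name", "Value": "example-instance"}],
        }])
    return instances[0].id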
options.append(profile_option)\n if service_role:\n role_option = {\n \"Namespace\": \"aws:elasticbeanstalk:environment\",\n \"OptionName\": \"ServiceRole\",\n \"Value\": service_role,\n }\n options.append(role_option)\n if healthcheck_url:\n healthcheck_url_option = {\n \"Namespace\": \"aws:elasticbeanstalk:application\",\n \"OptionName\": \"Application Healthcheck URL\",\n \"Value\": healthcheck_url,\n }\n options.append(healthcheck_url_option)\n if security_groups:\n security_groups_option = {\n \"Namespace\": \"aws:autoscaling:launchconfiguration\",\n \"OptionName\": \"SecurityGroups\",\n \"Value\": \",\".join(security_groups),\n }\n options.append(security_groups_option)\n if min_instances:\n min_instances_option = {\n \"Namespace\": \"aws:autoscaling:asg\",\n \"OptionName\": \"MinSize\",\n \"Value\": str(min_instances),\n }\n options.append(min_instances_option)\n if max_instances:\n max_instances_option = {\n \"Namespace\": \"aws:autoscaling:asg\",\n \"OptionName\": \"MaxSize\",\n \"Value\": str(max_instances),\n }\n options.append(max_instances_option)\n if vpc_id:\n vpc_id_option = {\n \"Namespace\": \"aws:ec2:vpc\",\n \"OptionName\": \"VPCId\",\n \"Value\": vpc_id,\n }\n options.append(vpc_id_option)\n if subnets:\n subnets_option = {\n \"Namespace\": \"aws:ec2:vpc\",\n \"OptionName\": \"Subnets\",\n \"Value\": \",\".join(subnets),\n }\n options.append(subnets_option)\n if db_subnets:\n db_subnets_option = {\n \"Namespace\": \"aws:ec2:vpc\",\n \"OptionName\": \"DBSubnets\",\n \"Value\": \",\".join(db_subnets),\n }\n options.append(db_subnets_option)\n if elb_subnets:\n elb_subnets_option = {\n \"Namespace\": \"aws:ec2:vpc\",\n \"OptionName\": \"ELBSubnets\",\n \"Value\": \",\".join(elb_subnets),\n }\n options.append(elb_subnets_option)\n if elb_scheme:\n elb_scheme_option = {\n \"Namespace\": \"aws:ec2:vpc\",\n \"OptionName\": \"ELBScheme\",\n \"Value\": elb_scheme,\n }\n options.append(elb_scheme_option)\n if public_ip:\n public_ip_option = {\n \"Namespace\": \"aws:ec2:vpc\",\n \"OptionName\": \"AssociatePublicIpAddress\",\n \"Value\": str(public_ip),\n }\n options.append(public_ip_option)\n if root_volume_size:\n root_volume_size_option = {\n \"Namespace\": \"aws:autoscaling:launchconfiguration\",\n \"OptionName\": \"RootVolumeSize\",\n \"Value\": str(root_volume_size),\n }\n options.append(root_volume_size_option)\n if options:\n params[\"OptionSettings\"] = options\n return client.create_environment(**params)", "def rule_40_can_create_sg(session):\n\n def try_create(session, side):\n res, conn_vpc = session[\"config\"][side][\"res\"], session[\"conn\"][side](\"vpc\")\n subnet = conn_vpc.get_all_subnets([res[\"subnet_id\"]])[0]\n\n try:\n conn_vpc.create_security_group(\n \"foo\", \"bar\", vpc_id = subnet.vpc_id, dry_run = True)\n except EC2ResponseError as e:\n if 412 != e.status:\n raise e\n\n try_create(session, \"server\")\n try_create(session, \"client\")\n\n return True", "def sgup(sg=\"sg_external_ssh\"):\n ip = os.popen(\"/usr/bin/curl ifconfig.co 2>/dev/null\").readline().strip()\n print(\"My Public IP is : \"+ip)\n client = boto3.client(\"ec2\")\n ippermissions = client.describe_security_groups(GroupNames = [ sg ])[\"SecurityGroups\"][0][\"IpPermissions\"]\n print(\"Revoking old IP from group \"+sg)\n client.revoke_security_group_ingress(GroupName = sg, IpPermissions = ippermissions)\n printr(\"Adding new IP to group \"+sg)\n client.authorize_security_group_ingress(GroupName=sg, IpProtocol=\"-1\", FromPort=0, ToPort=0, CidrIp=ip+\"/32\")", "def create(ctx, 
**kwargs):\n\n cloud_driver = get_cloud_driver(ctx)\n\n vpc = {\n 'description': None,\n 'name': ctx.node_id,\n }\n\n ctx.logger.debug('reading vpc configuration.')\n vpc.update(ctx.properties['network'])\n\n vpc_name = vpc['name']\n cidr = vpc['cidr']\n zone = vpc['zone']\n location = get_location(cloud_driver, zone)\n vpcoffer = vpc['service_offering']\n vpc_offering = get_vpc_offering(cloud_driver, vpcoffer)\n\n ctx.logger.info('Current node {0}{1}'.format(ctx.node_id, ctx.properties))\n\n ctx['vpc_id'] = ctx.properties\n\n if not _vpc_exists(cloud_driver, vpc_name):\n ctx.logger.info('creating vpc: {0}'.format(vpc_name))\n\n vpc = cloud_driver.ex_create_vpc(\n cidr=cidr,\n name=vpc_name,\n display_text=vpc_name,\n vpc_offering=vpc_offering,\n zone_id=location.id)\n else:\n ctx.logger.info('using existing vpc network {0}'.\n format(vpc_name))\n vpc = get_vpc(cloud_driver, vpc_name)\n\n ctx['vpc_id'] = vpc.id\n ctx['vpc_name'] = vpc.name", "def launch_instance(tag, key_name, group_name, inst_type, ami_name, user_data,\n wait=True, returninfo=None):\n ec2 = boto.ec2.connect_to_region(AWS_REGION)\n failures = 0\n max_failures = 10\n while True:\n try:\n reservation = ec2.run_instances(ami_name,\n key_name=key_name,\n security_groups=[group_name],\n instance_type=inst_type,\n user_data=None)\n break\n except Exception, err:\n # Failed to get instance; wait 15 seconds and then try again (up to\n # 10 total times)\n errortext = str(err)\n if errortext.find(\"Not authorized for images\") >= 0:\n print \"**************************************\"\n print \"* Error from AWS suggests that the AMI code in\"\n print \"* CloudSetup.py is deprecated. Please go to\"\n print \"* https://aws.amazon.com/marketplace/ and search for\"\n print \"* \\\"Ubuntu server lts hvm\\\", selecting the most recent\"\n print \"* version. Click \\\"Continue\\\", \\\"Manual Launch\\\",\"\n print \"* and then copy the AMI ID for the US East region.\"\n print \"* Copy that to the AMI_NAME value in CloudSetup.py\"\n print \"* and re-run.\"\n print \"***************************************\"\n print \"* (Full text of error):\"\n print errortext\n print \"***************************************\"\n return None\n elif errortext.find(\"accept terms and subscribe\") >= 0:\n print \"**************************************\"\n print \"* Error from AWS suggests that you have never used this\"\n print \"* AMI before and need to accept its terms and\"\n print \"* subscribe to it. Please follow the link in the below\"\n print \"* error text. Click \\\"Continue\\\", \\\"Manual Launch\\\",\"\n print \"* and \\\"Accept Terms\\\". 
After receiving email\"\n print \"* confirmation, you can re-run the code.\"\n print \"**************************************\"\n print \"* (Full text of error):\"\n print errortext\n print \"**************************************\"\n return None\n failures += 1\n if failures == max_failures:\n print \"**************************************\"\n print \"* Maximum number of instance launch failures reached.\"\n print \"* (Full text of error):\"\n print errortext\n print \"**************************************\"\n return None\n print \" ** ec2.run_instances failed for tag\", tag, \"; waiting 15\"\n print \" ** seconds and then trying again...\"\n time.sleep(15)\n\n time.sleep(5) # Slow things down -- they're never running super fast anyway\n instance = reservation.instances[0]\n time.sleep(5) # Slow things down -- they're never running super fast anyway\n instance.add_tag(\"tag\", tag)\n time.sleep(5) # Slow things down -- they're never running super fast anyway\n\n if wait:\n print \" Instance requested, waiting for 'running' for tag\", tag\n while instance.state != \"running\":\n print \" %s ...\" % tag\n time.sleep(5)\n try:\n instance.update()\n except boto.exception.EC2ResponseError as e:\n print \"******************\"\n print \"Error caught in instance.update():\"\n print e.strerror\n print \"******************\"\n print \" %s done!\" % tag\n if returninfo:\n returninfo.put(tag)\n return instance", "def dvs_remote_sg_simple(self):\n self.show_step(1)\n self.env.revert_snapshot(\"dvs_vcenter_systest_setup\")\n cluster_id = self.fuel_web.get_last_created_cluster()\n\n os_ip = self.fuel_web.get_public_vip(cluster_id)\n os_conn = os_actions.OpenStackActions(\n os_ip, SERVTEST_USERNAME,\n SERVTEST_PASSWORD,\n SERVTEST_TENANT)\n\n tenant = os_conn.get_tenant(SERVTEST_TENANT)\n\n # Create security group with rules for ssh and ping\n security_group = os_conn.create_sec_group_for_ssh()\n\n self.show_step(2)\n net_1 = os_conn.create_network(\n network_name=self.net_data[0].keys()[0],\n tenant_id=tenant.id)['network']\n\n subnet = os_conn.create_subnet(\n subnet_name=net_1['name'],\n network_id=net_1['id'],\n cidr=self.net_data[0][self.net_data[0].keys()[0]],\n ip_version=4)\n\n # Check that network are created\n assert_true(os_conn.get_network(net_1['name'])['id'] == net_1['id'])\n\n # Create Router_01, set gateway and add interface to external network\n router_1 = os_conn.create_router('router_1', tenant=tenant)\n\n # Add net_1 to router_1\n os_conn.add_router_interface(router_id=router_1[\"id\"],\n subnet_id=subnet[\"id\"])\n\n self.show_step(3)\n sg1 = os_conn.nova.security_groups.create('SG1', \"descr\")\n sg2 = os_conn.nova.security_groups.create('SG2', \"descr\")\n\n self.show_step(4)\n _sg_rules = os_conn.neutron.list_security_group_rules()\n sg_rules = [sg_rule for sg_rule in _sg_rules['security_group_rules']\n if sg_rule['security_group_id'] in [sg1.id, sg2.id]]\n for rule in sg_rules:\n os_conn.neutron.delete_security_group_rule(rule['id'])\n\n self.show_step(5)\n self.show_step(6)\n for sg in [sg1, sg2]:\n for rule in [self.icmp, self.tcp]:\n rule[\"security_group_rule\"][\"security_group_id\"] = sg.id\n rule[\"security_group_rule\"][\"remote_group_id\"] = sg.id\n rule[\"security_group_rule\"][\"direction\"] = \"ingress\"\n os_conn.neutron.create_security_group_rule(rule)\n rule[\"security_group_rule\"][\"direction\"] = \"egress\"\n os_conn.neutron.create_security_group_rule(rule)\n\n # Create access_point to instances from SG1 and SG2\n _, access_point_ip = 
openstack.create_access_point(\n os_conn=os_conn,\n nics=[{'net-id': net_1['id']}],\n security_groups=[security_group.name, sg1.name, sg2.name])\n\n self.show_step(7)\n istances_sg1 = openstack.create_instances(\n os_conn=os_conn,\n nics=[{'net-id': net_1['id']}],\n vm_count=1,\n security_groups=[sg1.name])\n\n self.show_step(8)\n istances_sg2 = openstack.create_instances(\n os_conn=os_conn,\n nics=[{'net-id': net_1['id']}],\n vm_count=1,\n security_groups=[sg2.name])\n openstack.verify_instance_state(os_conn)\n\n # Get private ips of instances\n ips = {\n 'SG1': [os_conn.get_nova_instance_ip(i, net_name=net_1['name'])\n for i in istances_sg1],\n 'SG2': [os_conn.get_nova_instance_ip(i, net_name=net_1['name'])\n for i in istances_sg2]\n }\n\n self.show_step(9)\n self.show_step(10)\n for group in ips:\n ip_pair = dict.fromkeys(ips[group])\n for key in ip_pair:\n ip_pair[key] = [value for value in ips[group] if key != value]\n openstack.check_connection_through_host(\n access_point_ip, ip_pair, timeout=60 * 5)\n\n self.show_step(11)\n ip_pair = dict.fromkeys(ips['SG1'])\n for key in ip_pair:\n ip_pair[key] = ips['SG2']\n openstack.check_connection_through_host(\n access_point_ip, ip_pair, result_of_command=1, timeout=60 * 5)", "def create_infrastructure(aws_key, aws_secret):\n ec2_client, s3_client, iam_client, redshift_client = create_clients(\n aws_key, aws_secret\n )\n role_arn = create_iam_role(iam_client)\n create_redshift_cluster(redshift_client, role_arn)\n # Loop until the cluster status becomes \"Available\"\n status = \"\"\n while status.lower() != \"available\":\n cluster_properties = get_cluster_properties(redshift_client)\n status = cluster_properties['ClusterStatus']\n print('Cluster status is %s' % status)\n time.sleep(30)\n set_vpc_properties(ec2_client, cluster_properties['VpcId'])\n print_cluster_properties(redshift_client)", "def create(self, args=None, **_):\n name = utils.get_resource_id()\n\n create_args = dict(\n name=name,\n description=ctx.node.properties['description'],\n vpc_id=self._get_connected_vpc()\n )\n\n create_args = utils.update_args(create_args, args)\n\n if ctx.operation.retry_number == 0 and constants.EXTERNAL_RESOURCE_ID \\\n not in ctx.instance.runtime_properties:\n try:\n security_group = self.execute(\n self.client.create_security_group, create_args,\n raise_on_falsy=True)\n except (exception.EC2ResponseError,\n exception.BotoServerError) as e:\n raise NonRecoverableError('{0}'.format(str(e)))\n utils.set_external_resource_id(\n security_group.id, ctx.instance, external=False)\n\n self.resource_id = \\\n ctx.instance.runtime_properties[constants.EXTERNAL_RESOURCE_ID]\n security_group = self.get_resource()\n\n if not security_group:\n return False\n\n self._create_group_rules(security_group)\n\n return True", "def __run_instances(self, number=1, policies={}):\n try:\n self.euca = Euca2ool('k:n:t:g:d:f:z:',\n ['key=', 'kernel=', 'ramdisk=', 'instance-count=', 'instance-type=',\n 'group=', 'user-data=', 'user-data-file=', 'addressing=', 'availability-zone='])\n except Exception, ex:\n sys.exit(1)\n\n instance_type = policies.get('instance_type') or 'm1.small'\n image_id = policies.get('image_id') or self.__get_image_id()[0]\n min_count = number\n max_count = number\n keyname = 'mykey'\n \n kernel_id = None\n ramdisk_id = None\n group_names = []\n user_data = None\n addressing_type = None\n zone = None\n user_data = None\n \n if image_id:\n euca_conn = self.__make_connection()\n try:\n reservation = euca_conn.run_instances(image_id = image_id,\n 
min_count = min_count,\n max_count = max_count,\n key_name = keyname,\n security_groups = group_names,\n user_data = user_data,\n addressing_type = addressing_type,\n instance_type = instance_type,\n placement = zone,\n kernel_id = kernel_id,\n ramdisk_id = ramdisk_id)\n except Exception, ex:\n self.euca.display_error_and_exit('error:%s' % ex)\n return reservation\n return False", "def create_vpc ( vpc_conn,\n ec2_conn,\n cloudwatch_conn,\n r53_conn,\n aws_account_type,\n region_name,\n base_topicarn,\n base_name,\n params ) :\n nat_subnet = None\n \n # Create VPC\n vpc_name = get_vpc_name( base_name )\n print \"Creating VPC with name \" + vpc_name\n vpc = vpc_conn.create_vpc('10.0.0.0/16')\n \n print \"Waiting for VPC to be created\"\n aws_wait( vpc_conn.get_all_vpcs, vpc.id )\n aws_cmd( ec2_conn.create_tags, [ vpc.id, { \"Name\": vpc_name } ] )\n vpc_conn.modify_vpc_attribute( vpc_id = vpc.id, enable_dns_support = True )\n vpc_conn.modify_vpc_attribute( vpc_id = vpc.id, enable_dns_hostnames = True )\n \n # Create Internate Gateway and attache to new VPC\n print \"Creating Internet Gateway and attaching to new VPC\"\n ig_name = get_ig_name( base_name )\n igw = vpc_conn.create_internet_gateway( )\n aws_wait( vpc_conn.get_all_internet_gateways, igw.id )\n vpc_conn.attach_internet_gateway( igw.id, vpc.id )\n aws_cmd( ec2_conn.create_tags, [ igw.id, { \"Name\": ig_name } ] )\n \n # Create public Route table \n print \"Creating Public Route table and attaching to new VPC\"\n public_rt_name = get_rt_name (base_name, 'PUBLIC')\n public_rt = vpc_conn.create_route_table( vpc.id )\n aws_wait( vpc_conn.get_all_route_tables, public_rt.id )\n aws_cmd( ec2_conn.create_tags, [ public_rt.id, { \"Name\": public_rt_name } ] )\n \n # Create private Route table\n print \"Creating Private Route table and attaching to new VPC\"\n private_rt_name = get_rt_name (base_name, 'PRIVATE')\n private_rt = vpc_conn.create_route_table( vpc.id )\n aws_wait( vpc_conn.get_all_route_tables, private_rt.id )\n aws_cmd( ec2_conn.create_tags, [ private_rt.id, { \"Name\": private_rt_name } ] )\n \n # Create route for everything through the igw\n print \"Attaching new Internet Gateway to the public routing table\"\n all_cidr = \"0.0.0.0/0\"\n vpc_conn.create_route( public_rt.id, all_cidr, gateway_id = igw.id )\n\n # Get the list of all the zones\n zones = ec2_conn.get_all_zones( )\n zone_mappings = {}\n print \"Creating public, private, and static subnets for each zone.\"\n \n idx = 0 \n zone_static_subnets = subnet_cidrs[ 'STATIC' ]\n for zone in zones:\n zm = zone_mapping( )\n zm.zone_letter = zone.name[-1:].upper( )\n zm.public_subnet = create_subnet( vpc_conn,\n ec2_conn,\n vpc.id,\n zone_public_subnet_map[ zm.zone_letter ],\n zone.name,\n base_name + \"-PUBLIC\" )\n \n zm.private_subnet = create_subnet( vpc_conn,\n ec2_conn,\n vpc.id,\n zone_private_subnet_map[ zm.zone_letter ],\n zone.name,\n base_name + \"-PRIVATE\" )\n \n zm.static_subnet = create_subnet( vpc_conn,\n ec2_conn,\n vpc.id,\n zone_static_subnets[ idx ],\n zone.name,\n base_name + \"-STATIC\" )\n idx = idx + 1\n\n # Map the public subnet to the public routing table.\n vpc_conn.associate_route_table( public_rt.id, zm.public_subnet.id )\n \n # Map the private subnet to the private routing table.\n vpc_conn.associate_route_table( private_rt.id, zm.private_subnet.id )\n \n # Map the static subnet to the private routing table.\n vpc_conn.associate_route_table( private_rt.id, zm.static_subnet.id )\n\n if not nat_subnet:\n nat_subnet = zm.public_subnet\n\n 
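# Sketch: the core wiring performed by create_vpc()/create_network() above --
# a VPC, an attached internet gateway and a public route table -- reduced to the
# minimum with boto3. The CIDR block and region are placeholder assumptions.
import boto3

def create_minimal_vpc(cidr_block="10.0.0.0/16", region_name="us-east-1"):
    """Create a VPC with an attached internet gateway and a default route to it."""
    ec2 = boto3.client("ec2", region_name=region_name)
    vpc_id = ec2.create_vpc(CidrBlock=cidr_block)["Vpc"]["VpcId"]
    ec2.get_waiter("vpc_available").wait(VpcIds=[vpc_id])
    igw_id = ec2.create_internet_gateway()["InternetGateway"]["InternetGatewayId"]
    ec2.attach_internet_gateway(InternetGatewayId=igw_id, VpcId=vpc_id)
    rt_id = ec2.create_route_table(VpcId=vpc_id)["RouteTable"]["RouteTableId"]
    ec2.create_route(RouteTableId=rt_id,
                     DestinationCidrBlock="0.0.0.0/0",
                     GatewayId=igw_id)
    return vpc_id, igw_id, rt_id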
zone_mappings[ zone.name ] = zm\n \n nat = create_nat ( vpc_conn = vpc_conn,\n ec2_conn = ec2_conn,\n cloudwatch_conn = cloudwatch_conn,\n r53_conn = r53_conn,\n aws_account_type = aws_account_type,\n region_name = region_name,\n vpc = vpc,\n base_name = base_name,\n base_topicarn = base_topicarn,\n zone_mapping_list = [ zone_mappings[ key ] for key in zone_mappings ],\n private_rt = private_rt,\n nat_subnet = nat_subnet,\n secgrp_rules = nat_secgrp_rules,\n monitor_rules = nat_monitor_rules )\n return vpc", "def dvs_instances_batch_mix_sg(self):\n self.show_step(1)\n self.env.revert_snapshot(\"dvs_vcenter_systest_setup\")\n\n cluster_id = self.fuel_web.get_last_created_cluster()\n\n os_ip = self.fuel_web.get_public_vip(cluster_id)\n os_conn = os_actions.OpenStackActions(\n os_ip, SERVTEST_USERNAME,\n SERVTEST_PASSWORD,\n SERVTEST_TENANT)\n tenant = os_conn.get_tenant(SERVTEST_TENANT)\n\n self.show_step(2)\n net_1 = os_conn.create_network(\n network_name=self.net_data[0].keys()[0],\n tenant_id=tenant.id)['network']\n\n subnet = os_conn.create_subnet(\n subnet_name=net_1['name'],\n network_id=net_1['id'],\n cidr=self.net_data[0][self.net_data[0].keys()[0]],\n ip_version=4)\n\n # Check that network is created\n assert_true(os_conn.get_network(net_1['name'])['id'] == net_1['id'])\n\n # Create Router_01, set gateway and add interface to external network\n router_1 = os_conn.create_router('router_1', tenant=tenant)\n\n # Add net_1 to router_1\n os_conn.add_router_interface(router_id=router_1[\"id\"],\n subnet_id=subnet[\"id\"])\n\n # Get max count of instances which we can create according to\n # resource limit\n vm_count = min(\n [os_conn.nova.hypervisors.resource_class.to_dict(h)['vcpus']\n for h in os_conn.nova.hypervisors.list()])\n\n self.show_step(3)\n self.show_step(4)\n sg1 = os_conn.nova.security_groups.create('SG1', \"descr\")\n sg2 = os_conn.nova.security_groups.create('SG2', \"descr\")\n\n self.icmp[\"security_group_rule\"][\"security_group_id\"] = sg1.id\n self.icmp[\"security_group_rule\"][\"remote_group_id\"] = sg1.id\n self.icmp[\"security_group_rule\"][\"direction\"] = \"ingress\"\n os_conn.neutron.create_security_group_rule(self.icmp)\n self.icmp[\"security_group_rule\"][\"direction\"] = \"egress\"\n os_conn.neutron.create_security_group_rule(self.icmp)\n\n for sg in [sg1, sg2]:\n self.tcp[\"security_group_rule\"][\"security_group_id\"] = sg.id\n self.tcp[\"security_group_rule\"][\"remote_group_id\"] = sg.id\n self.tcp[\"security_group_rule\"][\"direction\"] = \"ingress\"\n os_conn.neutron.create_security_group_rule(self.tcp)\n self.tcp[\"security_group_rule\"][\"direction\"] = \"egress\"\n os_conn.neutron.create_security_group_rule(self.tcp)\n\n # Add rules for ssh and ping\n os_conn.goodbye_security()\n _s_groups = os_conn.neutron.list_security_groups()['security_groups']\n _srv_tenant_id = os_conn.get_tenant(SERVTEST_TENANT).id\n default_sg = [sg for sg in _s_groups\n if sg['tenant_id'] == _srv_tenant_id and\n sg['name'] == 'default'][0]\n\n for step in (5, 9):\n self.show_step(step) # step 5, 9\n step += 1\n self.show_step(step) # step 6, 10\n step += 1\n\n openstack.create_instances(\n os_conn=os_conn,\n nics=[{'net-id': net_1['id']}],\n vm_count=vm_count,\n security_groups=[sg1.name, sg2.name, default_sg['name']])\n openstack.verify_instance_state(os_conn)\n\n self.show_step(step) # step 7, 11\n step += 1\n\n instances = os_conn.nova.servers.list()\n fip = openstack.create_and_assign_floating_ips(os_conn, instances)\n ip_pair = dict.fromkeys(fip)\n for key in ip_pair:\n 
ip_pair[key] = [value for value in fip if key != value]\n openstack.check_connection_vms(ip_pair,\n timeout=60 * 5,\n interval=10)\n\n self.show_step(step) # step 8, 12\n step += 1\n\n for instance in instances:\n os_conn.nova.servers.delete(instance)\n logger.info(\"Check that instance was deleted.\")\n os_conn.verify_srv_deleted(instance)", "def get_keypair(ec2):\n # call the boto ec2 function to create a key pair\n key_pair = ec2.create_key_pair(KeyName=KEY_PAIR_NAME)\n print(\"\\n===Created a new key pair in AWS.\")\n\n # capture the key and store it in a file\n KeyPairOut = str(key_pair.key_material)\n\n # create a file to store the key locally\n print(\"Saving the keypair.\")\n key_pair_path = KEY_PAIR_NAME + \".pem\"\n with open(key_pair_path, \"w\") as f:\n f.write(KeyPairOut)\n os.chmod(key_pair_path, 0o600)\n print(\"===Changed access permission to read-only.\")", "def mk_rg1(self):\n name = f\"{self.env_name}/sg/project-default\"\n self.sg_project_default = ec2.SecurityGroup(\n \"SecurityGroupProjectDefault\",\n rp_GroupDescription=\"Resources that has this security can talk to each other\",\n p_GroupName=name,\n p_VpcId=self.vpc_id,\n p_SecurityGroupIngress=[\n ec2.PropSecurityGroupIngress(\n rp_IpProtocol=\"-1\",\n p_FromPort=-1,\n p_ToPort=-1,\n p_CidrIp=f\"{authorized_ip}/32\",\n )\n for authorized_ip in self.sg_authorized_ips\n ],\n p_Tags=cf.Tag.make_many(\n Name=name\n ),\n )", "def launch_instance(cloud):\n js = _get_jetstream_conn()\n\n sgs = ['CloudLaunchDefault']\n kp_name = \"cloudman_key_pair\"\n inst_size = 'm1.small'\n network_id = '86a1c3e8-b1fb-41f3-bcaf-8334567fe989'\n lc = js.compute.instances.create_launch_config()\n lc.add_network_interface(network_id)\n\n img_id = '2cf07e4a-62a8-41c2-9282-f3c53962f296' # Gxy Standalone 161021b01\n name = 'ea-galaxy-{0}'.format(strftime(\"%m-%d-%H-%M\", localtime()))\n\n i = js.compute.instances.create(\n name, img_id, inst_size, security_groups=sgs, launch_config=lc,\n key_pair=kp_name)\n return i", "def _Create(self) -> None:\n cmd = util.GcloudCommand(self, 'spanner', 'instances', 'create', self.name)\n cmd.flags['description'] = self._description\n cmd.flags['nodes'] = self._nodes\n cmd.flags['config'] = self._config\n _, _, retcode = cmd.Issue(raise_on_failure=False)\n if retcode != 0:\n logging.error('Create GCP Spanner instance failed.')\n return\n\n cmd = util.GcloudCommand(self, 'spanner', 'databases', 'create',\n self.database)\n cmd.flags['instance'] = self.name\n _, _, retcode = cmd.Issue(raise_on_failure=False)\n if retcode != 0:\n logging.error('Create GCP Spanner database failed.')\n return\n\n cmd = util.GcloudCommand(self, 'spanner', 'databases', 'ddl', 'update',\n self.database)\n cmd.flags['instance'] = self.name\n cmd.flags['ddl'] = self._ddl\n _, _, retcode = cmd.Issue(raise_on_failure=False)\n if retcode != 0:\n logging.error('Update GCP Spanner database schema failed.')\n else:\n logging.info('Created GCP Spanner instance and database.')", "def instance(template, name, ami, type, keypair, interfaces,\n availability_zone=None, user_data=None, placement_group=None, role='unknown', iam_role=None,\n volume_size=None, tags=None):\n i = Instance(name, template=template)\n i.ImageId = ami\n i.InstanceType = type\n i.KeyName = Ref(keypair)\n\n i.Tags = Tags(Name=aws_name(i.title))\n if role:\n i.Tags += Tags(Role=role)\n\n if tags:\n i.Tags += Tags(**tags)\n\n if iam_role:\n if isinstance(iam_role, str):\n i.IamInstanceProfile = iam_role\n else:\n i.DependsOn = iam_role.title\n i.IamInstanceProfile = 
Ref(iam_role)\n\n if availability_zone:\n i.AvailabilityZone = availability_zone\n\n if placement_group:\n i.PlacementGroupName = Ref(placement_group)\n\n if volume_size:\n i.BlockDeviceMappings = [\n BlockDeviceMapping(DeviceName=\"/dev/sda1\", Ebs=EBSBlockDevice(VolumeSize=volume_size))\n ]\n\n if interfaces:\n i.NetworkInterfaces = [NetworkInterfaceProperty(DeviceIndex=index,\n NetworkInterfaceId=Ref(interface))\n for (index, interface) in enumerate(interfaces)]\n\n if user_data:\n i.UserData = Base64(Join('', [line + '\\n' for line in user_data.splitlines()]))\n\n return i", "def __enter__(self):\n self.key = self.make_new_key()\n self.sec_grp, self.group_id = self.make_new_group()\n\n printy(\"New key {} created\".format(self.key.name))\n printy(\"new security group {} created\".format(self.sec_grp.group_name))\n\n return self", "def create_gce_instance(args, ip_address):\n Print.GN('Creating GCE VM.')\n instance_name = GCE_INSTANCE_NAME.lower()\n firewall_tag = 'https-server' if args.https_only else LEO_FIREWALL_RULE\n cmd = ['gcloud', 'compute', 'instances', 'create',\n instance_name,\n '--image-family', 'ubuntu-1604-lts',\n '--image-project', 'ubuntu-os-cloud',\n '--project', args.project,\n '--scopes', 'cloud-platform',\n '--zone', args.zone,\n '--address', ip_address,\n '--machine-type', args.gce_instance_type,\n '--service-account', args.service_account,\n '--boot-disk-size', str(args.gce_disk_size),\n '--labels', 'instance-creator=leonardo-easy-deploy',\n '--tags', firewall_tag,\n '--boot-disk-auto-delete',\n # 'metadata-from-file' must be the last argument.\n '--metadata-from-file']\n with tempfile.NamedTemporaryFile(mode='w') as startup_file:\n gce_vars = GCE_INIT_SCRIPT_VARS.format(\n server_host=args.host,\n user=LOCAL_USER,\n docker_image=_to_gcr_path(args.project, 'leonardo', args.branch),\n server_ssl_key=args.ssl_key,\n server_ssl_cert=args.ssl_cert,\n server_ca_bundle=args.ssl_ca_bundle,\n rootca_key=args.rootca_key,\n kms_key=args.kms_key,\n kms_keyring=args.kms_keyring,\n kms_project=args.kms_project,\n kms_location=args.kms_location,\n ssl_test_file=args.ssl_test_file,\n )\n startup_file.write(gce_vars + '\\n' + GCE_INIT_SCRIPT_LOGIC)\n startup_file.flush()\n cmd.append('startup-script=%s' % startup_file.name)\n subprocess.check_call(cmd)\n # Startup script always takes time during which the instance\n # is unavailable.\n time.sleep(15)\n print('Successfully created instance: %s' % instance_name)\n return instance_name", "def create_sally_resources(region_name, app_name, image_name,\n storage_enckey=None,\n s3_logs_bucket=None,\n identities_url=None,\n ssh_key_name=None,\n ssh_port=None,\n company_domain=None,\n ldap_host=None,\n security_group_name=None,\n instance_type=None,\n web_subnet_id=None,\n vpc_id=None,\n vpc_cidr=None,\n tag_prefix=None,\n dry_run=False):\n subnet_id = web_subnet_id\n sg_tag_prefix = tag_prefix\n if not security_group_name:\n security_group_name = 'kitchen-door'\n sg_name = _get_security_group_names(\n [security_group_name], tag_prefix=sg_tag_prefix)[0]\n\n ec2_client = boto3.client('ec2', region_name=region_name)\n if not vpc_id:\n vpc_id, _ = _get_vpc_id(tag_prefix, ec2_client=ec2_client,\n region_name=region_name)\n if not subnet_id:\n #pylint:disable=unused-variable\n web_subnet_cidrs, _, _ = _split_cidrs(\n vpc_cidr, ec2_client=ec2_client, region_name=region_name)\n web_subnet_by_cidrs = _get_subnet_by_cidrs(\n web_subnet_cidrs, tag_prefix,\n vpc_id=vpc_id, ec2_client=ec2_client)\n # Use first valid subnet that does not require a 
public IP.\n subnet_id = next(iter(web_subnet_by_cidrs.values()))['SubnetId']\n\n group_ids = _get_security_group_ids(\n [sg_name], tag_prefix,\n vpc_id=vpc_id, ec2_client=ec2_client)\n\n iam_client = boto3.client('iam')\n instance_profile_arn = create_instance_profile(\n create_logs_role(sg_name,\n s3_logs_bucket=s3_logs_bucket,\n iam_client=iam_client, tag_prefix=tag_prefix),\n iam_client=iam_client, region_name=region_name,\n tag_prefix=tag_prefix, dry_run=dry_run)\n\n instances = create_instances(region_name, app_name, image_name,\n storage_enckey=storage_enckey,\n s3_logs_bucket=s3_logs_bucket,\n identities_url=identities_url,\n ssh_key_name=ssh_key_name,\n company_domain=company_domain,\n ldap_host=ldap_host,\n instance_type=instance_type,\n instance_profile_arn=instance_profile_arn,\n security_group_ids=group_ids,\n subnet_type=SUBNET_PUBLIC,\n subnet_id=subnet_id,\n vpc_id=vpc_id,\n vpc_cidr=vpc_cidr,\n tag_prefix=tag_prefix,\n dry_run=dry_run,\n ec2_client=ec2_client,\n ssh_port=ssh_port)\n\n return [instance['InstanceId'] for instance in instances]", "def create_instance(driver,\n user_id, sig_server_addr, sig_server_port, zone='us-central1-b',\n tags=[], branch='aosp-master', target='aosp_cf_x86_phone-userdebug'):\n\n target = target.replace('_','-')\n instance_name = f'halyard-{user_id}'\n image_family = f'halyard-{branch}-{target}'\n\n try:\n driver.ex_get_image_from_family(image_family)\n except:\n utils.fatal_error(f'Image family {image_family} does not exist.\\n \\\n New base images can be created using the `create_base_image` endpoint.')\n\n # Stops execution if instance already exists\n instance = utils.find_instance(driver, instance_name, zone)\n if instance:\n utils.fatal_error(f'Instance {instance_name} already exists.')\n\n build_node = driver.create_node(\n instance_name,\n 'n1-standard-4',\n None,\n location=zone,\n ex_image_family=image_family,\n ex_service_accounts=[{'scopes': ['storage-ro']}],\n ex_disk_size=30,\n ex_tags=tags)\n\n utils.wait_for_instance(instance_name, zone)\n\n print('successfully created new instance', instance_name)\n\n launch_cvd(instance_name, zone, sig_server_addr, sig_server_port, False)\n\n return {\"name\": instance_name}", "def create(cls, config_file=None, logical_volume = None, cfg = None, **params):\r\n if config_file:\r\n cfg = Config(path=config_file)\r\n if cfg.has_section('EC2'):\r\n # include any EC2 configuration values that aren't specified in params:\r\n for option in cfg.options('EC2'):\r\n if option not in params:\r\n params[option] = cfg.get('EC2', option)\r\n getter = CommandLineGetter()\r\n getter.get(cls, params)\r\n region = params.get('region')\r\n ec2 = region.connect()\r\n cls.add_credentials(cfg, ec2.aws_access_key_id, ec2.aws_secret_access_key)\r\n ami = params.get('ami')\r\n kp = params.get('keypair')\r\n group = params.get('group')\r\n zone = params.get('zone')\r\n # deal with possibly passed in logical volume:\r\n if logical_volume != None:\r\n cfg.set('EBS', 'logical_volume_name', logical_volume.name) \r\n cfg_fp = StringIO.StringIO()\r\n cfg.write(cfg_fp)\r\n # deal with the possibility that zone and/or keypair are strings read from the config file:\r\n if isinstance(zone, Zone):\r\n zone = zone.name\r\n if isinstance(kp, KeyPair):\r\n kp = kp.name\r\n reservation = ami.run(min_count=1,\r\n max_count=params.get('quantity', 1),\r\n key_name=kp,\r\n security_groups=[group],\r\n instance_type=params.get('instance_type'),\r\n placement = zone,\r\n user_data = cfg_fp.getvalue())\r\n l = []\r\n i = 0\r\n 
elastic_ip = params.get('elastic_ip')\r\n instances = reservation.instances\r\n if elastic_ip != None and instances.__len__() > 0:\r\n instance = instances[0]\r\n print 'Waiting for instance to start so we can set its elastic IP address...'\r\n # Sometimes we get a message from ec2 that says that the instance does not exist.\r\n # Hopefully the following delay will giv eec2 enough time to get to a stable state:\r\n time.sleep(5) \r\n while instance.update() != 'running':\r\n time.sleep(1)\r\n instance.use_ip(elastic_ip)\r\n print 'set the elastic IP of the first instance to %s' % elastic_ip\r\n for instance in instances:\r\n s = cls()\r\n s.ec2 = ec2\r\n s.name = params.get('name') + '' if i==0 else str(i)\r\n s.description = params.get('description')\r\n s.region_name = region.name\r\n s.instance_id = instance.id\r\n if elastic_ip and i == 0:\r\n s.elastic_ip = elastic_ip\r\n s.put()\r\n l.append(s)\r\n i += 1\r\n return l", "def _make_cloudformation_template(\n project_dir,\n user_data,\n s3_bucket_name,\n sam_template_name,\n elb_name,\n ami_id,\n instance_type,\n autoscaling_min_size,\n autoscaling_desired_capacity,\n autoscaling_max_size,\n):\n\n template_file_path = os.path.join(project_dir, sam_template_name)\n with open(template_file_path, \"a\") as f:\n f.write(\n \"\"\"\\\nAWSTemplateFormatVersion: 2010-09-09\nTransform: AWS::Serverless-2016-10-31\nDescription: BentoML load balanced template\nParameters:\n AmazonLinux2LatestAmiId:\n Type: AWS::SSM::Parameter::Value<AWS::EC2::Image::Id>\n Default: {ami_id}\nResources:\n SecurityGroupResource:\n Type: AWS::EC2::SecurityGroup\n Properties:\n GroupDescription: \"security group for bentoservice\"\n SecurityGroupIngress:\n -\n IpProtocol: tcp\n CidrIp: 0.0.0.0/0\n FromPort: 5000\n ToPort: 5000\n -\n IpProtocol: tcp\n CidrIp: 0.0.0.0/0\n FromPort: 22\n ToPort: 22\n VpcId: !Ref Vpc1\n\n Ec2InstanceECRProfile:\n Type: AWS::IAM::InstanceProfile\n Properties:\n Path: /\n Roles: [!Ref EC2Role]\n\n EC2Role:\n Type: AWS::IAM::Role\n Properties:\n AssumeRolePolicyDocument:\n Statement:\n - Effect: Allow\n Principal:\n Service: [ec2.amazonaws.com]\n Action: ['sts:AssumeRole']\n Path: /\n Policies:\n - PolicyName: ecs-service\n PolicyDocument:\n Statement:\n - Effect: Allow\n Action:\n - 'ecr:GetAuthorizationToken'\n - 'ecr:BatchGetImage'\n - 'ecr:GetDownloadUrlForLayer'\n Resource: '*'\n\n LaunchTemplateResource:\n Type: AWS::EC2::LaunchTemplate\n Properties:\n LaunchTemplateName: {template_name}\n LaunchTemplateData:\n IamInstanceProfile:\n Arn: !GetAtt Ec2InstanceECRProfile.Arn\n ImageId: !Ref AmazonLinux2LatestAmiId\n InstanceType: {instance_type}\n UserData: \"{user_data}\"\n SecurityGroupIds:\n - !GetAtt SecurityGroupResource.GroupId\n\n TargetGroup:\n Type: AWS::ElasticLoadBalancingV2::TargetGroup\n Properties:\n VpcId: !Ref Vpc1\n Protocol: HTTP\n Port: 5000\n TargetType: instance\n HealthCheckEnabled: true\n HealthCheckIntervalSeconds: {target_health_check_interval_seconds}\n HealthCheckPath: {target_health_check_path}\n HealthCheckPort: {target_health_check_port}\n HealthCheckProtocol: HTTP\n HealthCheckTimeoutSeconds: {target_health_check_timeout_seconds}\n HealthyThresholdCount: {target_health_check_threshold_count}\n\n LoadBalancerSecurityGroup:\n Type: AWS::EC2::SecurityGroup\n Properties:\n GroupDescription: \"security group for loadbalancing\"\n VpcId: !Ref Vpc1\n SecurityGroupIngress:\n -\n IpProtocol: tcp\n CidrIp: 0.0.0.0/0\n FromPort: 80\n ToPort: 80\n\n InternetGateway:\n Type: AWS::EC2::InternetGateway\n\n Gateway:\n Type: 
AWS::EC2::VPCGatewayAttachment\n Properties:\n InternetGatewayId: !Ref InternetGateway\n VpcId: !Ref Vpc1\n\n PublicRouteTable:\n Type: AWS::EC2::RouteTable\n Properties:\n VpcId: !Ref Vpc1\n\n PublicRoute:\n Type: AWS::EC2::Route\n DependsOn: Gateway\n Properties:\n DestinationCidrBlock: 0.0.0.0/0\n GatewayId: !Ref InternetGateway\n RouteTableId: !Ref PublicRouteTable\n\n RouteTableSubnetTwoAssociationOne:\n Type: AWS::EC2::SubnetRouteTableAssociation\n Properties:\n RouteTableId: !Ref PublicRouteTable\n SubnetId: !Ref Subnet1\n RouteTableSubnetTwoAssociationTwo:\n Type: AWS::EC2::SubnetRouteTableAssociation\n Properties:\n RouteTableId: !Ref PublicRouteTable\n SubnetId: !Ref Subnet2\n\n Vpc1:\n Type: AWS::EC2::VPC\n Properties:\n CidrBlock: 172.31.0.0/16\n EnableDnsHostnames: true\n EnableDnsSupport: true\n InstanceTenancy: default\n\n Subnet1:\n Type: AWS::EC2::Subnet\n Properties:\n VpcId: !Ref Vpc1\n AvailabilityZone:\n Fn::Select:\n - 0\n - Fn::GetAZs: \"\"\n CidrBlock: 172.31.16.0/20\n MapPublicIpOnLaunch: true\n\n Subnet2:\n Type: AWS::EC2::Subnet\n Properties:\n VpcId: !Ref Vpc1\n AvailabilityZone:\n Fn::Select:\n - 1\n - Fn::GetAZs: \"\"\n CidrBlock: 172.31.0.0/20\n MapPublicIpOnLaunch: true\n\n LoadBalancer:\n Type: AWS::ElasticLoadBalancingV2::LoadBalancer\n Properties:\n IpAddressType: ipv4\n Name: {elb_name}\n Scheme: internet-facing\n SecurityGroups:\n - !Ref LoadBalancerSecurityGroup\n Subnets:\n - !Ref Subnet1\n - !Ref Subnet2\n Type: application\n\n Listener:\n Type: AWS::ElasticLoadBalancingV2::Listener\n Properties:\n DefaultActions:\n - Type: forward\n TargetGroupArn: !Ref TargetGroup\n LoadBalancerArn: !Ref LoadBalancer\n Port: 80\n Protocol: HTTP\n\n AutoScalingGroup:\n Type: AWS::AutoScaling::AutoScalingGroup\n DependsOn: Gateway\n Properties:\n MinSize: {autoscaling_min_size}\n MaxSize: {autoscaling_max_size}\n DesiredCapacity: {autoscaling_desired_capacity}\n AvailabilityZones:\n - Fn::Select:\n - 0\n - Fn::GetAZs: \"\"\n - Fn::Select:\n - 1\n - Fn::GetAZs: \"\"\n LaunchTemplate:\n LaunchTemplateId: !Ref LaunchTemplateResource\n Version: !GetAtt LaunchTemplateResource.LatestVersionNumber\n TargetGroupARNs:\n - !Ref TargetGroup\n VPCZoneIdentifier:\n - !Ref Subnet1\n - !Ref Subnet2\n UpdatePolicy:\n AutoScalingReplacingUpdate:\n WillReplace: true\n\nOutputs:\n S3Bucket:\n Value: {s3_bucket_name}\n Description: Bucket to store sam artifacts\n AutoScalingGroup:\n Value: !Ref AutoScalingGroup\n Description: Autoscaling group name\n TargetGroup:\n Value: !Ref TargetGroup\n Description: Target group for load balancer\n Url:\n Value: !Join ['', ['http://', !GetAtt [LoadBalancer, DNSName]]]\n Description: URL of the bento service\n\n\"\"\".format(\n ami_id=ami_id,\n template_name=sam_template_name,\n instance_type=instance_type,\n user_data=user_data,\n elb_name=elb_name,\n autoscaling_min_size=autoscaling_min_size,\n autoscaling_desired_capacity=autoscaling_desired_capacity,\n autoscaling_max_size=autoscaling_max_size,\n s3_bucket_name=s3_bucket_name,\n target_health_check_interval_seconds=TARGET_HEALTH_CHECK_INTERVAL,\n target_health_check_path=TARGET_HEALTH_CHECK_PATH,\n target_health_check_port=TARGET_HEALTH_CHECK_PORT,\n target_health_check_timeout_seconds=TARGET_HEALTH_CHECK_TIMEOUT_SECONDS,\n target_health_check_threshold_count=TARGET_HEALTH_CHECK_THRESHOLD_COUNT,\n )\n )\n return template_file_path", "def new(cls, string=None):\n # Generates warner ECDSA objects\n if string:\n # deterministic private key\n ecdsaPrivkey = SigningKey.from_string(\n string=string, 
curve=SECP256k1)\n else:\n # random private key\n ecdsaPrivkey = SigningKey.generate(\n curve=SECP256k1, entropy=None)\n return cls.fromPrivkey(ecdsaPrivkey)", "def new_ec_key(name='secp256r1'):\n if name not in EC_CURVES:\n raise ValueError('Unknown curve')\n return ec.generate_private_key(curve=EC_CURVES[name], backend=get_backend())", "def create_vpc(ec2):\n # create a new VPC\n print(\"\\n===Creating VPC...\")\n vpc = ec2.create_vpc(CidrBlock=VPC_CIDR_BLOCK,\n TagSpecifications=[{\"ResourceType\": \"vpc\",\n \"Tags\":[{\"Key\": \"Name\", \n \"Value\": VPC_NAME},\n ]\n }])\n \n # wait till available and return VPC ID\n vpc.wait_until_available()\n print(f\"===VPC {VPC_NAME} is available!\")\n return vpc", "def aws_instance(LOGGER, VM, TERRAFORM_SECURITY_GROUPS):\n VM_INFO = dict()\n LOGGER.info('Adding %s: %s to inventory.' %\n (VM['data_type'], VM['inventory_hostname']))\n\n VM_INFO.update(\n {\n 'inventory_hostname': VM['inventory_hostname'],\n 'ami': VM['ami'],\n 'data_type': VM['data_type'],\n 'ansible_groups': VM['ansible_groups'],\n 'availability_zone': VM['availability_zone'],\n 'instance_type': VM['instance_type'],\n 'key_name': VM['key_name'],\n 'network_interface_id': VM['network_interface_id'],\n 'private_dns': VM['private_dns'],\n 'private_ip': VM['private_ip'],\n 'public_dns': VM['public_dns'],\n 'public_ip': VM['public_ip'],\n 'subnet_id': VM['subnet_id'],\n 'target': VM['target'],\n 'vpc_security_group_ids': VM['vpc_security_group_ids']\n }\n )\n\n for VPC_SECURITY_GROUP_ID in VM['vpc_security_group_ids']:\n for SECURITY_GROUP in TERRAFORM_SECURITY_GROUPS:\n if SECURITY_GROUP['id'] == VPC_SECURITY_GROUP_ID:\n VM_INFO.update(\n {\n 'vpc_security_groups':\n SECURITY_GROUP['security_groups']\n }\n )\n\n return VM_INFO", "def aws():\n env.hosts = 'ec2-54-187-201-203.us-west-2.compute.amazonaws.com'\n env.user = 'ubuntu'\n env.key_filename = '/Users/jenniferchen/Downloads/hs698v2.pem'\n env.virtualenv = {'dir': '/server', 'name': 'venv'}", "def ec2(self):\n try:\n config = configparser.ConfigParser()\n config.read(self)\n aws_access_key_id = config.get(\"saml\", \"aws_access_key_id\")\n aws_secret_access_key = config.get(\"saml\", \"aws_secret_access_key\")\n aws_session_token = config.get(\"saml\", \"aws_session_token\")\n aws_region = config.get(\"saml\", \"region\")\n\n except Exception as e:\n print(\"Error with credentials. %s\" % e)\n\n try:\n session = boto3.session.Session(profile_name='saml')\n ec2 = session.client('ec2', region_name=aws_region, verify=True, aws_access_key_id=aws_access_key_id, aws_secret_access_key=aws_secret_access_key, aws_session_token=aws_session_token)\n return ec2\n\n except Exception as e:\n print(\"Error while creating boto3 session. 
%s\" % e)", "def createAllSG():\n\tfor info in conf_HVM:\n\t\tec2 = boto.ec2.connect_to_region(info['region']+'-'+info['zone'])\n\t\tcreateSG(ec2,'SG-Cassandra-'+info['region']+'-'+info['zone'],CASSANDRA_RULES)", "def do_new(argv):\n\n global PRIVATE_KEY\n\n if not PRIVATE_KEY:\n PRIVATE_KEY = wallet.get_private_key()\n else:\n get_new = yes_or_no(\"Private key already exist, do you want generate new one ?\")\n if get_new:\n PRIVATE_KEY = wallet.get_private_key()\n print(\"Private Key: '\" + PRIVATE_KEY + \"'\")\n cmpr_pub_key = wallet.get_compressed_public_key(PRIVATE_KEY, 1)\n addr = wallet.public_key_to_address(cmpr_pub_key, 0)\n open(\"data/address\", \"w\").write(addr)\n print(\"Public key was saved to 'data/cmpr_pub_key'\")", "def createAllKP():\n\tif not os.path.exists(keysDir):\n\t\tos.makedirs(keysDir)\n\tfor info in conf_HVM:\n\t\tkeyName = 'Key-'+info['region']+'-'+info['zone']\n\t\ttry:\n\t\t\tos.remove(keysDir+'/'+keyName+'.pem')\n\t\texcept OSError:\n\t\t\tpass\n\t\tprint \"Key creation :\",keyName\n\t\tec2 = boto.ec2.connect_to_region(info['region']+'-'+info['zone'])\n\t\t# check if the key pair exists\n\t\tkps = [kp for kp in ec2.get_all_key_pairs() if kp.name == keyName]\n\t\tif kps:\n\t\t\tec2.delete_key_pair(keyName)\t\n\t\tkey = ec2.create_key_pair(keyName)\n\t\tkey.save(keysDir)", "def create_key_pair(self, keypair, **kwargs):\n\n if not isinstance(keypair, models.CreateKeyPairReq):\n raise HuaweiCloudSDKException(\n message=\"The datatype of parameter(keypair) \"\n \"is not CreateKeyPairReq\")\n body_params = keypair.serialize()\n\n header_params = {}\n header_params['Accept'] = util.select_header_accept(\n ['application/xml', 'application/json'])\n\n header_params['Content-Type'] = util.select_header_content_type(\n ['application/json', 'application/xml'])\n\n return_code, return_data, _ = self.api_client.handle_raw_request(\n 'compute', 'POST',\n '/os-keypairs',\n headers=header_params,\n body=body_params,\n timeout=kwargs.get('_request_timeout', None),\n _preload_content=kwargs.get('_preload_content', True))\n\n if return_data is not None:\n return_data = json.loads(return_data)\n else:\n return_data = {}\n if return_code not in [200, 201]:\n raise HuaweiCloudSDKException(\n return_code,\n \"Run create_key_pair failed, \"\n \"message=%s\" % return_data.get(\"message\"))\n return models.CreateKeyPairResp().deserialize(return_data)", "def ec_generate_key(security):\n assert isinstance(security, unicode)\n assert security in _curves\n ec = M2Crypto.EC.gen_params(_curves[security])\n ec.gen_key()\n return ec", "def create_key_pair(self) -> Keypair:\n res = self.context.post(\n \"/dsum/create_key_pair\", None, None, \"DSum: failed creating a Curve 25519 Keypair\")\n return Keypair(res['private_key_id'], res['public_key_id'])", "def spawn(\n configfile,\n time,\n flavor,\n operating_system,\n region,\n currency,\n sshkey,\n kickstart,\n force):\n if configfile:\n try:\n with open(configfile, 'r') as file:\n data = file.read()\n file.close()\n data = json.loads(data)\n except FileNotFoundError as err: # Sublime give an error, but it's not.\n print(Bcolors.FAIL + 'ERROR: Config File path entered not found.' + Bcolors.ENDC)\n print(str(err))\n sys.exit(2)\n except PermissionError as err:\n print(Bcolors.FAIL + 'ERROR: Config File path entered, Permission Denied.' 
+ Bcolors.ENDC)\n print(str(err))\n sys.exit(2)\n else:\n sshkey = file_to_string(sshkey) if sshkey else None\n kickstart = file_to_string(kickstart) if kickstart else None\n data = {\n 'hours_time': time,\n 'flavor': flavor,\n 'operating_system': operating_system,\n 'region': region,\n 'ssh_key': sshkey,\n 'kickstart': kickstart,\n 'currency': currency\n }\n\n validation = False\n\n if not force:\n while not validation:\n print(json.dumps(data, indent=4, sort_keys=True))\n val_question = input('Is theses parameter are correct ? [Y / N] : ')\n if val_question in ['Y', 'y']:\n validation = True\n elif val_question in ['N', 'n']:\n print(Bcolors.FAIL + 'Instance creation/spawning stoped.' + Bcolors.ENDC)\n sys.exit(2)\n\n api_returned_info = api_post('create', data)\n\n if api_returned_info and api_returned_info.get('status') == 200:\n instance_info = api_returned_info.get('Request_instance')\n status = instance_info.get('Status')\n transaction = instance_info.get('Transaction')\n color = Bcolors.OKGREEN if status == 'SUCCESS' else Bcolors.FAIL\n\n print('New Instance requested... ' + color + status + Bcolors.ENDC)\n for message in instance_info.get('Message'):\n print(Bcolors.OKBLUE + message + Bcolors.ENDC)\n\n if transaction and status == 'SUCCESS':\n print(' ')\n print('---------- QR CODE ----------')\n print(convert_to_qrcode(transaction.get('Address')))\n print(Bcolors.WARNING + json.dumps(transaction, indent=4, sort_keys=True) + Bcolors.ENDC)\n print(' ')\n print('You can now look at the transaction and instance status, using the subcommand \"status\" with above \"Transaction_ID\".')\n print('E.G. : \"facelesscloud status -i 13c3febe-ac0a-448f-9404-005b4475063e\" (transaction_id)')\n print(' ')\n return True # For assert test.\n else:\n print(Bcolors.FAIL + 'ERROR : ' + Bcolors.ENDC + 'Something went wrong calling the server.')\n print(json.dumps(api_returned_info, indent=4, sort_keys=True))\n sys.exit(2)\n else:\n print(Bcolors.FAIL + 'ERROR : ' + Bcolors.ENDC + 'Something went wrong calling the server.')\n print(json.dumps(api_returned_info, indent=4, sort_keys=True))\n sys.exit(2)", "def test_deploy_instance_with_new_network_and_all_params(self):\n suffix = datetime.utcnow().strftime('%Y%m%d%H%M%S')\n instance_name = TEST_SERVER_PREFIX + \"_network_all_params_\" + suffix\n instance_meta = {\"test_item\": \"test_value\"}\n keypair_name = TEST_KEYPAIR_PREFIX + \"_\" + suffix\n sec_group_name = TEST_SEC_GROUP_PREFIX + \"_\" + suffix\n network_name = TEST_NETWORK_PREFIX + \"_\" + suffix\n network_cidr = TEST_CIDR_PATTERN % 248\n self.__deploy_instance_helper__(instance_name=instance_name,\n network_name=network_name,\n network_cidr=network_cidr,\n metadata=instance_meta,\n keypair_name=keypair_name,\n sec_group_name=sec_group_name)", "def launch_spot():\n ec2 = boto3.client('ec2')\n ec2r = boto3.resource('ec2')\n ec2spec = dict(ImageId=AMI,\n KeyName = KeyName,\n SecurityGroupIds = [SecurityGroupId, ],\n InstanceType = \"p2.xlarge\",\n Monitoring = {'Enabled': True,},\n IamInstanceProfile = IAM_ROLE)\n output = ec2.request_spot_instances(DryRun=False,\n SpotPrice=\"0.4\",\n InstanceCount=1,\n LaunchSpecification = ec2spec)\n spot_request_id = output[u'SpotInstanceRequests'][0][u'SpotInstanceRequestId']\n logging.info(\"instance requested\")\n time.sleep(30)\n waiter = ec2.get_waiter('spot_instance_request_fulfilled')\n waiter.wait(SpotInstanceRequestIds=[spot_request_id,])\n instance_id = get_status(ec2, spot_request_id)\n while instance_id is None:\n time.sleep(30)\n instance_id = 
get_status(ec2,spot_request_id)\n instance = ec2r.Instance(instance_id)\n with open(\"host\",'w') as out:\n out.write(instance.public_ip_address)\n logging.info(\"instance allocated\")\n time.sleep(10) # wait while the instance starts\n env.hosts = [instance.public_ip_address,]\n fh = open(\"connect.sh\", 'w')\n fh.write(\"#!/bin/bash\\n\" + \"ssh -i \" + env.key_filename + \" \" + env.user + \"@\" + env.hosts[0] + \"\\n\")\n fh.close()\n local(\"fab deploy_ec2\") # this forces fab to set new env.hosts correctly", "def create_node(self, **kwargs):\n image = kwargs[\"image\"]\n size = kwargs[\"size\"]\n params = {\n 'Action': 'RunInstances',\n 'ImageId': image.id,\n 'MinCount': kwargs.get('ex_mincount','1'),\n 'MaxCount': kwargs.get('ex_maxcount','1'),\n 'InstanceType': size.id\n }\n\n if 'ex_securitygroup' in kwargs:\n if not isinstance(kwargs['ex_securitygroup'], list):\n kwargs['ex_securitygroup'] = [kwargs['ex_securitygroup']]\n for sig in range(len(kwargs['ex_securitygroup'])):\n params['SecurityGroup.%d' % (sig+1,)] = kwargs['ex_securitygroup'][sig]\n\n if 'location' in kwargs:\n availability_zone = getattr(kwargs['location'], 'availability_zone',\n None)\n if availability_zone:\n if availability_zone.region_name != self.region_name:\n raise AttributeError('Invalid availability zone: %s'\n % (availability_zone.name))\n params['Placement.AvailabilityZone'] = availability_zone.name\n\n if 'ex_keyname' in kwargs:\n params['KeyName'] = kwargs['ex_keyname']\n\n if 'ex_userdata' in kwargs:\n params['UserData'] = base64.b64encode(kwargs['ex_userdata'])\n\n if 'ex_clienttoken' in kwargs:\n params['ClientToken'] = kwargs['ex_clienttoken']\n\n object = self.connection.request(self.path, params=params).object\n nodes = self._to_nodes(object, 'instancesSet/item')\n\n if len(nodes) == 1:\n return nodes[0]\n else:\n return nodes", "def create_ami_from_instance ( aws_account_type,\n ec2_conn,\n instance,\n ami_name,\n ami_description = None,\n wait_for_available = True ) :\n ami_id = instance.create_image( ami_name, ami_description )\n ami = aws_wait( ec2_conn.get_all_images, ami_id, [ ami_id ] )\n if not ami :\n print \"AMI is not available after a long time! \" + ami.name\n return None\n\n if wait_for_available :\n ami_available = wait_on_object_state( ami, 'available' ,max_wait=3600)\n if not ami_available :\n print \"AMI is not available after a long time! 
\" + ami.name\n return None\n\n # Allow other AWS accounts the ability to see this AMI.\n if aws_account_type == 'esp-nonprod' :\n priv_account_id = esp_prod[ 'accountid' ]\n else :\n priv_account_id = esp_nonprod[ 'accountid' ]\n\n ami.set_launch_permissions( user_ids = [ priv_account_id ] )\n\n return ami", "def clone_asg(client, asg, name, lc_name):\n PARAMS_TO_CLONE = [\n 'MinSize',\n 'MaxSize',\n 'DesiredCapacity',\n 'DefaultCooldown',\n 'AvailabilityZones',\n 'LoadBalancerNames',\n 'HealthCheckType',\n 'HealthCheckGracePeriod',\n 'PlacementGroup',\n 'VPCZoneIdentifier',\n 'TerminationPolicies',\n 'NewInstancesProtectedFromScaleIn',\n ]\n params = {\n key: asg[key] for key in PARAMS_TO_CLONE if key in asg\n }\n params['Tags'] = [{\n 'ResourceId': name,\n 'ResourceType': 'auto-scaling-group',\n 'Key': tag['Key'],\n 'Value': tag['Value'],\n 'PropagateAtLaunch': tag['PropagateAtLaunch'],\n } for tag in asg['Tags']]\n\n client.create_auto_scaling_group(\n AutoScalingGroupName=name,\n LaunchConfigurationName=lc_name,\n **params\n )\n return client.describe_auto_scaling_groups(\n AutoScalingGroupNames=[name]\n )['AutoScalingGroups'][0]", "def create(self,\n name=None,\n image=None,\n network=None,\n size=None,\n location=None,\n timeout=360,\n key=None,\n secgroup=None,\n ip=None,\n user=None,\n public=True,\n group=None,\n metadata=None,\n cloud=None,\n label=None,\n **kwargs):\n image_use = None\n flavor_use = None\n\n # keyname = Config()[\"cloudmesh\"][\"profile\"][\"user\"]\n # ex_keyname has to be the registered keypair name in cloud\n\n \"\"\"\n https://docs.openstack.org/openstacksdk/latest/user/connection.html#openstack.connection.Connection.create_server\n\n \"\"\"\n\n if 'flavor' in kwargs and size is None:\n size = kwargs['flavor']\n\n if network is not None:\n pass\n elif 'network' in kwargs:\n network = kwargs['network']\n elif 'network' in self.default:\n network = self.default['network']\n\n # Guess user name\n\n if user is None:\n user = Image.guess_username(image)\n # image_name = image.lower()\n # if image_name.startswith(\"cc-\"):\n # user = \"cc\"\n # if \"centos\" in image_name:\n # user = \"centos\"\n # elif \"ubuntu\" in image_name:\n # user = \"ubuntu\"\n\n # get IP\n\n if not ip and public:\n ip = self.find_available_public_ip()\n # pprint(entry)\n\n elif ip is not None:\n entry = self.list_public_ips(ip=ip, available=True)\n if len(entry) == 0:\n print(\"ip not available\")\n raise ValueError(f\"The ip can not be assigned {ip}\")\n\n if type(group) == str:\n groups = Parameter.expand(group)\n\n vm_label = label or name\n\n\n banner(\"Create Server\")\n Console.msg(f\" Cloud: {self.cloud}\")\n Console.msg(f\" Name: {name}\")\n Console.msg(f\" Label: {vm_label}\")\n Console.msg(f\" User: {user}\")\n Console.msg(f\" IP: {ip}\")\n Console.msg(f\" Image: {image}\")\n Console.msg(f\" Size: {size}\")\n Console.msg(f\" Network: {network}\")\n Console.msg(f\" Public: {public}\")\n Console.msg(f\" Key: {key}\")\n Console.msg(f\" Location: {location}\")\n Console.msg(f\" Timeout: {timeout}\")\n Console.msg(f\" Secgroup: {secgroup}\")\n Console.msg(f\" Group: {group}\")\n Console.msg(f\" Groups: {groups}\")\n Console.msg(\"\")\n\n # image = self.cloudman.compute.find_image(image)\n # flavor = self.cloudman.compute.find_flavor(size)\n # network = self.cloudman.network.find_network(network)\n\n try:\n server = self.cloudman.create_server(name,\n network=network,\n flavor=size,\n image=image,\n key_name=key,\n security_groups=[secgroup],\n timeout=timeout\n # tags=groups,\n # 
wait=True\n )\n\n \"\"\"\n server = self.cloudman.create_server(name,\n networks=[\n {\"uuid\": \"0fa8824d-8a3f-4890-90e1-c3596b3511c6\"}],\n flavor=size,\n image=image,\n key_name=key,\n security_groups=[secgroup],\n timeout=timeout\n # tags=groups,\n # wait=True\n )\n \"\"\"\n server['user'] = user\n server = self.cloudman.wait_for_server(server)\n server = self.cloudman.add_ips_to_server(server, ips=ip)\n variables = Variables()\n variables['vm'] = name\n if metadata is None:\n metadata = {}\n\n #\n # due to metadata limitation in openstack do not add the creation time\n #\n\n if 'created' in metadata:\n del metadata['created']\n\n metadata['image'] = image\n metadata['flavor'] = size\n metadata['label'] = vm_label\n\n self.cloudman.set_server_metadata(server, metadata)\n\n self.add_secgroup(name=secgroup)\n\n # server = self.cloudman.compute.wait_for_server(server)\n\n # print(\"ssh -i {key} root@{ip}\".format(\n # key=PRIVATE_KEYPAIR_FILE,\n # ip=server.access_ipv4))\n\n except openstack.exceptions.ResourceTimeout:\n Console.error(\"Problem starting vm in time.\")\n raise TimeoutError\n\n except Exception as e:\n Console.error(\"Problem starting vm\", traceflag=True)\n print(e)\n raise RuntimeError\n\n return self.update_dict(server, kind=\"vm\")[0]", "def auth_secgroupeg(self, args):\n region = args[\"Region\"]\n sgid = args[\"Security-group-ID\"]\n protocol = args[\"protocol\"]\n from_port = int(args[\"FromPort\"])\n to_port = int(args[\"ToPort\"])\n ip_range = args[\"IpRange\"]\n message = MessageClass()\n\n # Boto3 client creation by providing the access_id and access_secret\n ec2 = boto3.client(service_name='ec2', region_name=region, api_version=None, use_ssl=True,\n verify=None, endpoint_url=None, aws_access_key_id=self.aws_access_key,\n aws_secret_access_key=self.aws_secret_token, aws_session_token=None,\n config=None)\n\n data = ec2.authorize_security_group_egress(\n GroupId=sgid,\n IpPermissions=[\n {'IpProtocol': protocol,\n 'FromPort': from_port,\n 'ToPort': to_port,\n 'IpRanges': [{'CidrIp': ip_range}]}\n ])\n attachment = MessageAttachmentsClass()\n attachment.title = data\n message.message_text = \"Egress Successfully Set :\"\n message.attach(attachment)\n\n return message.to_json()", "def ssh_to_ec2(instance):\n subprocess.Popen(['ssh', instance.dns_name])", "def create_keypair(self):\n # NOTE: currently we rely on zmq for convenience, but we may use libnacl directly\n # if we want to isolate this module from zmq dependency.\n public_key, private_key = zmq.curve_keypair()\n return public_key, private_key", "def test_non_additive_instance_creation(self):\n\n # local imports of code-under-test ensure moto has mocks\n # registered before any possible calls out to AWS\n from awstools.awstools import launch_instances, run_block_device_dict, farm_security_group_setup\n\n # launch_instances requires vpc setup as done by firesim/scripts/setup_firesim.py\n from awstools.aws_setup import aws_setup\n aws_setup()\n farm_security_group_setup()\n\n type = 'f1.2xlarge'\n\n instances = launch_instances(type, 1,\n instancemarket=\"ondemand\", spotinterruptionbehavior=None, spotmaxprice=None,\n blockdevices=run_block_device_dict(),\n tags = {'fsimcluster': 'testcluster'},\n always_expand=False)\n instances.should.have.length_of(1)\n\n instances = launch_instances(type, 1,\n instancemarket=\"ondemand\", spotinterruptionbehavior=None, spotmaxprice=None,\n blockdevices=run_block_device_dict(),\n tags = {'fsimcluster': 'testcluster'},\n always_expand=False)\n 
instances.should.have.length_of(1)\n\n # There should be one instance total now, across one reservation\n ec2_client = boto3.client('ec2')\n paginator = ec2_client.get_paginator('describe_instances')\n page_iterator = paginator.paginate()\n\n all_reservations = []\n for page in page_iterator:\n page['ResponseMetadata']['HTTPStatusCode'].should.equal(200)\n all_reservations += page['Reservations']\n all_reservations.should.have.length_of(1)\n\n [i for r in all_reservations for i in r['Instances']].should.have.length_of(1)", "def create_auto_scaling_group(autoscaling_client: BaseClient,\n asg_name: str,\n launch_template_ids: Dict[str, str],\n vpc_subnets: List[str],\n min_size: int,\n max_size: int,\n instance_types: Optional[List[str]] = None,\n spot_bid: Optional[float] = None,\n spot_cheapest: bool = False,\n tags: Optional[Dict[str, str]] = None) -> None:\n\n if instance_types is None:\n instance_types = []\n\n if instance_types is not None and len(instance_types) > 20:\n raise RuntimeError(f\"Too many instance types ({len(instance_types)}) in group; AWS supports only 20.\")\n\n if len(vpc_subnets) == 0:\n raise RuntimeError(\"No VPC subnets specified to launch into; not clear where to put instances\")\n\n def get_launch_template_spec(instance_type):\n \"\"\"\n Get a LaunchTemplateSpecification for the given instance type.\n \"\"\"\n return {'LaunchTemplateId': launch_template_ids[instance_type], 'Version': '$Default'}\n\n # We always write the ASG with a MixedInstancesPolicy even when we have only one type.\n # And we use a separate launch template for every instance type, and apply it as an override.\n # Overrides is the only way to get multiple instance types into one ASG; see:\n # https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/autoscaling.html#AutoScaling.Client.create_auto_scaling_group\n # We need to use a launch template per instance type so that different\n # instance types with specified EBS storage size overrides will get their\n # storage.\n mip = {'LaunchTemplate': {'LaunchTemplateSpecification': get_launch_template_spec(next(iter(instance_types))),\n 'Overrides': [{'InstanceType': t, 'LaunchTemplateSpecification': get_launch_template_spec(t)} for t in instance_types]}}\n\n if spot_bid is not None:\n # Ask for spot instances by saying everything above base capacity of 0 should be spot.\n mip['InstancesDistribution'] = {'OnDemandPercentageAboveBaseCapacity': 0,\n 'SpotAllocationStrategy': 'capacity-optimized' if not spot_cheapest else 'lowest-price',\n 'SpotMaxPrice': str(spot_bid)}\n\n asg = {'AutoScalingGroupName': asg_name,\n 'MixedInstancesPolicy': prune(mip),\n 'MinSize': min_size,\n 'MaxSize': max_size,\n 'VPCZoneIdentifier': ','.join(vpc_subnets)}\n\n if tags:\n # Tag the ASG itself.\n asg['Tags'] = flatten_tags(tags)\n\n logger.debug(\"Creating Autoscaling Group across subnets: %s\", vpc_subnets)\n\n # Don't prune the ASG because MinSize and MaxSize are required and may be 0.\n autoscaling_client.create_auto_scaling_group(**asg)", "def install_mongo ( vpc_conn, ec2_conn, cloudwatch_conn, vpc, base_name, aws_account_type, base_topicarn, params ):\n app_type = 'MONGO'\n mongo_keypair = get_keypair_name( aws_account_type, vpc.region.name, app_type )\n print mongo_keypair\n mongo_sec_grp_name = get_secgrp_name( base_name, app_type )\n \n print \"mongo_sec_grp_name\" + mongo_sec_grp_name\n \n mongo_sec_grp = find_secgrp(ec2_conn, mongo_sec_grp_name)\n print mongo_sec_grp\n \n if not mongo_sec_grp :\n mongo_sec_grp = create_secgrp( ec2_conn,\n 
vpc,\n mongo_sec_grp_name,\n 'Controls access to the ' + app_type )\n print mongo_sec_grp \n \n remove_all_rules( ec2_conn, [ mongo_sec_grp ],deep = True,base_name = base_name)\n grant_ssh_access( ec2_conn, [ mongo_sec_grp ], find_group( ec2_conn, base_name, 'NAT' ) )\n grant_grp_self_access ( ec2_conn, mongo_sec_grp, 0, 65535, protocol = 'tcp' )\n \n mongo_ami_name = base_name + '-' + app_type\n \n mongo_ami = get_ami_by_name( ec2_conn, mongo_ami_name )\n if not mongo_ami :\n mongo_ami = get_ami_by_name( ec2_conn, default_mongo_ami )\n\n print \"Creating mongoDB Instances\"\n \n mongo_subnets = get_vpc_subnets( vpc_conn, vpc, 'STATIC' )\n mongo_sec_grps = ec2_conn.get_all_security_groups( filters = { \"group-name\" : [ mongo_sec_grp_name ] } )\n \n primary_ip = get_primary_node(mongo_subnets)\n mongo_instances = []\n mongo_config = get_mongo_config(mongo_subnets)\n \n user_data = None\n primary_instance_id = None\n primary_subnet = None\n for subnet in mongo_subnets :\n cidr_block = subnet.cidr_block\n \n if cidr_block == primary_ip :\n primary_subnet = subnet\n \n # First launch the secondary instances\n if cidr_block != primary_ip :\n instance_private_ip = get_static_ip(subnet.cidr_block, \"0/24\", mongo_ip_block)\n zone_letter = subnet.availability_zone[-1:].upper( )\n mongo = launch_instance_vpc( ec2_conn,\n mongo_ami,\n base_name = base_name,\n instance_type = app_type,\n keypair = mongo_keypair,\n machine_type = 'm3.xlarge',\n security_group_id = mongo_sec_grps[0].id ,\n subnet_id = subnet.id,\n public_ip = False,\n user_data = user_data,\n static_ip_address = instance_private_ip )\n mongo_instances.append( mongo )\n\n print \"Setting alarms on the mongo instance\"\n add_monitors_to_instance( cloudwatch_conn, base_name, mongo.id, app_type, base_topicarn, mongo_monitor_rules )\n\n user_data = base64.b64encode(mongo_config)\n \n print \"Launching primary mongo instance\"\n print \"primary_subnet \" +primary_subnet.cidr_block\n instance_private_ip = get_static_ip(primary_subnet.cidr_block, \"0/24\", mongo_ip_block)\n zone_letter = subnet.availability_zone[-1:].upper( )\n mongo = launch_instance_vpc( ec2_conn,\n mongo_ami,\n base_name = base_name,\n instance_type = app_type,\n keypair = mongo_keypair,\n machine_type = 'm3.xlarge',\n security_group_id = mongo_sec_grps[0].id ,\n subnet_id = primary_subnet.id,\n public_ip = False,\n user_data = user_data,\n static_ip_address = instance_private_ip )\n mongo_instances.append( mongo)\n \n print \"Setting alarms on the mongo instance\"\n add_monitors_to_instance( cloudwatch_conn, base_name, mongo.id, app_type, base_topicarn, mongo_monitor_rules )", "def generate(self):\n if self.curvetype == KeyType.ECDSA_P256v1:\n self.private_key_obj = ec.generate_private_key(ec.SECP256R1(), default_backend())\n elif self.curvetype == KeyType.ECDSA_SECP256k1:\n self.private_key_obj = ec.generate_private_key(ec.SECP256K1(), default_backend())\n self.public_key_obj = self.private_key_obj.public_key()\n self._get_naive_private_key_bytes()\n self._get_naive_public_key_bytes()", "def test_create_keypair_only(self):\n self.keypair_creator = create_keypairs.OpenStackKeypair(self.os_creds,\n create_keypairs.KeypairSettings(name=keypair_name))\n self.keypair_creator.create()\n\n keypair = nova_utils.keypair_exists(self.keypair_creator.nova, self.keypair_creator.keypair)\n self.assertEquals(self.keypair_creator.keypair, keypair)", "def create_ig(ec2):\n ## create internet gateway\n print(\"\\n===Creating Internet Gateway...\")\n ig = 
ec2.create_internet_gateway(TagSpecifications=[{\n \"ResourceType\":\"internet-gateway\",\n \"Tags\":[{\"Key\": \"Name\", \"Value\": IG_NAME},\n ]}])\n print(\"===Internet gateway is reay!!\")\n return ig", "def create_essid(self, essid):\n self.cli.essids.createessid(essid)", "def test_aws_service_api_keypair_generate_post(self):\n pass", "def create(self, objectType, initialParameters):\n command_line = 'rsg create -ot=\"%s\" -pl=\"%s\" -ht=\"%s\"' % (\n objectType, initialParameters, self.host_name)\n return self._execTool(command_line)", "def _AddPerInstanceSshkey(self):\n if self._ssh_public_key_path:\n rsa = self._LoadSshPublicKey(self._ssh_public_key_path)\n logger.info(\"ssh_public_key_path is specified in config: %s, \"\n \"will add the key to the instance.\",\n self._ssh_public_key_path)\n self._metadata[\"sshKeys\"] = \"{0}:{2}\\n{1}:{2}\".format(getpass.getuser(),\n constants.GCE_USER,\n rsa)\n else:\n logger.warning(\n \"ssh_public_key_path is not specified in config, \"\n \"only project-wide key will be effective.\")" ]
[ "0.7219868", "0.68883014", "0.67771643", "0.6717516", "0.67171246", "0.66807824", "0.6674608", "0.66468847", "0.65976626", "0.6490849", "0.6442486", "0.63030493", "0.62883216", "0.6278427", "0.6269443", "0.61487126", "0.61450726", "0.61255866", "0.6090022", "0.6052359", "0.6048319", "0.60351664", "0.6017799", "0.6004755", "0.5988949", "0.595655", "0.5952687", "0.5948316", "0.59315515", "0.59129", "0.5911795", "0.5896086", "0.5889266", "0.58777916", "0.58716625", "0.5853599", "0.5835991", "0.58292055", "0.5813298", "0.5811145", "0.5790039", "0.5781442", "0.57776845", "0.5763991", "0.5753412", "0.57445806", "0.5736881", "0.57302886", "0.5722526", "0.5711165", "0.57079846", "0.56820875", "0.56752855", "0.5668685", "0.56638753", "0.5658054", "0.5648391", "0.5643839", "0.5628607", "0.5625751", "0.56206363", "0.56029624", "0.5577988", "0.55638266", "0.55534893", "0.5543421", "0.553652", "0.5516093", "0.5512808", "0.5512734", "0.550705", "0.5494807", "0.5494147", "0.549209", "0.5478564", "0.547531", "0.54727364", "0.54546857", "0.5433996", "0.5422379", "0.5417239", "0.54154676", "0.5413716", "0.5398384", "0.53972846", "0.53811353", "0.5375668", "0.5374019", "0.5363951", "0.53609157", "0.5356295", "0.5354198", "0.53469574", "0.53337115", "0.53306603", "0.5327651", "0.53257537", "0.5322023", "0.53219163", "0.5317263" ]
0.7226294
0
Create a new SecurityGroup
def createSG(ec2,name,rules): # check if the security group exists group = None sgGroups = [sg for sg in ec2.get_all_security_groups() if sg.name == name] if sgGroups: group = sgGroups[0] ec2.delete_security_group(name=name, group_id=group) print "Creating %s Security Group" % name group = ec2.create_security_group(name, 'group for %s' % name) if group: # Set the inbound rules for rule in rules: if rule.src_group_name: group.authorize(ip_protocol=rule.ip_protocol,from_port=rule.from_port,to_port=rule.to_port,cidr_ip=rule.cidr_ip,src_group=group) else: group.authorize(ip_protocol=rule.ip_protocol,from_port=rule.from_port,to_port=rule.to_port,cidr_ip=rule.cidr_ip,src_group=None) return True else: logError('Error during '+name+' Security Group update') return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_security_group(self, body=None):\r\n return self.post(self.security_groups_path, body=body)", "def create(self, body: CloudSecurityGroup) -> Dict:\n\t\treturn self._post(route=AWSSecurityGroupConsts.CLOUD_SECURITY_GROUP.value, body=body)", "def security_group_create(auth=None, **kwargs):\n cloud = get_operator_cloud(auth)\n kwargs = _clean_kwargs(keep_name=True, **kwargs)\n return cloud.create_security_group(**kwargs)", "def create(self, name, desc, tenant_id):\n data = {\"security_group\": {\"name\": name, \n \"description\": desc, \n \"tenant_id\":tenant_id}}\n\n path = '%s/security-groups' % self.ver\n res = self.client.call(path, 'POST', data=json.dumps(data), \n token=self.manager.identity.token)\n self.logger.debug('Create openstack security group: %s' % truncate(res))\n return res[0]['security_group']", "def create(self, name, desc):\n body = {'security_group': {'name': name,\n 'description': desc,\n 'tenant_id': self.request.user.project_id}}\n secgroup = self.client.create_security_group(body)\n return SecurityGroup(secgroup.get('security_group'))", "def create_security_group(group_name):\n ec2 = boto.ec2.connect_to_region(AWS_REGION)\n for g in ec2.get_all_security_groups():\n if g.name == group_name:\n return # We already have this group setup\n group = ec2.create_security_group(group_name,\n \"%s SSH access group\" % group_name)\n group.authorize(\"tcp\", 22, 22, \"0.0.0.0/0\") # SSH is on port 22, all IPs\n group.authorize(\"tcp\", 80, 80, \"0.0.0.0/0\")\n group.authorize(\"tcp\", 61000, 65000, \"0.0.0.0/0\")\n print \"Created new security group\"", "def create_sec_group(ec2, sec_group_name):\n sec = ec2.create_security_group(sec_group_name, 'Jvivian Boto SecGroup')\n port = 22\n sec.authorize('tcp', port, port, '0.0.0.0/0')", "def create_sec_group(self, conn, name, project):\n sec_group = conn.create_security_group(\n name=name, description=\"Security Group\",\n project_id=project.id)\n conn.create_security_group_rule(sec_group)\n return sec_group", "def create_secgroup(self, args):\n message = MessageClass()\n region = args[\"Region\"]\n sgid = args[\"Group-Name\"]\n desc = args[\"Description\"]\n\n # Boto3 client creation by providing the access_id and access_secret\n ec2 = boto3.client(service_name='ec2', region_name=region, api_version=None, use_ssl=True,\n verify=None, endpoint_url=None, aws_access_key_id=self.aws_access_key,\n aws_secret_access_key=self.aws_secret_token, aws_session_token=None,\n config=None)\n response = ec2.describe_vpcs()\n vpc_id = response.get('Vpcs', [{}])[0].get('VpcId', '')\n\n response = ec2.create_security_group(GroupName=sgid,\n Description=desc,\n VpcId=vpc_id)\n attachment = MessageAttachmentsClass()\n d = response[\"GroupId\"]\n attachment.title = d\n message.message_text = \"Security group created:\"\n message.attach(attachment)\n\n return message.to_json()", "def create(self, args=None, **_):\n name = utils.get_resource_id()\n\n create_args = dict(\n name=name,\n description=ctx.node.properties['description'],\n vpc_id=self._get_connected_vpc()\n )\n\n create_args = utils.update_args(create_args, args)\n\n if ctx.operation.retry_number == 0 and constants.EXTERNAL_RESOURCE_ID \\\n not in ctx.instance.runtime_properties:\n try:\n security_group = self.execute(\n self.client.create_security_group, create_args,\n raise_on_falsy=True)\n except (exception.EC2ResponseError,\n exception.BotoServerError) as e:\n raise NonRecoverableError('{0}'.format(str(e)))\n utils.set_external_resource_id(\n security_group.id, ctx.instance, 
external=False)\n\n self.resource_id = \\\n ctx.instance.runtime_properties[constants.EXTERNAL_RESOURCE_ID]\n security_group = self.get_resource()\n\n if not security_group:\n return False\n\n self._create_group_rules(security_group)\n\n return True", "def create_sg(vpc_id, description, group_name):\n client = boto3.client('ec2')\n security_group = str(group_name + \"_sg\")\n\n # get the security groups\n idle_sg = get_sg()\n\n print(idle_sg)\n print(security_group)\n\n # if security group doesnt exist, create it\n if security_group not in idle_sg:\n print(\"Creating SG\")\n return client.create_security_group(\n Description=description,\n GroupName=security_group,\n VpcId=vpc_id\n )\n return get_sg_id(security_group)", "def ex_create_security_group(self, name, description):\n params = {'Action': 'CreateSecurityGroup',\n 'GroupName': name,\n 'GroupDescription': description}\n return self.connection.request(self.path, params=params).object", "def add_secgroup(self, name=None, description=None):\n # print (\"UUUU\")\n if self.cloudman:\n if description is None:\n description = name\n try:\n self.cloudman.network.create_security_group(\n name=name,\n description=description)\n except:\n Console.warning(f\"secgroup {name} already exists in cloud. \"\n f\"skipping.\")\n else:\n raise ValueError(\"cloud not initialized\")", "def post_security_group_create(self, resource_dict):\n pass", "def create_security_group(self, name, description, vpc_id=None):\r\n params = {\r\n 'GroupName': name,\r\n 'GroupDescription': description\r\n }\r\n\r\n if vpc_id is not None:\r\n params['VpcId'] = vpc_id\r\n\r\n group = self.get_object('CreateSecurityGroup', params,\r\n SecurityGroup, verb='POST')\r\n group.name = name\r\n group.description = description\r\n return group", "def ex_create_security_group(self, resource_group, security_group, security_rules=None, location=None):\n if location is None:\n if self.default_location:\n location = self.default_location\n else:\n raise ValueError(\"location is required.\")\n\n target = \"/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Network/networkSecurityGroups/%s\" % (\n self.subscription_id, resource_group, security_group)\n params = {\"api-version\": \"2016-09-01\"}\n data = {\n 'location': location.id,\n 'tags': {}\n }\n\n if security_rules:\n data.update({'properties': {'securityRules': security_rules}})\n\n r = self.connection.request(action=target,\n params=params,\n data=data,\n method=\"PUT\")\n\n while r.object is None:\n time.sleep(1)\n\n return AzureSecurityGroup(r.object[\"id\"], r.object[\"name\"], r.object[\"location\"], r.object[\"properties\"])", "def create_groups(**kwargs):\n for gname in SEC_GROUP_NAMES.itervalues():\n Group.objects.get_or_create(name=gname)", "def save_security_group(resp, payload):\n if resp.get(\"code\") != 0:\n return None, SaveDataError(\"Create security group failed\")\n uuid = resp[\"data\"][\"ret_set\"][0][\"id\"]\n name = payload.get(\"description\")\n sg_id = payload.get(\"sg_id\")\n zone_name = payload.get(\"zone\")\n user_name = payload.get(\"owner\")\n zone = ZoneModel.get_zone_by_name(zone_name)\n user = User.objects.get(username=user_name)\n _security_group_ins, err = RdsSecurityGroupModel.objects.create(uuid,\n sg_id,\n name,\n zone,\n user)\n return _security_group_ins, err", "def security_group_rule_create(auth=None, **kwargs):\n cloud = get_operator_cloud(auth)\n kwargs = _clean_kwargs(**kwargs)\n return cloud.create_security_group_rule(**kwargs)", "def do_add_security_group(cs, args):\n opts = {}\n 
opts['id'] = args.container\n opts['security_group'] = args.security_group\n opts = zun_utils.remove_null_parms(**opts)\n try:\n cs.containers.add_security_group(**opts)\n print(\"Request to add security group for container %s \"\n \"has been accepted.\" % args.container)\n except Exception as e:\n print(\"Add security group for container %(container)s \"\n \"failed: %(e)s\" % {'container': args.container, 'e': e})", "def __create_new_group(self, group_name) -> None:\n group = Group(name=group_name)\n group.save()\n\n self.__add_permission_to_group(group)", "def create():\n name = request.json['name']\n level = request.json['level']\n manager = request.json['manager']\n if models.user.Group.get(name):\n raise Conflict('Group already exists.', creation=False)\n else:\n authorize(manager, level=level)\n group = models.user.Group(name=name, level=level, manager=manager)\n models.db.session.add(group)\n models.db.session.commit()\n return response(200, creation=True)", "def test_create_group(self):\n pass", "def test_create_group(self):\n pass", "def test_create_resource_group(self):\n pass", "def create_seurity_group(self):\n return True", "def create_group(group_id, group_name):\n\n kwargs = config.DEFAULT_REST_KWARGS\n kwargs[\"data\"] = {\"id\": group_id, \"name\": group_name}\n http_response = call_rest_api(\"/identities/groups/\", \"post\", **kwargs)\n if http_response.status_code != 201: # 201 = 'new group created'\n raise ValueError(http_response.text)\n logger.log(f\"New custom group, {group_name}, with ID: {group_id}, was created successfully.\")", "def handle_region(self, region, args):\n result = [CHECKMARK, str(region), \"created security group '{}'\".format(GROUP_NAME)]\n\n try:\n # Create the security group\n response = region.conn.create_security_group(\n Description='Security group for Alia replicas and clients.',\n GroupName=GROUP_NAME,\n )\n\n # Get the newly created group id\n group_id = response[\"GroupId\"]\n\n # Allow all network traffic from within the security group\n response = region.conn.authorize_security_group_ingress(\n GroupId = group_id,\n IpPermissions = [\n {\n \"IpProtocol\": \"tcp\", \"FromPort\": 0, \"ToPort\": 65535,\n \"UserIdGroupPairs\": [\n {\n \"GroupId\": group_id,\n \"Description\": \"allow all traffic from the same group\",\n }\n ]\n }\n ]\n )\n\n # Open Alia-specific ports for access\n reponse = region.conn.authorize_security_group_ingress(\n GroupId = group_id,\n IpPermissions = [\n {\n \"IpProtocol\": \"tcp\", \"FromPort\": 22, \"ToPort\": 22,\n \"IpRanges\": [\n {\n \"CidrIp\": \"0.0.0.0/0\",\n \"Description\": \"allow remote SSH access\"\n }\n ]\n },\n {\n \"IpProtocol\": \"tcp\", \"FromPort\": 3264, \"ToPort\": 3285,\n \"IpRanges\": [\n {\n \"CidrIp\": \"0.0.0.0/0\",\n \"Description\": \"external Alia service access\",\n }\n ],\n \"Ipv6Ranges\": [\n {\n \"CidrIpv6\": \"::/0\",\n \"Description\": \"external Alia service IPv6 access\"\n }\n ]\n },\n {\n \"IpProtocol\": \"tcp\", \"FromPort\": 5356, \"ToPort\": 5356,\n \"IpRanges\": [\n {\n \"CidrIp\": \"0.0.0.0/0\",\n \"Description\": \"research services access\"\n }\n ]\n },\n {\n \"IpProtocol\": \"tcp\", \"FromPort\": 4157, \"ToPort\": 4157,\n \"IpRanges\": [\n {\n \"CidrIp\": \"0.0.0.0/0\",\n \"Description\": \"master services access\",\n }\n ]\n },\n ]\n )\n\n\n except Exception as e:\n result[0] = CROSSMARK\n result[2] = str(e)\n\n\n return result", "def pre_security_group_create(self, resource_dict):\n pass", "def create_group(self, name):\n\t\tdata = {\"name\":name}\n\t\tresponse = 
self.client.post(self._endpoint + \"/group\", content=data)\n\t\treturn Group(\n\t\t\tresponse.json['group_id'],\n\t\t\tself.user_id,\n\t\t\tself.site_id,\n\t\t\tdata=response.json\n\t\t)", "def create_group():\n groupname = request.get_json().get(\"name\")\n description = request.get_json().get(\"description\")\n grp = admin.create_group(current_app.scoped_session(), groupname, description)\n if grp:\n response = admin.get_group_info(current_app.scoped_session(), groupname)\n else:\n response = {\"result\": \"group creation failed\"}\n response = jsonify(response)\n return response", "def create_group(self, group_name, group_type):\n grp_data = {\"name\": group_name, \"type\": group_type}\n return requests.post(self.groups_url, data=json.dumps(grp_data),\n headers=self.headers)", "def createGroup(self, name):\n new_group = ET.SubElement(self._root,'group')\n group_name = ET.SubElement(new_group, 'name')\n group_name.text = name\n # update the document's groups\n self._groups = self._root.findall('group') \n print 'Creating group, \\'%s\\'' % name\n return CAGroup(new_group)", "def createGroup(self):\n return _libsbml.GroupsModelPlugin_createGroup(self)", "def create_group(self, groupname):\n data = {\"groupname\": groupname}\n headers = {\"user-agent\": self.u_agent}\n req_url = self.normalize_admin_url(\"groups\")\n res = requests.post(\n req_url,\n headers=headers,\n auth=self.auth,\n data=json.dumps(data),\n verify=False,\n )\n if res.status_code == 201:\n return Response(0, u\"Group {} has been created\".format(groupname))\n else:\n return Response(res.status_code, res)", "async def create(\n self,\n resource_group_name: str,\n project_name: str,\n group_name: str,\n group: Optional[\"models.Group\"] = None,\n **kwargs\n ) -> \"models.Group\":\n cls = kwargs.pop('cls', None) # type: ClsType[\"models.Group\"]\n error_map = {\n 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError\n }\n error_map.update(kwargs.pop('error_map', {}))\n api_version = \"2018-06-01\"\n content_type = kwargs.pop(\"content_type\", \"application/json\")\n accept = \"application/json\"\n\n # Construct URL\n url = self.create.metadata['url'] # type: ignore\n path_format_arguments = {\n 'subscriptionId': self._serialize.url(\"self._config.subscription_id\", self._config.subscription_id, 'str'),\n 'resourceGroupName': self._serialize.url(\"resource_group_name\", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\\w\\._\\(\\)]+$'),\n 'projectName': self._serialize.url(\"project_name\", project_name, 'str'),\n 'groupName': self._serialize.url(\"group_name\", group_name, 'str'),\n }\n url = self._client.format_url(url, **path_format_arguments)\n\n # Construct parameters\n query_parameters = {} # type: Dict[str, Any]\n query_parameters['api-version'] = self._serialize.query(\"api_version\", api_version, 'str')\n\n # Construct headers\n header_parameters = {} # type: Dict[str, Any]\n header_parameters['Content-Type'] = self._serialize.header(\"content_type\", content_type, 'str')\n header_parameters['Accept'] = self._serialize.header(\"accept\", accept, 'str')\n\n body_content_kwargs = {} # type: Dict[str, Any]\n if group is not None:\n body_content = self._serialize.body(group, 'Group')\n else:\n body_content = None\n body_content_kwargs['content'] = body_content\n request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)\n pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)\n response = 
pipeline_response.http_response\n\n if response.status_code not in [200, 201]:\n map_error(status_code=response.status_code, response=response, error_map=error_map)\n raise HttpResponseError(response=response, error_format=ARMErrorFormat)\n\n response_headers = {}\n if response.status_code == 200:\n response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id'))\n deserialized = self._deserialize('Group', pipeline_response)\n\n if response.status_code == 201:\n response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id'))\n deserialized = self._deserialize('Group', pipeline_response)\n\n if cls:\n return cls(pipeline_response, deserialized, response_headers)\n\n return deserialized", "def get_or_make_group(ec2, name, vpc_id=None, quiet=False):\n groups = ec2.security_groups.all()\n groups = [g for g in groups if g.group_name == name and g.vpc_id == vpc_id]\n if len(groups) > 0:\n return groups[0]\n else:\n if not quiet:\n print(\"Creating security group \" + name)\n vpc_id = vpc_id if vpc_id is not None else ''\n sg = ec2.create_security_group(\n GroupName=name,\n Description='AbStar cluster group',\n VpcId=vpc_id)\n return sg", "def create_acl(self, context, sg):\n self.security_group_driver.create_acl(context, sg)", "def create_group(self):\n # TODO: Assuming first server is good - need to make fallback logic\n return self.session.get_any(\"{base}{request_url}\".format(base=self.servers[0],\n request_url=F\"/GroupV2/Create/\"))", "def create_groups(self, role):\n security_group_names = self._get_all_group_names()\n\n cluster_group_name = self.get_cluster_group_name()\n if not cluster_group_name in security_group_names:\n self.ec2Connection.create_security_group(cluster_group_name, \"Hadoop cluster (%s)\" % (self.name))\n self.ec2Connection.authorize_security_group(cluster_group_name, cluster_group_name)\n # Allow SSH from anywhere\n self.ec2Connection.authorize_security_group(cluster_group_name, ip_protocol=\"tcp\", from_port=22, to_port=22, cidr_ip=\"0.0.0.0/0\")\n\n role_group_name = self.group_name_for_role(role)\n if not role_group_name in security_group_names:\n self.ec2Connection.create_security_group(role_group_name, \"Hadoop %s (%s)\" % (role, self.name))", "def create_group(self, **kwargs):\n post_body = json.dumps({'group': kwargs})\n resp, body = self.post('groups', post_body)\n self.expected_success(201, resp.status)\n body = json.loads(body)\n return rest_client.ResponseBody(resp, body)", "def test_createGroup(self):\n\t\tself.client.force_authenticate(user=User.objects.get(id=1))\n\t\turl = \"/groups/\"\n\t\tdata = {\n\t\t\t'name' : 'testGroup3',\n\t\t\t'description' : 'This is another test group that just created.',\n\t\t\t'isPublic' : True\n\t\t}\n\t\tresponse = self.client.post(url, data, format='json')\n\t\tself.assertEqual(response.status_code, status.HTTP_201_CREATED)\n\t\tself.assertEqual(response.data[\"id\"], 3)\n\t\tself.assertEqual(response.data[\"name\"], 'testGroup3')", "def create_security_group_rule(self, body=None):\r\n return self.post(self.security_group_rules_path, body=body)", "def ensure_security_groups_created(vpc, environment):\n conglomerate_name = environment + '-conglomerate'\n load_balancer_name = environment + '-load-balancer'\n\n existing = vpc.security_groups.filter(Filters=[\n { 'Name': 'group-name', 'Values': [ conglomerate_name, load_balancer_name ] }\n ])\n ret = {}\n for security_group in existing:\n if security_group.group_name == conglomerate_name:\n 
ret['conglomerate'] = security_group\n elif security_group.group_name == load_balancer_name:\n ret['load-balancer'] = security_group\n else:\n raise Exception(\"Unexpected security group name: \" + security_group.group_name)\n\n if not ret['conglomerate']:\n # untested\n ret['conglomerate'] = vpc.create_security_group(\n GroupName=conglomerate_name,\n Description=conglomerate_name\n )\n if not ret['load-balancer']:\n # untested\n ret['load-balancer'] = vpc.create_security_group(\n GroupName=load_balancer_name,\n Description=load_balancer_name\n )\n\n try:\n ret['conglomerate'].authorize_ingress(IpPermissions=[\n { 'IpProtocol': 'icmp', 'FromPort': 0, 'ToPort': 255, 'IpRanges': [ { 'CidrIp': '0.0.0.0/0' } ] },\n { 'IpProtocol': 'tcp', 'FromPort': 9000, 'ToPort': 9000, 'UserIdGroupPairs': [ { 'GroupId': ret['load-balancer'].id } ] },\n ])\n except botocore.exceptions.ClientError as e:\n if e.response['Error']['Code'] != 'InvalidPermission.Duplicate':\n raise e\n\n try:\n ret['load-balancer'].authorize_ingress(IpPermissions=[\n { 'IpProtocol': 'tcp', 'FromPort': 80, 'ToPort': 80 },\n { 'IpProtocol': 'tcp', 'FromPort': 443, 'ToPort': 443 },\n { 'IpProtocol': 'icmp', 'FromPort': 0, 'ToPort': 255, 'IpRanges': [ { 'CidrIp': '0.0.0.0/0' } ] },\n { 'IpProtocol': 'tcp', 'FromPort': 1024, 'ToPort': 65535, 'IpRanges': [ { 'CidrIp': Constants['VpcCidr'] } ] },\n ])\n except botocore.exceptions.ClientError as e:\n if e.response['Error']['Code'] != 'InvalidPermission.Duplicate':\n raise e\n\n return ret", "def test_aws_service_api_vm_security_group_put(self):\n pass", "def create_new_group(self, a, b):\n self.groups[self.group_id] = set([a,b])\n self.node_id[a] = self.node_id[b] = self.group_id\n self.group_id += 1", "async def create_group(ctx, name: str, role: str, group_type: str=None, comp: str=None, rating: int=None, time: str=None):\n\n owner = ctx.message.author.name\n \n if comp:\n comp = [int(i) for i in comp.split()] # convert string input to array\n\n new_group = Group(owner, name, role, group_type, rating, time, comp)\n bg_bot.manager.add_group(owner, new_group)\n \n await ctx.send(f'Created new {group_type} group for leader {owner}!')", "def groups_create(self, mar, request):\n if not permissions.CanCreateGroup(mar.perms):\n raise permissions.PermissionException(\n 'The user is not allowed to create groups.')\n\n user_dict = self._services.user.LookupExistingUserIDs(\n mar.cnxn, [request.groupName])\n if request.groupName.lower() in user_dict:\n raise exceptions.GroupExistsException(\n 'group %s already exists' % request.groupName)\n\n if request.ext_group_type:\n ext_group_type = str(request.ext_group_type).lower()\n else:\n ext_group_type = None\n group_id = self._services.usergroup.CreateGroup(\n mar.cnxn, self._services, request.groupName,\n str(request.who_can_view_members).lower(),\n ext_group_type)\n\n return api_pb2_v1.GroupsCreateResponse(\n groupID=group_id)", "def create_group_command(client: MsGraphClient, args: dict) -> tuple[str, dict, dict]:\n required_properties = {\n 'displayName': str(args.get('display_name')),\n 'mailNickname': str(args.get('mail_nickname')),\n 'mailEnabled': args.get('mail_enabled') == 'true',\n 'securityEnabled': args.get('security_enabled')\n }\n\n # create the group\n group = client.create_group(required_properties)\n\n # display the new group and it's properties\n group_readable, group_outputs = parse_outputs(group)\n human_readable = tableToMarkdown(name=f\"{required_properties['displayName']} was created successfully:\",\n t=group_readable,\n 
headers=['ID', 'Display Name', 'Description', 'Created Date Time', 'Mail',\n 'Security Enabled', 'Mail Enabled'],\n removeNull=True)\n entry_context = {f'{INTEGRATION_CONTEXT_NAME}(val.ID === obj.ID)': group_outputs}\n return human_readable, entry_context, group", "def createGroup(self):\n return _libsbml.ListOfGroups_createGroup(self)", "def create(self, group_name):\n METHOD = 'POST'\n API_PATH = '/groups/create'\n\n data = {'group_name': group_name}\n\n # Make REST call\n resp = self._rest_call[METHOD](API_PATH, data=data)\n if resp.status_code == 200:\n return resp.json()\n\n elif resp.status_code == 403:\n raise AuthorizationError(\"User is not authorized or token is incorrect.\")\n\n else:\n if resp.json().get(\"error_code\") in ERROR_CODES:\n raise ERROR_CODES[resp.json().get('error_code')](resp.json().get('message'))\n else:\n raise APIError(\"Response code {0}: {1} {2}\".format(resp.status_code,\n resp.json().get('error_code'),\n resp.json().get('message')))", "def create_group(self, group):\n if self.dryrun:\n self.logger.info(\"Would create group %s\", group)\n return FakeGroupId()\n result = self.conn.usergroup.create(name=group)\n groupid = result['usrgrpids'][0]\n self.logger.info(\"Create group %s with id %s\", group, groupid)\n return groupid", "def mk_rg1(self):\n name = f\"{self.env_name}/sg/project-default\"\n self.sg_project_default = ec2.SecurityGroup(\n \"SecurityGroupProjectDefault\",\n rp_GroupDescription=\"Resources that has this security can talk to each other\",\n p_GroupName=name,\n p_VpcId=self.vpc_id,\n p_SecurityGroupIngress=[\n ec2.PropSecurityGroupIngress(\n rp_IpProtocol=\"-1\",\n p_FromPort=-1,\n p_ToPort=-1,\n p_CidrIp=f\"{authorized_ip}/32\",\n )\n for authorized_ip in self.sg_authorized_ips\n ],\n p_Tags=cf.Tag.make_many(\n Name=name\n ),\n )", "def add_group():\n name = request.form['name']\n data, code, message = FIELD_SERVICE.add_group(name)\n return __result(data, code, message)", "def test_create_group(self):\n groupid = 'villains'\n\n # create the group\n resp = self.app.post('/groups', data=json.dumps({'name':groupid}))\n assert resp.status_code == 200\n\n # Fetch the group to check that it persists\n resp = self.app.get('/groups/{}'.format(groupid))\n assert resp.status_code == 200", "def capacitygroup_create(cmd_ctx, cpc, **options):\n cmd_ctx.execute_cmd(\n lambda: cmd_capacitygroup_create(cmd_ctx, cpc, options))", "def request_group_create():\n return Response(render_template('admin/group/create-update.html',\n csrf_token=(\n get_raw_jwt() or {}).get(\"csrf\"),\n target=\"/admin/group/create\"),\n mimetype='text/html')", "def post(self):\n args = parser.parse_args()\n user_group = UserGroup()\n user_group.name = args['name']\n user_group.createdby = auth.username()\n db_session.add(user_group)\n db_session.commit()\n return user_group, 201", "def test_08_security_group(self):\n # Validate the following:\n # 1. Create a project\n # 2. Assign some security groups to that project\n # 3. 
Verify the security groups can only be assigned to VM belonging\n # to that project.\n\n security_group = SecurityGroup.create(\n self.apiclient,\n self.services[\"security_group\"],\n projectid=self.project.id\n )\n self.debug(\"Created security group with ID: %s\" % security_group.id)\n # Default Security group should not have any ingress rule\n sercurity_groups = SecurityGroup.list(\n self.apiclient,\n projectid=self.project.id\n )\n self.assertEqual(\n isinstance(sercurity_groups, list),\n True,\n \"Check for list security groups response\"\n )\n\n self.assertNotEqual(\n len(sercurity_groups),\n 0,\n \"Check List Security groups response\"\n )\n # Authorize Security group to SSH to VM\n ingress_rule = security_group.authorize(\n self.apiclient,\n self.services[\"security_group\"],\n projectid=self.project.id\n )\n self.assertEqual(\n isinstance(ingress_rule, dict),\n True,\n \"Check ingress rule created properly\"\n )\n\n self.debug(\n \"Authorizing ingress rule for sec group ID: %s for ssh access\"\n % security_group.id)\n self.virtual_machine = VirtualMachine.create(\n self.apiclient,\n self.services[\"server\"],\n serviceofferingid=self.service_offering.id,\n securitygroupids=[security_group.id],\n projectid=self.project.id\n )\n self.debug(\"Deployed VM (ID: %s) in project: %s\" % (\n self.virtual_machine.id,\n self.project.id\n ))\n self.assertEqual(\n self.virtual_machine.state,\n 'Running',\n \"VM state should be running after deployment\"\n )\n # Deploy another VM with same security group outside the project\n self.debug(\n \"Deploying VM with security group: %s outside project:%s\" % (\n security_group.id,\n self.project.id\n ))\n with self.assertRaises(Exception):\n VirtualMachine.create(\n self.apiclient,\n self.services[\"server\"],\n serviceofferingid=self.service_offering.id,\n accountid=self.account.name,\n domainid=self.account.domainid,\n securitygroupids=[security_group.id],\n )\n return", "def create_group(self, group_name, user_ids=[], role_ids=[]):\n payload = {}\n payload['name'] = group_name\n payload['user_ids'] = user_ids\n payload['role_ids'] = role_ids\n return Client._post(self, payload)", "def test_create_team_user_group(client):\n group = client.create_team_user_group(TEAM_ID, {\n \"name\": \"Python group\",\n \"is_reviewer\": True,\n \"is_admin\": True,\n \"admin_rights\": [\"upload\"]\n })\n assert group.team_id == TEAM_ID\n assert group.group_id == NEW_GROUP_ID\n assert group.name == \"Python group\"\n assert group.permissions['is_admin']\n assert group.permissions['is_reviewer']\n assert group.permissions['admin_rights'] == [\"upload\"]", "def upload_secgroup(self, name=None):\n\n if name is None:\n groups = Secgroup().list()\n for group in groups:\n print(\"upload group:\", group['name'])\n self.upload_secgroup(name=group['name'])\n return\n\n groups = Secgroup().list()\n\n if name is not None:\n for group in groups:\n if group['name'] == name:\n break\n\n rules = SecgroupRule().list()\n\n data = {}\n for rule in rules:\n data[rule['name']] = rule\n\n sgroups = self.list_secgroups(name)\n\n group_exists = False\n if len(sgroups) > 0:\n print(\" Warning group already exists\")\n group_exists = True\n\n if not group_exists:\n self.add_secgroup(name=name, description=group['description'])\n\n for r in group['rules']:\n if r != 'nothing':\n found = data[r]\n print(\" \", \"rule:\", found['name'])\n self.add_secgroup_rule(\n name=name,\n port=found[\"ports\"],\n protocol=found[\"protocol\"],\n ip_range=found[\"ip_range\"])\n\n else:\n\n for r in 
group['rules']:\n if r != 'nothing':\n found = data[r]\n print(\" \", \"rule:\", found['name'])\n self.add_rules_to_secgroup(\n name=name,\n rules=[found['name']])", "def __create_group(self):\n\n group = time.strftime(_GROUP_NAME_FORMAT, time.localtime())\n LOG.info(\"Creating backup group '%s'.\", group)\n\n group_path = self.group_path(group)\n\n try:\n os.mkdir(group_path)\n except EnvironmentError as e:\n if e.errno != errno.EEXIST:\n raise Error(\"Unable to create a new backup group '{}': {}.\",\n group_path, psys.e(e))\n\n self.__on_group_created(group)\n\n return group", "def createGroup(self, group, members):\n connection = self.sock\n\n connection.send(\"create_group\".encode())\n\n status_code = connection.recv(2)\n\n if status_code != SUCCESS:\n print(\"Error\")\n return -1\n message = []\n message.append(\"gname:\")\n message.append(group)\n message.append(\";\")\n message.append(\"members:\")\n for i in members:\n message.append(i)\n message.append(\",\")\n if members:\n message.pop()\n message = ''.join(message)\n message = message.encode()\n connection.send(message)\n result = connection.recv(2)\n if result != SUCCESS:\n return -1\n\n packed_gid = connection.recv(4)\n gid = struct.unpack(\"<L\", packed_gid)\n repoids.append(gid)\n return 1", "def create_group(self, event):\n body = event['body']\n body = json.loads(body)\n\n # Required field in POST body\n if 'group_name' not in body:\n return self.get_bad_request('POST body missing group_name')\n\n group_name = body['group_name']\n user = self.mealShareUsers.get_user_cognito_data(event)\n user_id = user['user_id']\n \n # Add the creator to the group, as the initial member\n group_id = self.mealShareGroups.create_group(group_name)\n success = self.mealShareGroups.add_user_to_group(user_id, group_id)\n if success:\n return {\n 'statusCode': 200,\n 'statusMessage': 'Successfully created group {} with ID {}'.format(group_name, group_id),\n 'group_id': group_id,\n 'group_name': group_name,\n 'user_id': user_id\n }\n else:\n return {\n 'statusCode': 500,\n 'statusMessage': 'FAILED to create group {} by user {}'.format(group_name, user_id),\n 'group_id': group_id,\n 'group_name': group_name,\n 'user_id': user_id\n }", "def createGroup(self, *group):\n if not self.rank:\n logging.info('Creating atom group {}'.format(group))\n\n if not len(group):\n for idSS in self.pargs['idSS']:\n self.lmp.command('group group{} type {}'.format(idSS, idSS))\n else:\n self.lmp.command('group ' + ('{} ' * len(group)).format(*group))", "def create_group(self, create_group_details, **kwargs):\n resource_path = \"/groups\"\n method = \"POST\"\n\n # Don't accept unknown kwargs\n expected_kwargs = [\n \"retry_strategy\",\n \"opc_retry_token\"\n ]\n extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]\n if extra_kwargs:\n raise ValueError(\n \"create_group got unknown kwargs: {!r}\".format(extra_kwargs))\n\n header_params = {\n \"accept\": \"application/json\",\n \"content-type\": \"application/json\",\n \"opc-retry-token\": kwargs.get(\"opc_retry_token\", missing)\n }\n header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}\n\n retry_strategy = self.retry_strategy\n if kwargs.get('retry_strategy'):\n retry_strategy = kwargs.get('retry_strategy')\n\n if retry_strategy:\n if not isinstance(retry_strategy, retry.NoneRetryStrategy):\n self.base_client.add_opc_retry_token_if_needed(header_params)\n return retry_strategy.make_retrying_call(\n self.base_client.call_api,\n 
resource_path=resource_path,\n method=method,\n header_params=header_params,\n body=create_group_details,\n response_type=\"Group\")\n else:\n return self.base_client.call_api(\n resource_path=resource_path,\n method=method,\n header_params=header_params,\n body=create_group_details,\n response_type=\"Group\")", "def create_rds_security_group_rule(payload):\n rules = payload.pop(\"rules\")\n succ_count = 0\n succ_sgr_ids = []\n _code, _msg, _status = 0, \"Success\", 200\n sg_id = payload.pop(\"sg_id\")\n security_group = RdsSecurityGroupModel.get_security_by_id(sg_id=sg_id)\n sg_uuid = security_group.uuid\n for rule in rules:\n if rule.get(\"priority\") == None:\n rule.update({\"priority\": 1})\n rule.update({\"direction\": \"INGRESS\"})\n priority = rule.get(\"priority\")\n sgr_id = make_security_group_rule_id()\n\n # if str(sg_id).strip().startswith(DEFAULT_SECURITY_GROUP_PREFIX):\n # return console_response(\n # SecurityErrorCode.DEFAULT_SECURITY_CANNOT_MODIFIED,\n # \"cannot add, delete or modified rules in default security group\")\n payload.pop(\"protocol\", None)\n payload.pop(\"port_range_min\", None)\n payload.pop(\"port_range_max\", None)\n payload.pop(\"remote_ip_prefix\", None)\n payload.pop(\"remote_group_id\", None)\n payload.update(rule)\n payload.update({\"sgr_id\": sgr_id})\n payload.update({\"security_group_id\": sg_uuid})\n create_status = {}\n # resp = api.get(payload, timeout=10)\n resp = api.get(payload=deepcopy(payload))\n\n if resp.get(\"code\") != 0:\n create_status[sg_id] = resp.get(\"msg\")\n _code = resp.get(\"code\")\n _msg = resp.get(\"msg\")\n if str(_msg).find(\"already exists\") != -1:\n _code = SecurityErrorCode.SECURITY_GROUP_RULE_ALREADY_EXIST\n\n else:\n _security_group_rule_info = resp[\"data\"][\"ret_set\"][0]\n _security_group_rule_info.update({\"sgr_id\": sgr_id})\n _security_group_rule_info.update({\"priority\": priority})\n\n create_status[sgr_id] = _security_group_rule_info\n _security_group_rule, err = save_security_group_rule(resp, payload)\n if err is not None:\n create_status[sg_id] = str(err)\n _code = SecurityErrorCode.SAVE_SECURITY_GROUP_RULE_FAILED\n _msg = str(err)\n succ_count = succ_count + 1\n succ_sgr_ids.append({\"sgr_id\":sgr_id})\n return console_response(_code, _msg, succ_count, [succ_sgr_ids])", "def createNewGroup():\n if request.method == 'POST':\n groupname = request.form['groupname1']\n internal = request.form['internal1']\n external = request.form['external1']\n userNo = request.form['usersNo1']\n if 'node1' in request.form:\n node = int(request.form['node1'])\n else:\n node = -1\n\n if int(userNo) == 0:\n if hl.createGroup(groupname, internal, external, node):\n return True\n elif int(userNo) > 0:\n if hl.createGroup(groupname, internal, external, node, genUsers=True, numUsers=int(userNo)):\n return True\n\n return False", "def test_user_group_controller_create(self):\n pass", "def process_security_group ( ec2_conn, vpc, base_name, params, secgrp_type = None, secgrp_description = None ) :\n if not secgrp_type :\n secgrp_type = params[ 'type' ]\n if not secgrp_description :\n secgrp_description = params[ 'description' ]\n\n secgrp = find_group( ec2_conn, base_name, secgrp_type )\n if not secgrp :\n if params.get( 'create', 'NO' ) == 'YES' :\n secgrp_name = get_secgrp_name( base_name, secgrp_type )\n print \"Creating security group with name: \" + secgrp_name\n secgrp = create_secgrp( ec2_conn, vpc, secgrp_name, secgrp_description )\n else :\n print \"ERROR: Could not find group with name \" + get_secgrp_name( base_name, 
secgrp_type )\n sys.exit( 1 )\n\n print \"Prepping rules for security group \" + secgrp.name\n remove_all_rules( ec2_conn, [ secgrp ], True, base_name )\n\n # Reload group to retrieve new object with no rules.\n secgrp = find_group( ec2_conn, base_name, secgrp_type )\n\n is_public = params.get( 'public' ) == 'YES'\n if is_public :\n nat_secgrp = None\n if params.get( 'os-update' ) == 'YES' :\n ec2_conn.authorize_security_group_egress( group_id = secgrp.id,\n ip_protocol = \"tcp\",\n from_port = 80,\n to_port = 80,\n cidr_ip = all_ip_cidr )\n if params.get( 'public-tcp-ports' ) :\n public_ports = params[ 'public-tcp-ports' ]\n for port in public_ports :\n secgrp.authorize( ip_protocol = \"tcp\",\n from_port = port,\n to_port = port,\n cidr_ip = all_ip_cidr )\n\n if params.get( 'incoming-cidr-rules' ) :\n for incoming_rule in params[ 'incoming-cidr-rules' ] :\n start_port = incoming_rule.get( 'port' )\n end_port = start_port\n\n protocol = get_secgrp_protocol_param( incoming_rule )\n cidr_list = get_cidr_param( incoming_rule[ 'cidr' ] )\n\n secgrp.authorize( ip_protocol = protocol,\n from_port = start_port,\n to_port = end_port,\n cidr_ip = cidr_list )\n\n else :\n nat_secgrp = find_group( ec2_conn, base_name, 'NAT' )\n # Grant NAT access to login to the machine\n if not nat_secgrp :\n print \"ERROR: Could not find NAT security group!\"\n sys.exit( 1 )\n grant_ssh_access( ec2_conn, [ secgrp ], nat_secgrp )\n if params.get( 'os-update' ) == 'YES' :\n grant_cidr_access( ec2_conn, all_ip_cidr, [ secgrp ], 80, nat_secgrp )\n # Need to reload secgrp so it contains latest rules\n secgrp = find_group( ec2_conn, base_name, secgrp_type )\n # Need to reload NAT secgrp so it contains latest rules\n nat_secgrp = find_group( ec2_conn, base_name, 'NAT' )\n\n if params.get( 'outgoing-cidr-rules' ) :\n for outgoing_rule in params[ 'outgoing-cidr-rules' ] :\n start_port = outgoing_rule.get( 'port' )\n end_port = start_port\n protocol = get_secgrp_protocol_param( outgoing_rule )\n cidr_list = get_cidr_param( outgoing_rule[ 'cidr' ] )\n\n for cidr in cidr_list :\n grant_cidr_access( ec2_conn, cidr, [ secgrp ], start_port, nat_secgrp, protocol )\n\n if params.get( 'outgoing-group-rules' ) :\n for outgoing_rule in params[ 'outgoing-group-rules' ] :\n start_port = outgoing_rule.get( 'port' )\n end_port = start_port\n protocol = get_secgrp_protocol_param( outgoing_rule )\n target_secgrp_type = outgoing_rule[ 'group-type' ]\n target_secgrp = find_group( ec2_conn, base_name, target_secgrp_type )\n grant_grp_access( ec2_conn, [ secgrp ], target_secgrp, start_port, protocol )\n \n if params.get( 'incoming-group-rules' ) :\n for incoming_rule in params[ 'incoming-group-rules' ] :\n start_port = incoming_rule.get( 'port' )\n end_port = start_port\n protocol = get_secgrp_protocol_param( incoming_rule )\n incoming_secgrp_type = incoming_rule[ 'group-type' ]\n incoming_secgrp = find_group( ec2_conn, base_name, incoming_secgrp_type )\n grant_grp_access( ec2_conn, [ incoming_secgrp ], secgrp, start_port, protocol )\n\n if params.get( 'self-rules' ) :\n for self_rule in params[ 'self-rules' ] :\n start_port = self_rule.get( 'port' )\n end_port = start_port\n\n if not start_port :\n start_port = self_rule[ 'port-range' ][ 'start' ]\n end_port = self_rule[ 'port-range' ][ 'end' ]\n\n protocol = get_secgrp_protocol_param( self_rule )\n\n grant_grp_self_access( ec2_conn, secgrp, start_port, end_port, protocol )\n\n # Reload the security group so it contains all the latest rules\n secgrp = find_group( ec2_conn, base_name, 
secgrp_type )\n return ( secgrp_type, secgrp )", "async def command_create(self, context):\n # await self._create_new_role(context, name, target=GROUP_CATEGORY_NAME)\n print('main create')", "def new_group(request):\n return edit_group(request, None)", "def create_group(self, name, pswd, key=None):\n\t\tif not self.did_handshake:\n\t\t\treturn False\n\t\thpo = hashlib.sha256(pswd)\n\t\thp = hpo.digest()\n\t\ttozip = [name, hp]\n\t\tif key is not None:\n\t\t\ttozip.append(key)\n\t\ttosend = \"\\x00\".join([base64.b64encode(e) for e in tozip])\n\t\tself.sendMessage(ID_CTRL + \"CREATE\"+tosend, True)\n\t\tself.createstate = 1\n\t\ttry:\n\t\t\twhile self.createstate == 1:\n\t\t\t\tpass\n\t\texcept KeyboardInterrupt:\n\t\t\t_stop()\n\t\t\treturn False\n\t\tif self.createstate == 2:\n\t\t\tself.createstate = 0\n\t\t\tself.__key = pswd\n\t\t\treturn True\n\t\telse:\n\t\t\tself.createstate = 0\n\t\t\treturn False", "def test_verify_that_you_can_create_a_new_group():", "def create_group(user):\n if connexion.request.is_json:\n users_group = [User.from_dict(d) for d in connexion.request.get_json()]\n response = (\"success\", 201)\n if len(users_group) > 4:\n response = (\"Max number of player is 4\", 400)\n else:\n groupId = GroupStorageController.add_new_group(users_group)\n return response", "def create_security_group_rule(self, sg_id, protocol='', cidr='0.0.0.0/0',\n from_port='', to_port='',\n direction=\"ingress\"):\n _url = \"http://\" + self.host_ip + \":9696/v2.0/security\"\\\n \"-group-rules.json\"\n _headers = {'Content-type': 'application/json',\n 'x-auth-token': self.project_info[\"token_project\"]}\n _tenant_sec_data = {\"security_group_rule\":\n {\"security_group_id\": sg_id,\n \"remote_ip_prefix\": cidr,\n \"direction\": direction\n }\n }\n if protocol:\n _tenant_sec_data[\"security_group_rule\"]['protocol'] = protocol\n if from_port and to_port:\n _tenant_sec_data[\"security_group_rule\"][\n \"port_range_min\"] = from_port\n _tenant_sec_data[\"security_group_rule\"][\"port_range_max\"] = to_port\n\n _body = json.dumps(_tenant_sec_data)\n response = self.request(\"POST\", _url, _headers, _body)\n\n if response is None:\n LOG_OBJ.error(\"No response from Server while creating\"\n \"security groups for tenant: %s\"\n % self.project_info[\"project_id\"])\n return\n\n if response.status not in [200, 201, 202, 203, 204]:\n LOG_OBJ.error(\"Adding Security Group Rule failed\"\n \" with status %s \" % response.status)\n return\n\n LOG_OBJ.debug(\"Created Security Group Rule.\")\n return True", "def create_firewall_group(self, **kwargs):\n self._lookup_ingress_egress_firewall_policy_ids(kwargs)\n if 'ports' in kwargs:\n kwargs['ports'] = self._get_port_ids(kwargs['ports'])\n return self.network.create_firewall_group(**kwargs)", "def AddSecurityGroupEntry(self, security_group, host=None, port=None):\n if self._conn:\n security_groups = self._conn.get_all_security_groups(groupnames=security_group)\n for sg in security_groups:\n if sg.name == security_group:\n return self._conn.authorize_security_group(sg.name, ip_protocol='tcp', from_port=port, to_port=port, cidr_ip='%s/32' % host)", "def crea_grupo(self):\r\n \r\n self.comprueba_casos_seleccionados()", "def create_group(self, properties: dict[str, Any | None]) -> dict:\n group = self.ms_client.http_request(method='POST', url_suffix='groups', json_data=properties)\n return group", "def _set_security_group(client, instance_id_list, security_groups):\n logging.info('Setting the security group of instances.')\n for instance_id in 
instance_id_list:\n client.modify_instance_attribute(InstanceId=instance_id, Groups=security_groups)", "def _assert_create_group(self, personality, response=400):\n group_response = self.autoscale_behaviors.create_scaling_group_given(\n lc_personality=personality)\n self.assertEquals(group_response.status_code, response, msg='Create group '\n 'with invalid lc_personality returned {0} as against '\n '{1}'.format(group_response.status_code, response))\n if response is 200:\n group = group_response.entity\n self.resources.add(group, self.empty_scaling_group)\n return group", "def _make_group(self, _rk, _group_hint):\n\n if isinstance(_group_hint, dict):\n # _group_hint is a single key/value pair\n g = _group_hint[list(_group_hint)[0]]\n\n r_type = g.get(\"type\", \"none\")\n if r_type != \"OS::Nova::ServerGroup\":\n return \"support only ServerGroup resource\"\n\n properties = g.get(\"properties\", {})\n if len(properties) == 0:\n return \"no properties\"\n\n group_name = properties.get(\"name\", None)\n if group_name is None:\n return \"no group name\"\n group_name = group_name.strip()\n\n policies = properties.get(\"policies\", [])\n if len(policies) == 0:\n return \"no policy of the group\"\n\n if len(policies) > 1:\n return \"multiple policies\"\n\n # TODO: exclude soft-affinity and soft-anti-affinity?\n\n if group_name in self.groups.keys():\n group = self.groups[group_name]\n else:\n group = Group(group_name)\n\n policy = policies[0].strip()\n if policy == \"anti-affinity\":\n group_type = \"diversity\"\n else:\n group_type = policy\n\n group.group_type = group_type\n group.factory = \"server-group\"\n group.level = \"host\"\n\n self.groups[group_name] = group\n else:\n # group hint is uuid string.\n rg = self.resource.get_group_by_uuid(_group_hint)\n if rg is None:\n return \"unknown group found while making group\"\n\n # TODO: exclude soft-affinity and soft-anti-affinity?\n\n if rg.name in self.groups.keys():\n group = self.groups[rg.name]\n else:\n group = Group(rg.name)\n\n group.group_type = rg.group_type\n group.factory = rg.factory\n group.level = \"host\"\n\n self.groups[rg.name] = group\n\n if group is not None:\n group.server_list.append(self.app_name + \":\" + _rk)\n\n return \"ok\"", "def create_new_scaling_group(self, request, data):\n group_cfg = data['groupConfiguration']\n\n group_cfg.setdefault('maxEntities', MAX_ENTITIES)\n group_cfg.setdefault('metadata', {})\n\n if group_cfg['minEntities'] > group_cfg['maxEntities']:\n raise InvalidMinEntities(\n \"minEntities must be less than or equal to maxEntities\")\n\n if data['launchConfiguration']['type'] == 'launch_server':\n validate_launch_config_servicenet(data['launchConfiguration'])\n\n deferred = get_supervisor().validate_launch_config(\n self.log, self.tenant_id, data['launchConfiguration'])\n\n deferred.addCallback(\n lambda _: self.store.create_scaling_group(\n self.log, self.tenant_id,\n group_cfg,\n normalize_launch_config(data['launchConfiguration']),\n data.get('scalingPolicies', None)))\n\n def _do_obey_config_change(result):\n group_id = result['id']\n config = result['groupConfiguration']\n launch = result['launchConfiguration']\n group = self.store.get_scaling_group(\n self.log, self.tenant_id, group_id)\n log = self.log.bind(scaling_group_id=group_id)\n d = controller.modify_and_trigger(\n self.dispatcher,\n group,\n bound_log_kwargs(log),\n partial(\n controller.obey_config_change, log,\n transaction_id(request), config, launch_config=launch),\n modify_state_reason='create_new_scaling_group')\n return 
d.addCallback(lambda _: result)\n\n deferred.addCallback(_do_obey_config_change)\n\n def _add_to_bobby(result, client):\n d = client.create_group(self.tenant_id, result['id'])\n return d.addCallback(lambda _: result)\n\n bobby = get_bobby()\n if bobby is not None:\n deferred.addCallback(_add_to_bobby, bobby)\n\n def _format_output(result):\n uuid = result['id']\n result[\"state\"] = format_state_dict(result[\"state\"])\n request.setHeader(\n \"Location\",\n get_autoscale_links(self.tenant_id, uuid, format=None))\n result[\"links\"] = get_autoscale_links(self.tenant_id, uuid)\n linkify_policy_list(\n result['scalingPolicies'], self.tenant_id, uuid)\n result['scalingPolicies_links'] = get_policies_links(\n result['scalingPolicies'],\n self.tenant_id, uuid, rel='policies')\n return {\"group\": result}\n\n deferred.addCallback(_format_output)\n deferred.addCallback(json.dumps)\n return deferred", "def create_new_user_group(self, token, userGroup):\n requestUser = self.get_username_from_token(token)\n dataBase = self.read_database()\n userGroups = dataBase['userGroups']\n if userGroup not in userGroups:\n newGroup = dict()\n newGroup['owners'] = [requestUser]\n newGroup['members'] = list()\n newGroup['masterKey'] = self.generate_master_key().decode('cp855')\n dataBase['userGroups'][userGroup] = newGroup\n self.write_database(dataBase)\n else:\n raise GroupAlreadyExistsException(\"Specified user group already exists.\")", "def create_group(username: str, gid: int=None, system: bool=False) -> Result[Group]:\n try:\n group = get_group(username)\n except KeyError:\n return add_group(username, gid, system)\n else:\n if group.gr_gid != gid:\n raise ValueError(\"Group {!r} has GID {}, expected {}\"\n .format(username, group.gr_gid, gid))\n return Result(State.unchanged, group)", "def createMainGroup(self):\n\t\tmc.group( n = self.grp.name, em = True )", "def create(self, context=None):\n values = self.obj_get_changes()\n db_nodegroup = self.dbapi.create_nodegroup(values)\n self._from_db_object(self, db_nodegroup)", "def pairing_group_create(curve='MNT224'):\n return PairingGroup(curve)", "def handle(self, *args, **options):\n new_group, created = Group.objects.get_or_create(name=options.get('group_name')) \n self.stdout.write(f\"Group {options.get('group_name')} created\")", "def create_new_group(self, group_id, poll_id, name):\n obj = self.table()\n obj.group_id = str(group_id)\n obj.poll_id = poll_id\n obj.name = name\n self.db.session.add(obj)\n self.db.session.commit()", "def create_groups(groups):\n for group_name in groups:\n try:\n Group.objects.get_or_create(name=group_name)\n except Exception as e:\n raise CouldNotCreateGroup(group_name, e)", "def copy_to_region(self, region, name=None):\r\n if region.name == self.region:\r\n raise BotoClientError('Unable to copy to the same Region')\r\n conn_params = self.connection.get_params()\r\n rconn = region.connect(**conn_params)\r\n sg = rconn.create_security_group(name or self.name, self.description)\r\n source_groups = []\r\n for rule in self.rules:\r\n grant = rule.grants[0]\r\n for grant in rule.grants:\r\n if grant.name:\r\n if grant.name not in source_groups:\r\n source_groups.append(grant.name)\r\n sg.authorize(None, None, None, None, grant)\r\n else:\r\n sg.authorize(rule.ip_protocol, rule.from_port, rule.to_port,\r\n grant.cidr_ip)\r\n return sg", "def test_add_group(self):\n pass", "def save_security_group_rule(resp, payload):\n if resp.get(\"code\") != 0:\n return None, SaveDataError(\"Create security group failed\")\n uuid = 
resp[\"data\"][\"ret_set\"][0][\"id\"]\n sgr_id = payload.get(\"sgr_id\")\n sg_uuid = payload.get(\"security_group_id\")\n port_range_min = payload.get(\"port_range_min\")\n port_range_max = payload.get(\"port_range_max\")\n remote_ip_prefix = payload.get(\"remote_ip_prefix\")\n protocol = payload.get(\"protocol\")\n priority = payload.get(\"priority\")\n direction = payload.get(\"direction\")\n remote_group_id = payload.get(\"remote_group_id\")\n zone = payload.get(\"zone\")\n zone_record = ZoneModel.get_zone_by_name(zone)\n\n _security_group = RdsSecurityGroupModel.\\\n get_security_by_uuid(uuid=sg_uuid, zone=zone_record)\n _security_group_rule_ins, err = RdsSecurityGroupRuleModel.\\\n objects.create(uuid,\n sgr_id,\n _security_group,\n protocol,\n priority,\n direction,\n port_range_min,\n port_range_max,\n remote_ip_prefix,\n remote_group_id)\n return _security_group_rule_ins, err", "def create_group(name, nodes, description=None):\n group, created = Group.get_or_create(name=name)\n if created:\n print('Group created with PK={} and name {}'.format(group.pk, group.name))\n else:\n print('Group with name {} and pk {} already exists. Do you want to add nodes?[y/n]'.format(group.name, group.pk))\n answer = raw_input()\n if answer.strip().lower() == 'y':\n pass\n else:\n return\n nodes2 = []\n nodes2_pks = []\n for node in nodes:\n try:\n node = int(node)\n except ValueError:\n pass\n nodes2_pks.append(node)\n try:\n nodes2.append(load_node(node))\n except:# NotExistentError:\n pass\n\n group.add_nodes(nodes2)\n print('added nodes: {} to group {} {}'.format(nodes2_pks, group.name, group.pk))\n\n if description:\n group.description = description\n\n return group", "def create_placement_group(self, name, strategy='cluster'):\r\n params = {'GroupName':name, 'Strategy':strategy}\r\n group = self.get_status('CreatePlacementGroup', params, verb='POST')\r\n return group", "def test_aws_service_api_security_groups_get(self):\n pass", "def product_group_create(obj, name, department):\n client = get_client(obj)\n\n with Action('Creating product_group: {}'.format(name), nl=True):\n pg = client.product_group_create(name, department)\n\n print(json.dumps(pg, indent=4))" ]
[ "0.81746614", "0.8173531", "0.8123306", "0.8045393", "0.7995554", "0.7991185", "0.79661214", "0.7898956", "0.7896103", "0.7488821", "0.7426497", "0.7370995", "0.72724205", "0.7255197", "0.702857", "0.6984043", "0.6908669", "0.6881222", "0.68289787", "0.6826571", "0.6761301", "0.67422557", "0.67327243", "0.67327243", "0.67221844", "0.67192316", "0.668766", "0.66871226", "0.66796595", "0.6675779", "0.6657891", "0.6638546", "0.6633008", "0.6618633", "0.6612936", "0.6576306", "0.656166", "0.65556335", "0.6552682", "0.65453666", "0.6543213", "0.6533606", "0.64452034", "0.64232236", "0.64179444", "0.6415018", "0.6374976", "0.6361716", "0.6355385", "0.6339941", "0.6325472", "0.6314155", "0.6310135", "0.6286816", "0.62831855", "0.62776005", "0.62713814", "0.62649894", "0.62498665", "0.6240105", "0.62168276", "0.62043417", "0.6198626", "0.61945295", "0.61612684", "0.61598855", "0.6150186", "0.61068165", "0.6104158", "0.6097063", "0.6094603", "0.608419", "0.60668534", "0.60447115", "0.6034305", "0.60244083", "0.6014055", "0.600185", "0.59932977", "0.5988975", "0.5975259", "0.5972196", "0.59705883", "0.59698266", "0.5960779", "0.595596", "0.5952637", "0.59506375", "0.5948397", "0.5945014", "0.59435296", "0.59396344", "0.59339756", "0.5920264", "0.59141725", "0.58972985", "0.587664", "0.5876513", "0.5871154", "0.5869041" ]
0.72574544
13
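Several of the negative snippets above share one boto3 pattern: create an EC2 security group, then authorize ingress rules on it. A minimal, hedged sketch of that pattern follows (illustrative only, not part of any dataset row; the group name, description, region, and port are placeholder assumptions):

    # Minimal sketch of the security-group pattern used by several snippets above.
    # Group name, description, region, and port are placeholder assumptions.
    import boto3

    ec2 = boto3.client('ec2', region_name='us-east-1')
    resp = ec2.create_security_group(GroupName='example-sg',
                                     Description='example security group')
    group_id = resp['GroupId']
    ec2.authorize_security_group_ingress(
        GroupId=group_id,
        IpPermissions=[{
            'IpProtocol': 'tcp', 'FromPort': 22, 'ToPort': 22,
            'IpRanges': [{'CidrIp': '0.0.0.0/0', 'Description': 'SSH access'}],
        }],
    )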
List all instances for a specific region and zone
def listInstancesRegionZone(region,zone): print "-"*80 print "# Region :",region," Zone", zone print "-"*80 instances = getInstancesRegionZone(region,zone) if instances: for instance in instances: print "[",instance.ami_launch_index,"]",instance.ip_address," (",instance.private_ip_address,") ",instance.instance_type," key=",instance.key_name
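The document snippet above relies on a getInstancesRegionZone(region, zone) helper that is not shown in this row. A minimal sketch of what such a helper could look like, assuming the legacy boto 2 EC2 API (an assumption inferred from attribute names such as ip_address and ami_launch_index used above); the helper itself is hypothetical, not part of the dataset row:

    # Hypothetical helper for the snippet above, assuming the legacy boto 2 API.
    import boto.ec2

    def getInstancesRegionZone(region, zone):
        # Connect to the requested region; returns None for an unknown region name.
        conn = boto.ec2.connect_to_region(region)
        if conn is None:
            return []
        # 'availability-zone' is a standard EC2 DescribeInstances filter.
        return conn.get_only_instances(filters={'availability-zone': zone})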
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_instances(self, region):\n try:\n conn = ec2.connect_to_region(region, **self.credentials)\n region_instances = []\n reservations = conn.get_all_reservations()\n for reservation in reservations:\n for instance in reservation.instances:\n region_instances.append(instance)\n except boto.exception.EC2ResponseError:\n return []\n return region_instances", "def get_all_reservations(config):\n reservations = []\n region_list = regions(aws_access_key_id=config.keys.api,\n aws_secret_access_key=config.keys.secret)\n for region in region_list:\n _logger.info(\"Searching %s\", region)\n cnx = region.connect(aws_access_key_id=config.keys.api,\n aws_secret_access_key=config.keys.secret)\n for reservation in cnx.get_all_instances():\n _logger.info(\"Found %s %s\", reservation,\n [str(i.id) for i in reservation.instances])\n reservations.append(reservation)\n return reservations", "def list_instances(self):\n\n response = self.client.service.instances().aggregatedList(\n project=self.client.project_id).execute()\n\n zones = response.get('items', {})\n instances = []\n for zone in zones.values():\n for instance in zone.get('instances', []):\n instances.append(instance)\n\n return instances", "def yield_instances_in_zone(self, zone, instance_filter=None):\n if instance_filter and set(\"\\\"\\\\'\").intersection(instance_filter):\n raise ValueError('Invalid instance filter: %s' % instance_filter)\n page_token = None\n while True:\n params = {'maxResults': 250}\n if instance_filter:\n params['filter'] = 'name eq \"%s\"' % instance_filter\n if page_token:\n params['pageToken'] = page_token\n try:\n resp = self.call_api(\n '/zones/%s/instances' % zone, params=params, deadline=120)\n except net.Error as exc:\n if not page_token and exc.status_code == 400:\n return # no such zone, this is fine...\n raise\n for instance in resp.get('items', []):\n yield instance\n page_token = resp.get('nextPageToken')\n if not page_token:\n break", "def List(self, zone):\n project = properties.VALUES.core.project.Get(required=True)\n request = self.messages.ComputeInstancesListRequest(\n zone=zone, project=project)\n instances = list_pager.YieldFromList(\n service=self.client.instances,\n request=request,\n method='List',\n field='items')\n\n result_set = []\n for instance in instances:\n if self._VMCreatedByExecGroup(instance):\n result_set.append(instance)\n\n return result_set", "def list_instances():\n print(str_sep)\n print(\"Listing EC2 instances from [{}] region.\".format(SESSION.region_name))\n print(\"{:20s}{:15s}{:10s}{}\".format(\"ID\", \"TYPE\", \"STATE\", \"NAME\"))\n print(str_sep)\n\n try:\n for instance in EC2_MANAGER.list_instances():\n # get the instance name in the tags list\n name = next((item for item in instance.tags if item[\"Key\"] == \"Name\"),\n {'Key': 'Name', 'Value': 'None'})\n\n print(\"{:20s}{:15s}{:10s}{}\".format(instance.id,\n instance.instance_type,\n instance.state['Name'],\n name['Value']))\n except ClientError as e:\n ErrManager.err_manager(e)\n\n print(str_sep)", "def list_ec2(region, filter_by_kwargs):\n conn = boto.ec2.connect_to_region(region)\n instances = conn.get_only_instances()\n return lookup(instances, filter_by=filter_by_kwargs)", "def list_instances(self):\n print '# AWS EC2 instances'\n self.compute.list_instances()", "def aws_get_instances_by_id(region, instance_id, raw=True):\n client = boto3.session.Session().client('ec2', region)\n try:\n matching_reservations = client.describe_instances(InstanceIds=[instance_id]).get('Reservations', [])\n except ClientError as 
exc:\n if exc.response.get('Error', {}).get('Code') != 'InvalidInstanceID.NotFound':\n raise\n return []\n instances = []\n [[instances.append(_aws_instance_from_dict(region, instance, raw)) # pylint: disable=expression-not-assigned\n for instance in reservation.get('Instances')] for reservation in matching_reservations if reservation]\n return instances", "def _get_running_ec2_instances(theargs):\n mapstr = ''\n if theargs.profile is not None:\n boto3.setup_default_session(profile_name=theargs.profile)\n ec2 = boto3.client('ec2', region_name='us-west-2')\n\n response = ec2.describe_regions()\n for region in response['Regions']:\n rname = region['RegionName']\n sys.stdout.write('Running ec2 query in region: ' + rname + '\\n')\n ec2 = boto3.client('ec2', region_name=rname)\n mapstr += 'Region: ' + rname + '\\n'\n respy = ec2.describe_instances()\n for reso in respy['Reservations']:\n for entry in reso['Instances']:\n namey = ''\n try:\n for keyval in entry['Tags']:\n if keyval['Key'] == 'Name':\n namey = keyval['Value']\n break\n except KeyError:\n pass\n\n mapstr += ('\\t\\t' + entry['PublicDnsName'] + '\\n' +\n '\\t\\tLaunch Date: ' + str(entry['LaunchTime']) +\n '\\n' + \n '\\t\\tId: ' + entry['InstanceId'] + '\\n' +\n '\\t\\tType: ' + entry['InstanceType'] + '\\n' +\n '\\t\\tName: ' + namey + '\\n' +\n '\\t\\tState: ' + entry['State']['Name'] + '\\n\\n')\n sys.stdout.write('\\nResults:\\n\\n')\n return mapstr", "def get_all_vpc_instances ( ec2_conn, vpc ) :\n return ec2_conn.get_only_instances( filters = { \"vpc-id\" : vpc.id } )", "def yield_instances_in_zones(self, zones, instance_filter=None):\n for zone in zones:\n for instance in self.yield_instances_in_zone(zone, instance_filter):\n yield instance", "def list_aws_instances(verbose=False, state='all'):\n conn = get_ec2_connection()\n\n reservations = conn.get_all_reservations()\n instances = []\n for res in reservations:\n for instance in res.instances:\n if state == 'all' or instance.state == state:\n instance = {\n 'id': instance.id,\n 'type': instance.instance_type,\n 'image': instance.image_id,\n 'state': instance.state,\n 'instance': instance,\n }\n instances.append(instance)\n env.instances = instances\n if verbose:\n import pprint\n pprint.pprint(env.instances)", "def list_(args):\n\n # Get Config.py\n cloud = get_current_cloud(args.cloud)\n\n instances = cloud.list_instances()\n print_table(print_instance_summary, headers, instances,\n use_color=args.color)\n return instances", "def get_instances_in_instance_group(\n self, name, zone, max_results=None, page_token=None):\n params = {}\n if max_results:\n params['maxResults'] = max_results\n if page_token:\n params['pageToken'] = page_token\n return self.call_api(\n '/zones/%s/instanceGroups/%s/listInstances' % (zone, name),\n method='POST',\n params=params,\n )", "def list_instances():\n if request.method == \"GET\":\n return render_template(\"instances.html\")", "def api_get_regions():\n db_session = DBSession()\n\n rows = []\n criteria = '%'\n if request.args and request.args.get('q'):\n criteria += request.args.get('q') + '%'\n else:\n criteria += '%'\n\n regions = db_session.query(Region).filter(Region.name.like(criteria)).order_by(Region.name.asc()).all()\n if len(regions) > 0:\n if request.args.get('show_all'):\n rows.append({'id': 0, 'text': 'ALL'})\n for region in regions:\n rows.append({'id': region.id, 'text': region.name})\n\n return jsonify(**{'data': rows})", "def list_rds(region, filter_by_kwargs):\n conn = boto.rds.connect_to_region(region)\n instances = 
conn.get_all_dbinstances()\n return lookup(instances, filter_by=filter_by_kwargs)", "def instances(self):\r\n # It would be more efficient to do this with filters now\r\n # but not all services that implement EC2 API support filters.\r\n instances = []\r\n rs = self.connection.get_all_instances()\r\n for reservation in rs:\r\n uses_group = [g.name for g in reservation.groups if g.name == self.name]\r\n if uses_group:\r\n instances.extend(reservation.instances)\r\n return instances", "def run(self):\n ilist = []\n key_filter = filters[self.args['filter_group']]\n for item in self.client.describe_instances()['Reservations']:\n for instance in item['Instances']:\n idict = {}\n for tag in instance['Tags']:\n if not any(t['Key'] == 'Name' for t in instance['Tags']):\n tag['Value'] = 'Unnamed'\n idict['Name'] = tag['Value']\n if tag['Key'] == 'Name':\n if tag['Value'] == \"\":\n tag['Value'] = 'Unnamed'\n idict['Name'] = tag['Value']\n for key in key_filter:\n try:\n if key in ['AvailabilityZone','Tenancy']:\n idict[key] = instance['Placement'][key]\n elif key == 'SecurityGroups':\n sg_list = []\n for sg in instance[key]:\n sg_list.append(sg['GroupId'])\n if self.args['output'] == 'csv':\n sg_string = \" \\n\"\n idict[key] = sg_string.join(sg_list)\n else:\n idict[key] = ','.join(sg_list)\n elif key == 'BlockDeviceMappings':\n devices = []\n for dev in instance[key]:\n devices.append(dev['DeviceName'])\n if self.args['output'] == 'csv':\n dev_string = \" \\n\"\n idict[key] = dev_string.join(devices)\n else:\n idict[key] = ','.join(devices)\n elif key == 'State':\n idict[key] = instance[key]['Name']\n else:\n if instance[key]:\n idict[key] = instance[key]\n except Exception as e:\n idict[key] = 'N/A'\n ilist.append(idict)\n self.template(self.sortList(ilist))", "def _get_instances(instance_tags, region):\n return ec2_conn[region].get_all_instances(filters={\"tag:Name\": instance_tags})", "def list(self, arguments):\n instance_name = arguments['<instance>']\n instance_name = self.activate(instance_name)\n\n vmrun = VMrun(self.vmx, user=self.user, password=self.password)\n print(vmrun.listSnapshots())", "def get_availability_zones(self, context, filters=None, fields=None,\n sorts=None, limit=None, marker=None,\n page_reverse=False):", "def get_instances() -> dict:\n url = f\"{app.config['COMPUTE_SERVERS_REF']}/detail\"\n instances_rq = request(\n method=\"GET\", url=url, headers=build_header(), params={\"vm_state\": \"active\"},\n )\n\n if not instances_rq.ok:\n HTTPError(instances_rq.status_code)\n\n answer = {\"servers\": list()}\n for instance in instances_rq.json()[\"servers\"]:\n instance_info = dict(name=instance[\"name\"])\n instance_info[\"ip_addresses\"] = list()\n for network, info in instance[\"addresses\"].items():\n instance_info[\"ip_addresses\"].extend(entry[\"addr\"] for entry in info)\n answer[\"servers\"].append(instance_info)\n\n return answer", "def show_instances():\n return get_instances()", "def getInstancesD(region):\n instances = getInstances(region)\n instancesDicts = {\"id\": i.id,\n \"KEEP-tag\": getKeepTag(i),\n \"instance_type\": i.instance_type,\n \"state\": i.state,\n \"launch_time\": i.launch_time,\n \"security_groups\": getGroups(i),\n \"region\": i.region.name,\n \"PROD\": isProduction(i)\n }", "def list_zones(self, kwargs):\n verbose = kwargs.get(\"verbose\", False)\n\n if not verbose:\n attributes = [\"dc\", \"objectClass\"]\n else:\n attributes = ALL\n\n self.display(\n self.engine.query(\n self.engine.ZONES_FILTER(),\n attributes, 
base=','.join([\"CN=MicrosoftDNS,DC=DomainDNSZones\", self.engine.base_dn])\n ),\n verbose\n )", "def list_instances(self):\n instances = []\n try:\n pages = self.compute.virtual_machines.list(\n CONF.azure.resource_group)\n except Exception as e:\n msg = six.text_type(e)\n LOG.exception(msg)\n ex = exception.InstanceListFailure(reason=six.text_type(e))\n raise ex\n else:\n if pages:\n for i in pages:\n instances.append(i.name)\n return instances", "def list_running_instances(self):\n print '# Running AWS EC2 instances'\n self.compute.list_running_instances()", "def list_instances_detail(self):\n\n # TODO(imsplitbit): need to ask around if this is the best way to do\n # this. This causes some redundant vzlist commands as get_info is run\n # on every item returned from this command but it didn't make sense\n # to re-implement get_info as get_info_all.\n infos = []\n try:\n # get a list of CT names which will be nova friendly.\n # NOTE: This can be an issue if nova decides to change\n # the format of names. We would need to have a migration process\n # to change the names in the name field of the CTs.\n out, err = utils.execute('sudo', 'vzlist', '--all', '-o',\n 'name', '-H')\n if err:\n LOG.error(err)\n except ProcessExecutionError as err:\n LOG.error(err)\n raise exception.Error('Problem listing Vzs')\n\n for name in out.splitlines():\n name = name.split()[0]\n status = self.get_info(name)\n infos.append(driver.InstanceInfo(name, status['state']))\n\n return infos", "def main(self, _):\n all_addresses = find_addresses.probe_regions()\n\n print(\"\")\n if not all_addresses:\n print(\"No namespace elastic IP addresses found.\")\n\n for region in consts.REGIONS:\n region_addresses = [address for address in all_addresses\n if address['region'] == region]\n if not region_addresses:\n continue\n\n print(f\"{region}: {len(region_addresses)} address(es) found:\")\n for address in region_addresses:\n if 'instance_name' in address:\n print(f\" {address['ip']} ({address['instance_name']})\")\n elif 'association_id' in address:\n print(f\" {address['ip']} (unknown association)\")\n else:\n print(f\" {address['ip']} (not associated)\")", "def get_instance_list():\n return parse_list_output(Popen('nova list --all-tenants'.split(),\n stdout=STDOUT, stderr=STDERR).communicate()[0])", "def get_all_db_region(self, context):\n zone_objs = self.dns_manager.get_all_db_region(context)\n return zone_objs", "def list_instances(self):\n nodes = self._driver.list_nodes()\n return [[n.name, n.state, n.public_ips] for n in nodes]", "def list_instances():\n js = _get_jetstream_conn()\n il = js.compute.instances.list()\n if not il:\n msg = \"You don't have any instances available.\"\n else:\n msg = (\"You have {0} instances available. 
Here are up to 3 most \"\n \"recent: \".format(len(il)))\n msg_ex = \"\"\n content = \"\"\n for i in il[:3]:\n msg_ex += \"{0},\".format(i.name)\n content += \"{0} ({1})\\n\".format(\n i.name, i.public_ips[0] if i.public_ips else i.private_ips[0])\n return statement(msg + msg_ex).simple_card(title=msg, content=content)", "def Get_Running_Instances():\n ec2 = boto3.resource('ec2') \n #call the features resource from the boto3 library\n instances = ec2.instances.filter(Filters=[{'Name': 'instance-state-name', 'Values': ['pending', 'running',]},])\n #filter the instances returned using the state name\n #you can also filter using Tags by adding the filters: \n #[{'Name': 'tag-key', 'Values': ['Role','Name',]}, {'Name': 'tag-value', 'Values': ['*test*', '*TEST*',]},]\n return [instance.id for instance in instances]\n #return a liste with the ids of the instances", "def get_zone(self, conn, host):\n fl = 'name=\"%s\"' % host\n request = conn.instances().aggregatedList(project=PROJECT, filter=fl)\n \twhile request is not None:\n \t\tresponse = request.execute()\n \t\tzones = response.get('items', {})\n \t\tfor zone in zones.values():\n \t\t\tfor inst in zone.get('instances', []):\n \t\t\t\tif inst['name'] == host:\n \t\t\t\t\treturn inst['zone'].split(\"/\")[-1]\n \t\trequest = conn.instances().aggregatedList_next(previous_request=request, previous_response=response)\n \traise Exception(\"Unable to determin the zone for instance %s\" % (host))", "def index(self, req):\n # Ask the ZoneManager in the Scheduler for most recent data,\n # or fall-back to the database ...\n items = api.get_zone_list(req.environ['nova.context'])\n items = common.limited(items, req)\n items = [_scrub_zone(item) for item in items]\n return dict(zones=items)", "def machine_lookup_all(session, hostname, public_ip = True):\n client = session.client('ec2')\n response = client.describe_instances(Filters=[{\"Name\":\"tag:Name\", \"Values\":[hostname]},\n {\"Name\":\"instance-state-name\", \"Values\":[\"running\"]}])\n\n addresses = []\n items = response['Reservations']\n if len(items) > 0:\n for i in items:\n item = i['Instances'][0]\n if 'PublicIpAddress' in item and public_ip:\n addresses.append(item['PublicIpAddress'])\n elif 'PrivateIpAddress' in item and not public_ip:\n addresses.append(item['PrivateIpAddress'])\n return addresses", "def list_instances(self):\n # list instances\n self._list_instances()", "def test_aws_service_api_availability_zones_get(self):\n pass", "def region_clients(self, **kwargs):\n return stats.region_clients(self._host, self._session, **kwargs)", "def list_as_instances(access_key, secret_key, region, autoscaling_group):\n\n aws_as = init_aws_as_conn(access_key, secret_key, region)\n aws_ec2 = init_aws_ec2_conn(access_key, secret_key, region)\n autoscaling_instances = []\n\n vm = aws_as.get_all_groups([autoscaling_group])\n autoscaling_instances_id = [j.instance_id for i in vm for j in i.instances]\n\n for instance_id in autoscaling_instances_id:\n vm = boto.ec2.instance.Instance(aws_ec2)\n vm.id = instance_id\n vm.update()\n autoscaling_instances.append(vm)\n\n return autoscaling_instances", "def describe_instances():\n # Instantiate the service resource object\n ec2_resource = session.resource('ec2', region_name=region)\n # Describe instances\n instances = ec2_resource.instances.all()\n for instance in instances:\n print('State of the instance \"' + instance.id + '\" is: \"' + instance.state['Name'] + '\"')\n return", "def list_ebss_by_instance():\n\n ec2 = u.create_ec2_resource()\n instances = 
[(u.seconds_from_datetime(i.launch_time), i) for i in ec2.instances.all()]\n sorted_instances = sorted(instances, key=itemgetter(0))\n\n for (seconds, instance) in sorted_instances:\n\n volumes = instance.volumes.all()\n volume_strs = []\n for v in volumes:\n volume_strs.append(\"%s (%s)\"%(v.id, v.size))\n print(\"%s: %s\" % (u.get_name(instance.tags), ','.join(volume_strs)))", "def locate_instances(project, service, zones, label_key, label_value, should_disable):\n try:\n full_instance_list = []\n for zone in zones:\n instances = service.instances().list(project=project, zone=zone, filter=f\"labels.{label_key}={label_value}\").execute()\n if 'items' in instances:\n for instance in instances['items']:\n if instance['status'] == \"RUNNING\" and should_disable:\n disable_vm(project, service, instance['name'], zone)\n elif not should_disable: # should not disable = enable\n enable_vm(project, service, instance['name'], zone)\n\n except Exception as error:\n return error", "def yield_instances(self, instance_filter=None):\n if instance_filter and set(\"\\\"\\\\'\").intersection(instance_filter):\n raise ValueError('Invalid instance filter: %s' % instance_filter)\n page_token = None\n while True:\n params = {'maxResults': 250}\n if instance_filter:\n params['filter'] = 'name eq \"%s\"' % instance_filter\n if page_token:\n params['pageToken'] = page_token\n resp = self.call_api('/aggregated/instances', params=params, deadline=120)\n items = resp.get('items', {})\n for zone in sorted(items):\n for instance in items[zone].get('instances', []):\n yield instance\n page_token = resp.get('nextPageToken')\n if not page_token:\n break", "def show_instances(cls, args, config):\n instance_list = config.get_all_instances()\n if len(instance_list) > 0:\n table_data = []\n for i in instance_list:\n provider_obj = config.get_object_by_id(i.provider_id, 'Provider')\n if provider_obj is None:\n continue\n provider_name = provider_obj.name\n #print \"provider_obj.type\",provider_obj.type\n if i.worker_group_id is not None:\n name = config.get_object_by_id(i.worker_id, 'WorkerGroup').name\n itype = 'worker'\n else:\n name = config.get_object_by_id(i.controller_id, 'Controller').name\n itype = 'controller'\n table_data.append([i.id, provider_name, i.provider_instance_identifier, itype, name])\n table_print(['ID', 'provider', 'instance id', 'type', 'name'], table_data)\n else:\n print \"No instance found\"", "def _get_available_regions():\n session = boto3.session.Session()\n\n return session.get_available_regions(service_name='s3')", "def aws_get_instances_by_name(region, name, raw=True):\n return _aws_get_instance_by_tag(region, name, 'tag:Name', raw)", "def find_instances(\n instance_id=None,\n name=None,\n tags=None,\n region=None,\n key=None,\n keyid=None,\n profile=None,\n return_objs=False,\n in_states=None,\n filters=None,\n):\n conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)\n\n try:\n filter_parameters = {\"filters\": {}}\n\n if instance_id:\n filter_parameters[\"instance_ids\"] = [instance_id]\n\n if name:\n filter_parameters[\"filters\"][\"tag:Name\"] = name\n\n if tags:\n for tag_name, tag_value in tags.items():\n filter_parameters[\"filters\"][\"tag:{}\".format(tag_name)] = tag_value\n\n if filters:\n filter_parameters[\"filters\"].update(filters)\n\n reservations = conn.get_all_reservations(**filter_parameters)\n instances = [i for r in reservations for i in r.instances]\n log.debug(\n \"The filters criteria %s matched the following instances:%s\",\n filter_parameters,\n 
instances,\n )\n\n if in_states:\n instances = [i for i in instances if i.state in in_states]\n log.debug(\n \"Limiting instance matches to those in the requested states: %s\",\n instances,\n )\n if instances:\n if return_objs:\n return instances\n return [instance.id for instance in instances]\n else:\n return []\n except boto.exception.BotoServerError as exc:\n log.error(exc)\n return []", "def get_ec2_instances(client):\n reservations = client.describe_instances().get(\"Reservations\")\n instances = list(map(lambda x: x.get(\"Instances\"), reservations))\n instances = list(itertools.chain.from_iterable(instances))\n return list(map(lambda x: {\n 'name': next((t['Value'] for t in x.get('Tags', []) if t.get('Key') == 'Name'), 'Unknown'),\n 'id': x.get('InstanceId'),\n 'state': x.get('State'),\n }, instances))", "def do_instance_list(cs, args):\n instances = cs.instances.list()\n\n fields = [\"OCCI ID\"]\n if args.detailed:\n fields.extend([\"Name\", \"State\", \"Network\"])\n occi_attrs = (\"occi.compute.hostname\",\n \"occi.compute.state\")\n\n pt = prettytable.PrettyTable([f for f in fields], caching=False)\n pt.align = 'l'\n\n for instance in instances:\n row = []\n attrs = instance.get('attributes', {})\n instance_id = attrs.get('occi.core.id', None)\n row.append(instance_id)\n\n if args.detailed and instance_id:\n if not all([i in attrs for i in occi_attrs]):\n instance = cs.instances.detail(instance_id)\n attrs = instance.get('attributes', {})\n\n name = attrs.get(\"occi.core.title\", None)\n if name is None:\n name = attrs.get(\"occi.compute.hostname\", None)\n row.append(name)\n row.append(attrs.get(\"occi.compute.state\", None))\n\n links = instance.get(\"links\", [])\n network = []\n for link in links:\n if occi.CATEGORIES[\"network\"] in link[\"kind\"][\"related\"]:\n # get IPv4\n ip = link[\"attributes\"].get(\n \"occi.networkinterface.address\",\n None\n )\n if not ip:\n ip = link[\"attributes\"].get(\n \"occi.networkinterface.ip6\",\n None\n )\n network.append(ip)\n row.append(network)\n\n pt.add_row(row)\n\n print(pt.get_string())", "def _instancelist(self):\n\n rv = []\n self.iname = {}\n for resv in self.conn.get_all_reservations():\n for inst in resv.instances:\n if inst.state != 'terminated':\n name = inst.tags.get('Name',None)\n rv.append([inst.id,inst.state])\n if name is not None:\n rv.append([name,inst.state])\n else:\n rv.append([inst.id+'-needsName',inst.state])\n self.iname[name] = inst.id\n self.iname[inst.id] = inst.id\n return rv", "def get_instances(self):\n for server in self.cs.servers.list():\n match = self.cluster_re.match(server.name)\n if match:\n for ip in server.networks['public']:\n if ip.count('.'):\n v4ip = ip\n yield (match.group('role'), v4ip)", "def list_instances(self):\n try:\n out, err = utils.execute(\n 'sudo', 'vzlist', '--all', '--no-header', '--output', 'ctid')\n if err:\n LOG.error(err)\n except ProcessExecutionError:\n raise exception.Error('Failed to list VZs')\n\n ctids = []\n for line in out.splitlines():\n ctid = line.split()[0]\n ctids.append(ctid)\n\n return ctids", "def test_aws_service_api_regions_get(self):\n pass", "def list_elb(region, filter_by_kwargs):\n conn = boto.ec2.elb.connect_to_region(region)\n instances = conn.get_all_load_balancers()\n return lookup(instances, filter_by=filter_by_kwargs)", "def RegionList(self):\n command = \"\"\"\n IPython.notebook.kernel.execute(\"RegionList=\" + JSON.stringify(JS9.GetShapes(\"regions\", {{display: '{wid}JS9'}})));\n \"\"\".format(wid=self.wid)\n 
get_ipython().run_cell_magic('javascript', '', command)", "def get_zones(self, latitude, longitude):\n result = self.__request(\n \"GET\",\n \"https://api.voiapp.io/v1/zones?lat={}&lng={}\".format(latitude, longitude),\n )\n if result and \"zones\" in result:\n return result[\"zones\"]", "def describe_rds_instances(rds, account, region, output_bucket):\n rds_list = rds.describe_db_instances().get('DBInstances')\n\n for rds_obj in rds_list:\n #print rds_obj\n output_bucket.append(misc.format_line((\n misc.check_if(account.get('name')),\n misc.check_if(region.get('RegionName')),\n misc.check_if(rds_obj.get('DBSubnetGroup').get('VpcId')),\n misc.check_if(rds_obj.get('DBInstanceIdentifier')),\n misc.check_if(rds_obj.get('DBInstanceClass')),\n misc.check_if(str(rds_obj.get('PubliclyAccessible'))),\n misc.check_if(rds_obj.get('Endpoint').get('Address')),\n misc.lookup(rds_obj.get('Endpoint').get('Address')),\n misc.check_if(str(rds_obj.get('Endpoint').get('Port')))\n )))", "def get_all_instances(self, instance_ids=None, filters=None):\r\n params = {}\r\n if instance_ids:\r\n self.build_list_params(params, instance_ids, 'InstanceId')\r\n if filters:\r\n if 'group-id' in filters:\r\n warnings.warn(\"The group-id filter now requires a security \"\r\n \"group identifier (sg-*) instead of a group \"\r\n \"name. To filter by group name use the \"\r\n \"'group-name' filter instead.\", UserWarning)\r\n self.build_filter_params(params, filters)\r\n return self.get_list('DescribeInstances', params,\r\n [('item', Reservation)], verb='POST')", "def list_instances(self, label_filters: Optional[dict] = None) -> List[\"GCPNode\"]:\n return", "def get_all_zones():\n cf = CloudFlare.CloudFlare(raw=True)\n page_number = 0\n total_pages = 1\n all_zones = []\n while page_number < total_pages:\n page_number += 1\n raw_results = cf.zones.get(params={'per_page':100, 'page':page_number})\n zones = raw_results['result']\n all_zones += zones\n total_pages = raw_results['result_info']['total_pages']\n return all_zones", "def reserved_compare(options):\n running_instances = defaultdict(dict)\n reserved_purchases = defaultdict(dict)\n regions = boto.ec2.regions()\n good_regions = [r for r in regions if r.name not in ['us-gov-west-1',\n 'cn-north-1']]\n for region in good_regions:\n if options.trace:\n print \" Scanning region {0}\".format(region.name)\n conn = region.connect()\n filters = {'instance-state-name': 'running'}\n zones = defaultdict(dict)\n\n if options.trace:\n print \" Fetching running instances\"\n reservations = conn.get_all_instances(filters=filters)\n for reservation in reservations:\n for inst in reservation.instances:\n if options.debug:\n print instance_string(inst, options, verbose=True)\n if inst.state != 'running':\n if options.debug:\n print \"Skip {0.id} state {0.state}\".format(inst)\n continue\n if inst.spot_instance_request_id:\n if options.debug:\n print \"Skip {0.id} has spot id {0.spot_instance_request_id}\".format(inst)\n continue\n if 'aws:autoscaling:groupName' in inst.tags:\n if options.debug:\n print \"Skip {0.id} is an autoscale instance\".format(inst)\n continue\n if inst.platform == 'Windows' or inst.platform == 'windows':\n if options.debug:\n print \"Skip {0.id} has platform {0.platform}\".format(inst)\n continue\n if inst.instance_type not in zones[inst.placement]:\n zones[inst.placement][inst.instance_type] = []\n zones[inst.placement][inst.instance_type].append(inst)\n\n if zones:\n running_instances[region.name] = zones\n\n purchased = defaultdict(dict)\n if options.trace:\n 
print \" Fetching reservations\"\n\n reserved = conn.get_all_reserved_instances()\n for r in reserved:\n if options.debug:\n print reservation_string(r, verbose=True)\n if r.state != 'active':\n continue\n if r.instance_tenancy != 'default':\n print 'WARNING: Non-default tenancy %s: %s' % (r.instance_tenancy, reservation_string(r))\n continue\n if r.instance_type not in purchased[r.availability_zone]:\n purchased[r.availability_zone][r.instance_type] = [r]\n else:\n purchased[r.availability_zone][r.instance_type].append(r)\n\n if purchased:\n reserved_purchases[region.name] = purchased\n\n return check_reservation_use(options, running_instances,\n reserved_purchases)", "def compute_zones(self):\n path = '/os-availability-zone/detail'\n res = self.compute.call(path, 'GET', data='', \n token=self.manager.identity.token)\n self.logger.debug('Get openstack availability zone: %s' % truncate(res))\n return res[0]['availabilityZoneInfo']", "def list(region, profile):\n ini_data = {}\n environment = {}\n\n if region:\n environment['region'] = region\n else:\n environment['region'] = find_myself()\n\n if profile:\n environment['profile'] = profile\n\n ini_data['environment'] = environment\n if start_list(ini_data):\n sys.exit(0)\n else:\n sys.exit(1)", "def region(self, args):\n m = MessageClass()\n print('123124')\n data = {'list': []}\n data['list'].append({\"Region_Name\": \"us-east-1\"})\n data['list'].append({\"Region_Name\": \"us-east-2\"})\n data['list'].append({\"Region_Name\": \"us-west-1\"})\n data['list'].append({\"Region_Name\": \"us-west-2\"})\n data['list'].append({\"Region_Name\": \"ap-northeast-1\"})\n data['list'].append({\"Region_Name\": \"ap-northeast-2\"})\n data['list'].append({\"Region_Name\": \"ap-south-1\"})\n data['list'].append({\"Region_Name\": \"ap-southeast-1\"})\n data['list'].append({\"Region_Name\": \"ap-southeast-1\"})\n data['list'].append({\"Region_Name\": \"ca-central-1\"})\n data['list'].append({\"Region_Name\": \"eu-central-1\"})\n data['list'].append({\"Region_Name\": \"eu-west-1\"})\n data['list'].append({\"Region_Name\": \"eu-west-2\"})\n data['list'].append({\"Region_Name\": \"eu-west-3\"})\n data['list'].append({\"Region_Name\": \"sa-east-1\"})\n m.data = data\n return m.to_json()", "def list(self, args):\n try:\n cloud = self._context.getCloudService()\n vdcs = cloud.listVirtualDatacenters()\n pprint_vdcs(vdcs)\n except (AbiquoException, AuthorizationException), ex:\n print \"Error: %s\" % ex.getMessage()", "def __get_reservations(self, instance_ids=None):\n if instance_ids:\n self.__validate_instance_id(instance_ids)\n euca_conn = self.__make_connection()\n try:\n return euca_conn.get_all_instances(instance_ids)\n except:\n euca.display_error_and_exit('%s' % ex)\n return False", "def get_zones_output(project: Optional[pulumi.Input[Optional[str]]] = None,\n region: Optional[pulumi.Input[Optional[str]]] = None,\n status: Optional[pulumi.Input[Optional[str]]] = None,\n opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetZonesResult]:\n ...", "def list_instances(self):\n instances = utils.list_instances(self.compute_client,\n drv_conf.resource_group)\n\n self._uuid_to_omni_instance.clear()\n instance_names = []\n for instance in instances:\n openstack_id = None\n if instance.tags and 'openstack_id' in instance.tags:\n openstack_id = instance.tags['openstack_id']\n if openstack_id is None:\n openstack_id = self._get_uuid_from_omni_id(instance.name)\n self._uuid_to_omni_instance[openstack_id] = instance\n instance_names.append(instance.name)\n 
return instance_names", "def getZoneRecords(self, filters=[]):\n return self._getRecords('zone', filters)", "def list_regions():\n regions_areas = (\n db.session.query(\n models.Region.code.label(\"region_code\"),\n models.Region.name.label(\"region_name\"),\n db.case([(models.District.code.is_(None),\n db.literal_column(\"'admin_area'\"))],\n else_=db.literal_column(\"'district'\")).label(\"area_type\"),\n db.case([(models.District.code.is_(None), models.AdminArea.code)],\n else_=models.District.code).label(\"area_code\"),\n db.case([(models.District.code.is_(None), models.AdminArea.name)],\n else_=models.District.name).label(\"area_name\")\n ).select_from(models.Region)\n .join(models.Region.areas)\n .outerjoin(models.AdminArea.districts)\n .filter(models.Region.code != \"GB\")\n .order_by(\"region_name\", \"area_name\")\n .all()\n )\n regions = {}\n areas = {}\n for row in regions_areas:\n regions[row.region_code] = row.region_name\n areas.setdefault(row.region_code, []).append(row)\n\n return render_template(\"regions.html\", regions=regions, areas=areas)", "def _aws_get_instance_by_tag(region, name, tag, raw):\n client = boto3.session.Session().client('ec2', region)\n matching_reservations = client.describe_instances(Filters=[{'Name': tag, 'Values': [name]}]).get('Reservations', [])\n instances = []\n [[instances.append(_aws_instance_from_dict(region, instance, raw)) # pylint: disable=expression-not-assigned\n for instance in reservation.get('Instances')] for reservation in matching_reservations if reservation]\n return instances", "def get_snapshots(self, region):\n try:\n conn = ec2.connect_to_region(region, **self.credentials)\n region_snapshots = conn.get_all_snapshots(owner='self')\n except boto.exception.EC2ResponseError:\n return []\n return region_snapshots", "def show_all_instances(self):\n if not self.all_instances:\n logging.error(\"%s: no instances detected\", self.name)\n return\n instances = \"\"\n for instance in self.all_instances:\n instances += \" - {0.name} (pid: {0.pid})\".format(instance)\n logging.info(\"arangod instances for starter: %s - %s\", self.name, instances)", "def list_instance_name():\n\n if request.method == \"GET\":\n with lock:\n names = list(instances.keys())\n return jsonify(names)\n return Response(status=200)", "def running_instances(self, args):\n message = MessageClass()\n region = args[\"Region\"]\n\n # Boto3 resource creation by providing the access_id and access_secret\n ec2 = boto3.resource(service_name='ec2', region_name=region, api_version=None, use_ssl=True,\n verify=None, endpoint_url=None, aws_access_key_id=self.aws_access_key,\n aws_secret_access_key=self.aws_secret_token, aws_session_token=None,\n config=None)\n instances = ec2.instances.filter(\n Filters=[{'Name': 'instance-state-name', 'Values': ['running']}])\n for instance in instances:\n attachment = MessageAttachmentsClass()\n attachment.title = instance.id\n message.attach(attachment)\n\n button = MessageButtonsClass()\n button.text = \"Stop Instance\"\n button.value = \"Stop Instance\"\n button.name = \"Stop Instance\"\n button.command = {\"service_application\": self.yellowant_integration_id,\n \"function_name\": \"stop-instance\",\n \"data\": {\"Instance-ID\": instance.id, \"Region\": region}}\n attachment.attach_button(button)\n\n message.message_text = \"Instances Running are:\"\n return message.to_json()", "def ec2(filter=\".*\",interval=0):\n interval = int(interval)\n def printlist():\n (list_instances,list_headers) = ec2list(filter=filter)\n x = PrettyTable()\n 
x.field_names = list_headers\n for instance in list_instances:\n x.add_row([ instance[y] for y in list_headers ])\n print(x)\n if interval:\n while True:\n os.system(\"clear\")\n printlist()\n time.sleep(interval)\n else:\n printlist()", "def get_virtual_servers(configuration: Configuration,\r\n resource_group_id: str = None,\r\n name: str = None,\r\n vpc_id: str = None,\r\n vpc_name: str = None,\r\n vpc_crn: str = None) -> Dict[str, Any]:\r\n service = create_ibmcloud_api_client(configuration)\r\n try:\r\n instances = \\\r\n service.list_instances(resource_group_id=resource_group_id, name=name, vpc_id=vpc_id, vpc_crn=vpc_crn,\r\n vpc_name=vpc_name).get_result()['instances']\r\n except ApiException as e:\r\n logger.error(\"List instances failed with status code \" +\r\n str(e.code) + \": \" + e.message)\r\n return instances", "def getinstancelist():\n dbcursor_dict.execute(dbq.get_all_instance_list, )\n db_instance_list = dbcursor_dict.fetchall()\n return db_instance_list", "def ow_instances(ow, ow_stack):\n log.info(\"ow_instances( %s )\", ow_stack)\n try:\n instances = ow.describe_instances(StackId=ow_stack)\n except Exception, e:\n print(e)\n log.info(e)\n sys.exit()\n ow_launch_data = {}\n for instance in instances['Instances']:\n created_at = datetime.datetime.strptime(\n instance['CreatedAt'], '%Y-%m-%dT%H:%M:%S+00:00').strftime('%s')\n ow_launch_data[instance['InstanceId']] = created_at\n log.info(\"instance %s, created %s\", instance, created_at)\n return ow_launch_data", "def ls(region_name=DEFAULT_REGION):\n s3conn = s3.connect_to_region(region_name)\n buckets = s3conn.get_all_buckets()\n for bucket in buckets:\n print(bucket.name)", "def list_zones(pattern=None):\n zlist = []\n cmd = [CMD_ZONEADM, \"list\", \"-pc\"]\n proc = subprocess.Popen(cmd, stdout=subprocess.PIPE)\n stdout, stderr = proc.communicate()\n ret = proc.returncode\n\n if ret:\n raise OSError(\"%s exited with exit code %d. 
stderr: '%s.'\" %\n (str(cmd), ret, stderr))\n\n def set_attr(zone, attr, line):\n \"\"\"just a helper function \"\"\"\n zone.set_attr(attr, line[attr])\n\n # line format:\n # zoneid:zonename:state:zonepath:uuid:brand:ip-type:r/w:file-mac-profile\n for line in str(stdout).split(\"\\n\"):\n if not line:\n continue\n line = line.split(\":\")\n\n if pattern and not(re.match(pattern, line[ZONE_ENTRY['ZNAME']])):\n continue # skip entries that does not pass regexp\n\n tmp_zone = Zone(line[ZONE_ENTRY['ZNAME']])\n for item in ZONE_ENTRY.values():\n set_attr(tmp_zone, item, line)\n\n zlist.append(tmp_zone)\n\n return zlist", "def getVampAll(self):\n for i in range(1, 4):\n self.getAllZoneAllParam(str(i))\n\t#return(self._pzones)", "def get_zones(region=None, key=None, keyid=None, profile=None):\n conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)\n\n return [z.name for z in conn.get_all_zones()]", "def ListRegions(self):\n project = properties.VALUES.core.project.GetOrFail()\n request = self.messages.CloudfunctionsProjectsLocationsListRequest(\n name='projects/' + project\n )\n return list_pager.YieldFromList(\n service=self.client.projects_locations,\n request=request,\n field='locations',\n batch_size_attribute='pageSize',\n )", "def get_regions(ec2_client=None):\n if not ec2_client:\n ec2_client = boto3.client('ec2')\n resp = ec2_client.describe_regions()\n return [region['RegionName'] for region in resp.get('Regions', [])]", "def List(self, zone):\n project = properties.VALUES.core.project.Get(required=True)\n parent_ref = resources.REGISTRY.Parse(\n zone,\n params={'projectsId': project},\n collection='tpu.projects.locations')\n request = self.messages.TpuProjectsLocationsNodesListRequest(\n parent=parent_ref.RelativeName())\n return list_pager.YieldFromList(\n service=self.client.projects_locations_nodes,\n request=request,\n method='List',\n batch_size_attribute='pageSize',\n field='nodes'\n )", "def generateInfoInstances(regions):\n print \"Writing instances info to output file %s\" % instances_data_output_file\n with open(instances_data_output_file, 'w') as f3:\n f3.write(\"INSTANCES\\n\")\n f3.write(\"Name\\tinstance ID\\tKEEP-tag\\tproduction\\tinstance_type\\tstate\\tlaunched\\tsecurity_groups\\tregion\\n\\n\")\n for region in regions:\n print \".\" # feedback for user\n instances = getInstances(region)\n for i in instances:\n f3.write(\"%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\n\"\n % (get_name_tag(i), i.id, getKeepTag(i), isProduction(i), i.instance_type, i.state,\n i.launch_time, getGroups(i), i.region.name))", "def instances(self, endpoint_name=None):\n if endpoint_name is None:\n _, body = self.request('/v1.1/endpoint/instances', 'GET')\n else:\n _, body = self.request('/v1.1/endpoints/%s/instances' % endpoint_name, 'GET')\n return body", "def get_regions(**kwargs):\n\n instance = Ceic._get_instance()\n\n get_dictionaries_method = instance._dictionary_facade.get_regions\n result = instance._make_request(get_dictionaries_method, **kwargs)\n\n return result", "def list_zone(self, args):\r\n manager = DNSManager(self.client)\r\n table = Table(['id', 'record', 'type', 'ttl', 'value'])\r\n\r\n table.align['ttl'] = 'l'\r\n table.align['record'] = 'r'\r\n table.align['value'] = 'l'\r\n\r\n zone_id = resolve_id(manager.resolve_ids, args['<zone>'], name='zone')\r\n\r\n records = manager.get_records(\r\n zone_id,\r\n record_type=args.get('--type'),\r\n host=args.get('--record'),\r\n ttl=args.get('--ttl'),\r\n data=args.get('--data'),\r\n )\r\n\r\n for record in 
records:\r\n table.add_row([\r\n record['id'],\r\n record['host'],\r\n record['type'].upper(),\r\n record['ttl'],\r\n record['data']\r\n ])\r\n\r\n return table", "def export_getInstancesContent( self, selDict, sortDict, start, limit ):\n return gVirtualMachineDB.getInstancesContent( selDict, sortDict, start, limit )", "def ex_list_availability_zones(self, only_available=True):\n params = {'Action': 'DescribeAvailabilityZones'}\n\n if only_available:\n params.update({'Filter.0.Name': 'state'})\n params.update({'Filter.0.Value.0': 'available'})\n\n params.update({'Filter.1.Name': 'region-name'})\n params.update({'Filter.1.Value.0': self.region_name})\n\n result = self.connection.request(self.path,\n params=params.copy()).object\n\n availability_zones = []\n for element in self._findall(result, 'availabilityZoneInfo/item'):\n name = self._findtext(element, 'zoneName')\n zone_state = self._findtext(element, 'zoneState')\n region_name = self._findtext(element, 'regionName')\n\n availability_zone = ExEC2AvailabilityZone(\n name=name,\n zone_state=zone_state,\n region_name=region_name\n )\n availability_zones.append(availability_zone)\n\n return availability_zones", "def _format_zone_list(self, instance_list):\n result = []\n if instance_list is not None:\n if \"items\" in instance_list:\n items = instance_list[\"items\"]\n for item in items:\n result.append(self._process_instance(item))\n return result", "def get_available_regions(service_name):\n session = boto3.session.Session()\n return session.get_available_regions(service_name)", "def get_available_regions(service_name):\n session = boto3.session.Session()\n return session.get_available_regions(service_name)", "def get_instances(instance_ids):\n\n instances = dict()\n conn = connect_to_region(REGION, aws_access_key_id=KEY_ID, aws_secret_access_key=ACCESS_KEY)\n try:\n reservations = conn.get_all_instances(instance_ids)\n except EC2ResponseError, ex:\n print 'Got exception when calling EC2 for instances (%s): %s' % \\\n (\", \".join(instance_ids), ex.error_message)\n return instances\n\n for r in reservations:\n if len(r.instances) and r.instances[0].id in instance_ids:\n instances[r.instances[0].id] = r.instances[0].tags[\"Name\"]\n\n return instances" ]
[ "0.73566705", "0.7152035", "0.69493294", "0.6796809", "0.67603457", "0.6752783", "0.6647198", "0.6643667", "0.64551127", "0.64104474", "0.6406182", "0.6393634", "0.63902986", "0.6389321", "0.63319427", "0.629142", "0.628562", "0.6270777", "0.6261531", "0.6253145", "0.621456", "0.6178527", "0.6153025", "0.61499745", "0.6134551", "0.61043745", "0.61033905", "0.6071402", "0.60678333", "0.606688", "0.60613006", "0.60513186", "0.6031941", "0.6022053", "0.5993538", "0.59828573", "0.59609485", "0.59595716", "0.5950737", "0.5942368", "0.59410864", "0.59188783", "0.59131825", "0.5904996", "0.58999485", "0.58801377", "0.5874835", "0.5860995", "0.5855861", "0.5847558", "0.5847304", "0.5843767", "0.5834465", "0.582911", "0.5828224", "0.582805", "0.5815494", "0.58127284", "0.57836723", "0.5767121", "0.5762351", "0.5756356", "0.5753825", "0.57529503", "0.57464427", "0.57307595", "0.5730227", "0.57196164", "0.5714554", "0.57086784", "0.56895214", "0.56629366", "0.56594336", "0.56370866", "0.56324655", "0.56274843", "0.561627", "0.5604376", "0.55940837", "0.5586535", "0.55829203", "0.55790406", "0.55758643", "0.55690664", "0.5565775", "0.55612814", "0.5560775", "0.5552611", "0.55316395", "0.55227023", "0.55226696", "0.55159223", "0.5511968", "0.5506451", "0.549577", "0.54916155", "0.5484717", "0.54689395", "0.54689395", "0.54680634" ]
0.82746065
0
Create all Cassandra security groups in all regions
def createAllSG():
    for info in conf_HVM:
        ec2 = boto.ec2.connect_to_region(info['region']+'-'+info['zone'])
        createSG(ec2,'SG-Cassandra-'+info['region']+'-'+info['zone'],CASSANDRA_RULES)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_groups():\n groups = [\"iDRAC-Administrators\", \"iDRAC-Operators\", \"iDRAC-Readonly\"]\n group_priviledges = [\"0x000001ff\", \"0x000000f9\", \"0x00000001\"]\n for host in online_hosts:\n for index in [1,2,3]:\n print index,\" \", groups[index-1]\n with settings(warn_only=True):\n\n result1 = sudo(\"racadm5 -r \"+host+\" -u root -p \"+PASSWORD+\" config -g cfgStandardSchema -i \"+str(index) +\" -o cfgSSADRoleGroupName \"+groups[index-1])\n if result1.failed:\n logging.error(\"Host: [ \"+host+\" ] : \" + \"Configuration for RoleGroupName failed \")\n\n result2 = sudo(\"racadm5 -r \"+host+\" -u root -p \"+PASSWORD+\" config -g cfgStandardSchema -i \"+str(index) +\" -o cfgSSADRoleGroupDomain corp.inmobi.com\")\n if result2.failed:\n logging.error(\"Host: [ \"+host+\" ] : \" + \"Configuration for RoleGroupDomain failed \")\n\n result3 = sudo(\"racadm5 -r \"+host+\" -u root -p \"+PASSWORD+\" config -g cfgStandardSchema -i \"+str(index) +\" -o cfgSSADRoleGroupPrivilege \"+ group_priviledges[index-1])\n if result3.failed:\n logging.error(\"Host: [ \"+host+\" ] : \" + \"Configuration for RoleGroupPriviledge failed \")", "def create_groups(**kwargs):\n for gname in SEC_GROUP_NAMES.itervalues():\n Group.objects.get_or_create(name=gname)", "def generate_groups(ctx):\n asyncio.run(generate_groups_impl(ctx.obj[\"config\"]))", "def create_groups(self, role):\n security_group_names = self._get_all_group_names()\n\n cluster_group_name = self.get_cluster_group_name()\n if not cluster_group_name in security_group_names:\n self.ec2Connection.create_security_group(cluster_group_name, \"Hadoop cluster (%s)\" % (self.name))\n self.ec2Connection.authorize_security_group(cluster_group_name, cluster_group_name)\n # Allow SSH from anywhere\n self.ec2Connection.authorize_security_group(cluster_group_name, ip_protocol=\"tcp\", from_port=22, to_port=22, cidr_ip=\"0.0.0.0/0\")\n\n role_group_name = self.group_name_for_role(role)\n if not role_group_name in security_group_names:\n self.ec2Connection.create_security_group(role_group_name, \"Hadoop %s (%s)\" % (role, self.name))", "def handle_region(self, region, args):\n result = [CHECKMARK, str(region), \"created security group '{}'\".format(GROUP_NAME)]\n\n try:\n # Create the security group\n response = region.conn.create_security_group(\n Description='Security group for Alia replicas and clients.',\n GroupName=GROUP_NAME,\n )\n\n # Get the newly created group id\n group_id = response[\"GroupId\"]\n\n # Allow all network traffic from within the security group\n response = region.conn.authorize_security_group_ingress(\n GroupId = group_id,\n IpPermissions = [\n {\n \"IpProtocol\": \"tcp\", \"FromPort\": 0, \"ToPort\": 65535,\n \"UserIdGroupPairs\": [\n {\n \"GroupId\": group_id,\n \"Description\": \"allow all traffic from the same group\",\n }\n ]\n }\n ]\n )\n\n # Open Alia-specific ports for access\n reponse = region.conn.authorize_security_group_ingress(\n GroupId = group_id,\n IpPermissions = [\n {\n \"IpProtocol\": \"tcp\", \"FromPort\": 22, \"ToPort\": 22,\n \"IpRanges\": [\n {\n \"CidrIp\": \"0.0.0.0/0\",\n \"Description\": \"allow remote SSH access\"\n }\n ]\n },\n {\n \"IpProtocol\": \"tcp\", \"FromPort\": 3264, \"ToPort\": 3285,\n \"IpRanges\": [\n {\n \"CidrIp\": \"0.0.0.0/0\",\n \"Description\": \"external Alia service access\",\n }\n ],\n \"Ipv6Ranges\": [\n {\n \"CidrIpv6\": \"::/0\",\n \"Description\": \"external Alia service IPv6 access\"\n }\n ]\n },\n {\n \"IpProtocol\": \"tcp\", \"FromPort\": 5356, \"ToPort\": 5356,\n \"IpRanges\": 
[\n {\n \"CidrIp\": \"0.0.0.0/0\",\n \"Description\": \"research services access\"\n }\n ]\n },\n {\n \"IpProtocol\": \"tcp\", \"FromPort\": 4157, \"ToPort\": 4157,\n \"IpRanges\": [\n {\n \"CidrIp\": \"0.0.0.0/0\",\n \"Description\": \"master services access\",\n }\n ]\n },\n ]\n )\n\n\n except Exception as e:\n result[0] = CROSSMARK\n result[2] = str(e)\n\n\n return result", "def ensure_security_groups_created(vpc, environment):\n conglomerate_name = environment + '-conglomerate'\n load_balancer_name = environment + '-load-balancer'\n\n existing = vpc.security_groups.filter(Filters=[\n { 'Name': 'group-name', 'Values': [ conglomerate_name, load_balancer_name ] }\n ])\n ret = {}\n for security_group in existing:\n if security_group.group_name == conglomerate_name:\n ret['conglomerate'] = security_group\n elif security_group.group_name == load_balancer_name:\n ret['load-balancer'] = security_group\n else:\n raise Exception(\"Unexpected security group name: \" + security_group.group_name)\n\n if not ret['conglomerate']:\n # untested\n ret['conglomerate'] = vpc.create_security_group(\n GroupName=conglomerate_name,\n Description=conglomerate_name\n )\n if not ret['load-balancer']:\n # untested\n ret['load-balancer'] = vpc.create_security_group(\n GroupName=load_balancer_name,\n Description=load_balancer_name\n )\n\n try:\n ret['conglomerate'].authorize_ingress(IpPermissions=[\n { 'IpProtocol': 'icmp', 'FromPort': 0, 'ToPort': 255, 'IpRanges': [ { 'CidrIp': '0.0.0.0/0' } ] },\n { 'IpProtocol': 'tcp', 'FromPort': 9000, 'ToPort': 9000, 'UserIdGroupPairs': [ { 'GroupId': ret['load-balancer'].id } ] },\n ])\n except botocore.exceptions.ClientError as e:\n if e.response['Error']['Code'] != 'InvalidPermission.Duplicate':\n raise e\n\n try:\n ret['load-balancer'].authorize_ingress(IpPermissions=[\n { 'IpProtocol': 'tcp', 'FromPort': 80, 'ToPort': 80 },\n { 'IpProtocol': 'tcp', 'FromPort': 443, 'ToPort': 443 },\n { 'IpProtocol': 'icmp', 'FromPort': 0, 'ToPort': 255, 'IpRanges': [ { 'CidrIp': '0.0.0.0/0' } ] },\n { 'IpProtocol': 'tcp', 'FromPort': 1024, 'ToPort': 65535, 'IpRanges': [ { 'CidrIp': Constants['VpcCidr'] } ] },\n ])\n except botocore.exceptions.ClientError as e:\n if e.response['Error']['Code'] != 'InvalidPermission.Duplicate':\n raise e\n\n return ret", "def add_security_groups(port, context):\n dbcontext = context._plugin_context\n groups = [context._plugin.get_security_group(dbcontext, sg)\n for sg in port['security_groups']]\n port['security_groups'] = groups", "def _set_security_group(client, instance_id_list, security_groups):\n logging.info('Setting the security group of instances.')\n for instance_id in instance_id_list:\n client.modify_instance_attribute(InstanceId=instance_id, Groups=security_groups)", "def dvs_port_security_group(self):\n self.show_step(1)\n self.env.revert_snapshot(\"dvs_vcenter_systest_setup\")\n\n cluster_id = self.fuel_web.get_last_created_cluster()\n\n os_ip = self.fuel_web.get_public_vip(cluster_id)\n os_conn = os_actions.OpenStackActions(\n os_ip, SERVTEST_USERNAME,\n SERVTEST_PASSWORD,\n SERVTEST_TENANT)\n\n default_net = os_conn.nova.networks.find(label=self.inter_net_name)\n\n # Create security group with rules for ssh and ping\n security_group = os_conn.create_sec_group_for_ssh()\n\n self.show_step(2)\n instances = openstack.create_instances(\n os_conn=os_conn,\n nics=[{'net-id': default_net.id}],\n security_groups=[security_group.name])\n openstack.verify_instance_state(os_conn)\n\n self.show_step(3)\n access_point, access_point_ip = 
openstack.create_access_point(\n os_conn=os_conn,\n nics=[{'net-id': default_net.id}],\n security_groups=[security_group.name])\n\n ips = [os_conn.get_nova_instance_ip(i, net_name=self.inter_net_name)\n for i in instances]\n ip_pair = dict.fromkeys([access_point_ip])\n for key in ip_pair:\n ip_pair[key] = ips\n openstack.check_connection_vms(ip_pair)\n\n self.show_step(4)\n ips = []\n for instance in instances:\n port = os_conn.neutron.create_port({\n \"port\": {\n \"network_id\": default_net.id,\n \"device_id\": instance.id\n }})['port']\n ips.append(port['fixed_ips'][0]['ip_address'])\n\n self.show_step(5)\n for key in ip_pair:\n ip_pair[key] = ips\n openstack.check_connection_vms(ip_pair, result_of_command=1)", "def test_aws_service_api_security_groups_get(self):\n pass", "def load_security_groups(self):\n url = self.lookup(\"security_groups_url\")\n groups = self._fetcher.get_entities(url)\n if groups is None:\n return\n\n group_names = [group['name']\n for group in groups if group['running_default'] is False]\n # at this point the group_names contain all the running groups in addition\n # to the groups assigned to this space.\n # That's why we need to remove the duplicates\n group_names = list(set(group_names))\n\n for name in group_names:\n self._security_groups.append({'name': name})", "def create_initial_groups():\n \n from base import get_group_database, get_user_database\n import api\n \n # we want any groups we create in here to be active immediately\n save_min_sponsors = Group._min_sponsors\n Group._min_sponsors = 1\n \n user_db = get_user_database()\n group_db = get_group_database()\n \n user_admin = user_db['admin']\n \n def create_group(user_id, name, desc, owner, parent_id, join_pol, memb_vis, memb_edit=''):\n if not group_db.has_key(user_id):\n g = group_db.create_group(user_id=user_id,\n name=name,\n description=desc,\n owner=owner,\n no_pay=True)\n group_db.force_accept(g)\n if parent_id:\n group_db.join_group(g, group_db[parent_id], force=1)\n \n g = group_db[user_id]\n if join_pol:\n api.group_set_join_policy(user_admin, g, join_pol)\n if join_pol == 'open':\n # if membership is open, allow non-members to read\n api.group_set_other_perms(user_admin, g, 'ro')\n if memb_vis:\n api.group_set_membership_visible(user_admin, g, memb_vis)\n if desc:\n api.group_set_settings(user_admin, g, description=desc)\n if memb_edit:\n api.group_set_member_edit(user_admin, g, memb_edit)\n \n # set date of formation\n create = datetime(2004, 05, 10, 12, 0, 0)\n g.date = create\n \n \n groups = [\n ('top', 'Top', 'This group contains the top-level groups.', user_admin, None, '', 'open', ''),\n ('regional', 'Regional', 'Contains groups with a regional focus.', user_admin, 'top', '', 'open', ''),\n ('orgs', 'Organizations', 'Contains categories of organizations.', user_admin, 'top', '', 'open', ''),\n ('community', 'Community', 'Contains groups that are focused or based on ned.com.', user_admin, 'top', '', 'open', ''),\n ('issues', 'Issues', 'Contains groups focused on particular issues.', user_admin, 'top', '', 'open', ''),\n ('general', 'General', 'Contains groups that don\\'t belong in other categories.', user_admin, 'top', 'open', 'open', ''),\n ('general-other', 'General', 'Contains groups that don\\'t belong in other categories.', user_admin, 'general', 'open', 'open', ''),\n ('help', 'Help', 'Contains site help.', user_admin, 'community', '', 'open', ''),\n ('community-general', 'Community - General',\n '', user_admin, 'community', 'open', 'open', 'member'),\n ('suggestions', 
'Suggestions', 'For community suggestions.', user_admin, 'community-general', '', 'open', ''),\n ('public', 'Public sector',\n 'Groups operating in the public sector should join this group.', user_admin, 'orgs', 'open', 'open', 'member'),\n ('private', 'Private sector',\n 'Groups operating in the private sector should join this group.', user_admin, 'orgs', 'open', 'open', 'member'),\n ('social', 'Social sector',\n 'Groups operating in the social sector should join this group.', user_admin, 'orgs', 'open', 'open', 'member'),\n ('orgs-general', 'Organizations - General',\n \"For organizations that don't fit in other categories.\", user_admin, 'orgs', 'open', 'open', 'member'),\n ('issues-business', 'Business',\n '', user_admin, 'issues', 'open', 'open', 'member'),\n ('issues-cyf', 'Children - Youth - Families',\n '', user_admin, 'issues', 'open', 'open', 'member'),\n ('issues-education', 'Education',\n '', user_admin, 'issues', 'open', 'open', 'member'),\n ('issues-env', 'Environment - Conservation',\n '', user_admin, 'issues', 'open', 'open', 'member'),\n ('issues-health', 'Health Care',\n '', user_admin, 'issues', 'open', 'open', 'member'),\n ('issues-pol', 'Policy - Politics',\n '', user_admin, 'issues', 'open', 'open', 'member'),\n ('issues-religion', 'Religion',\n '', user_admin, 'issues', 'open', 'open', 'member'),\n ('issues-soc', 'Social Justice - Human Services',\n '', user_admin, 'issues', 'open', 'open', 'member'),\n ('issues-tech', 'Technology',\n '', user_admin, 'issues', 'open', 'open', 'member'),\n ('issues-general', 'Issues - General',\n '', user_admin, 'issues', 'open', 'open', 'member'),\n ('ned', '<ned> Network',\n '', user_admin, '', '', '', ''),\n ('ned-internal', 'Ned - Internal',\n '', user_admin, '', '', '', ''),\n ('sitedev', 'Site Development',\n '', user_admin, 'ned-internal', '', '', ''),\n ]\n \n for user_id, name, desc, owner, parent_id, join_pol, memb_vis, memb_edit in groups:\n create_group(user_id, name, desc, owner, parent_id, join_pol, memb_vis, memb_edit)\n \n # Help group\n g_help = group_db['help']\n api.group_set_anon_read(user_admin, g_help, True)\n \n # ON groups\n g_on = group_db['ned']\n group_db.join_group(g_on, group_db['private'], force=1)\n group_db.join_group(g_on, group_db['public'], force=1)\n group_db.join_group(g_on, group_db['social'], force=1)\n api.group_set_owners_by_user_id(user_admin, g_on, ['admin', 'jimc'])\n api.group_set_join_policy(user_admin, g_on, 'owner')\n api.group_set_invite_policy(user_admin, g_on, 'owner')\n api.group_set_membership_visible(user_admin, g_on, 'open')\n api.group_set_member_edit(user_admin, g_on, True)\n api.group_set_anon_read(user_admin, g_on, True)\n \n g_on_int = group_db['ned-internal']\n api.group_set_owners_by_user_id(user_admin, g_on_int, ['admin', 'jimc'])\n api.group_set_join_policy(user_admin, g_on_int, 'owner')\n api.group_set_invite_policy(user_admin, g_on_int, 'owner')\n api.group_set_membership_visible(user_admin, g_on_int, 'member')\n api.group_set_member_edit(user_admin, g_on_int, True)\n api.group_set_anon_read(user_admin, g_on_int, False)\n \n g_sitedev = group_db['sitedev']\n api.group_set_owners_by_user_id(user_admin, g_sitedev, ['admin', 'jimc'])\n \n Group._min_sponsors = save_min_sponsors", "def request_access_to_groups(self, ceph):\n for ceph_group in (\"volumes\", \"images\", \"vms\"):\n ceph.request_access_to_group(\n name=ceph_group,\n object_prefix_permissions={\"class-read\": [\"rbd_children\"]},\n permission=\"rwx\",\n )", "def list_secgroups(self, name=None):", "def 
init_valet_groups(self):\n\n for rk, r in self.stack.items():\n properties = r.get(\"properties\", {})\n metadata = properties.get(\"metadata\", {})\n\n if len(metadata) > 0:\n valet_rules = metadata.get(\"valet_groups\", None)\n\n if valet_rules is not None and valet_rules != \"\":\n rule_list = []\n if isinstance(valet_rules, six.string_types):\n rules = valet_rules.split(\",\")\n for gr in rules:\n rule_list.append(gr.strip())\n else:\n self.status = \"incorrect valet group metadata format\"\n self.logger.error(self.status)\n return\n\n # Check rule validation of valet_groups.\n self.status = self.resource.check_valid_rules(self.tenant_id,\n rule_list,\n use_ex=self.use_dha)\n if self.status != \"ok\":\n self.logger.error(self.status)\n return\n\n self.status = self._make_valet_groups(properties.get(\"name\"),\n properties[\"availability_zone\"][0],\n rule_list)\n if self.status != \"ok\":\n self.logger.error(self.status)\n return\n\n # Check and create server groups if they do not exist.\n scheduler_hints = properties.get(\"scheduler_hints\", {})\n if len(scheduler_hints) > 0:\n for hint_key in scheduler_hints.keys():\n if hint_key == \"group\":\n hint = scheduler_hints[hint_key]\n self.status = self._make_group(properties.get(\"name\"), hint)\n if self.status != \"ok\":\n self.logger.error(self.status)\n return", "def create_security_group(group_name):\n ec2 = boto.ec2.connect_to_region(AWS_REGION)\n for g in ec2.get_all_security_groups():\n if g.name == group_name:\n return # We already have this group setup\n group = ec2.create_security_group(group_name,\n \"%s SSH access group\" % group_name)\n group.authorize(\"tcp\", 22, 22, \"0.0.0.0/0\") # SSH is on port 22, all IPs\n group.authorize(\"tcp\", 80, 80, \"0.0.0.0/0\")\n group.authorize(\"tcp\", 61000, 65000, \"0.0.0.0/0\")\n print \"Created new security group\"", "def _generate_ec2_instance_and_sg(resource):\n for instance in resource.instances.all():\n for security_group in instance.security_groups:\n yield instance, security_group", "def get_groups():\n\n groups = [\"shelter\", \"sharing\", \"unsheltered\", \"motel\"]\n\n for item in groups:\n group = Group(group_name=item)\n\n db.session.add(group)\n\n db.session.commit()", "def sg_lookup_all(session, vpc_id):\n if session is None:\n return NoneDict()\n\n client = session.client('ec2')\n response = client.describe_security_groups(Filters=[{\"Name\": \"vpc-id\", \"Values\": [vpc_id]}])\n\n if len(response['SecurityGroups']) == 0:\n return NoneDict()\n else:\n sgs = NoneDict()\n for sg in response['SecurityGroups']:\n key = _find(sg.get('Tags', []), lambda x: x[\"Key\"] == \"Name\")\n if key:\n key = key['Value']\n sgs[key] = sg['GroupId']\n\n return sgs", "def _generate_rds_instances_and_sg(resource, session):\n for db_instance in resource.describe_db_instances()[\"DBInstances\"]:\n for security_group in db_instance[\"VpcSecurityGroups\"]:\n yield db_instance, security_group, _get_sg_name(security_group[\"VpcSecurityGroupId\"], session)", "def add_users_to_groups(output=True):\n\n for group in DEFAULT_GROUPS:\n user = User.objects.get(username=group)\n role_title = Group.objects.get(name=group)\n user.groups.add(role_title)", "def setup(self):\n base = automap_base()\n engine = create_engine(\"mysql+pymysql://\" + csconfig.config.db_user + \":\" +\n csconfig.config.db_password + \"@\" +\n csconfig.config.db_host + \":\" +\n str(csconfig.config.db_port) +\n \"/\" + csconfig.config.db_name)\n base.prepare(engine, reflect=True)\n session = Session(engine)\n cloud_yaml = 
base.classes.csv2_group_resource_yaml\n\n for cloud in self.group_resources:\n cloud_yamls = session.query(cloud_yaml).\\\n filter(cloud_yaml.group_name == self.name,\n cloud_yaml.cloud_name == cloud.cloud_name)\n cloud_yaml_list = []\n for yam in cloud_yamls:\n cloud_yaml_list.append([yam.yaml_name, yam.yaml, yam.mime_type])\n if cloud.cloud_type == 'localhost':\n newcloud = cloudscheduler.localhostcloud.LocalHostCloud(extrayaml=cloud_yaml_list, resource=cloud)\n else:\n newcloud = cloudscheduler.openstackcloud.\\\n OpenStackCloud(extrayaml=cloud_yaml_list, resource=cloud)\n self.clouds[newcloud.name] = newcloud\n self.log.debug(\"Added all clouds for group: %s\", self.name)", "def create_sec_group(ec2, sec_group_name):\n sec = ec2.create_security_group(sec_group_name, 'Jvivian Boto SecGroup')\n port = 22\n sec.authorize('tcp', port, port, '0.0.0.0/0')", "def create(self, body: CloudSecurityGroup) -> Dict:\n\t\treturn self._post(route=AWSSecurityGroupConsts.CLOUD_SECURITY_GROUP.value, body=body)", "def test_create_resource_group(self):\n pass", "def create(self, name, desc, tenant_id):\n data = {\"security_group\": {\"name\": name, \n \"description\": desc, \n \"tenant_id\":tenant_id}}\n\n path = '%s/security-groups' % self.ver\n res = self.client.call(path, 'POST', data=json.dumps(data), \n token=self.manager.identity.token)\n self.logger.debug('Create openstack security group: %s' % truncate(res))\n return res[0]['security_group']", "def grant_grp_access ( ec2_conn, incoming_grps, tgt_grp, port, protocol = 'tcp' ) :\n for grp in incoming_grps :\n if not does_grp_rule_exist( tgt_grp, grp, port, port, protocol ) :\n tgt_grp.authorize( ip_protocol = protocol,\n from_port = port,\n to_port = port,\n src_group = grp )\n ec2_conn.authorize_security_group_egress( group_id = grp.id,\n ip_protocol = protocol,\n from_port = port,\n to_port = port,\n src_group_id = tgt_grp.id )", "def add_all_regions():\n gene_id = request.json['gene_id']\n panel_id = request.json['panel_id']\n tx_id = request.json['tx_id']\n gene_name = request.json['gene_name']\n project_id = get_project_id_by_panel_id(s, panel_id)\n\n add_preftxs_to_panel(s, project_id, [{\"gene\": gene_name, \"tx_id\": tx_id}, ])\n add_genes_to_panel_with_ext(s, panel_id, gene_id)\n return jsonify({\"genes\": [gene_id, ]})", "def create_endpoints_new_network():\n\n for i in range(NUM_OF_SHARDS):\n key_explorer = \"explorers_\" + str(i)\n array_instance_ip = parse_network_config(key_explorer)\n array_instance_id = retrieve_instance_id(array_instance_ip)\n\n # 0/ - detect region of explorers\n reg = retrieve_instance_region(array_instance_ip[0])\n # all nodes registered for the same endpoints should be located in the same region, if not, gracefully exit\n # verify_nodes_same_region(reg, array_instance_ip)\n\n print(\"\\n######################################### Creating complete pipeline for shard\", str(i),\n \"in AWS region: \", reg, \"#########################################\\n\")\n # 1/ - request certificates\n print(\"\\n==== step 1: request SSL certificates, CertificateArn will be stored into dict_region_sslcerts \\n\")\n domain_name = ''.join(['api.s', str(i), \".\", BASE_DOMAIN_NAME])\n dict_existing_certs = get_existing_certs(reg, domain_name)\n dict_region_sslcerts.clear()\n if dict_existing_certs[domain_name]:\n print(\"[INFO] SSL certificate of\", domain_name, \"exists, skipping..\")\n dict_region_sslcerts[reg].append(dict_existing_certs[domain_name][0])\n else:\n print(\"[INFO] SSL certificate of\", domain_name, \"does NOT exist, 
requesting..\")\n request_ssl_certificates(reg, domain_name)\n\n print(\"[RESULT] OF STEP 1\")\n pp.pprint(dict_region_sslcerts)\n\n # 2/ - create target group\n dict_region_tgarn.clear()\n array_tgs = create_name_target_group(i, ID_DOMAIN_NAME)\n pp.pprint(array_tgs)\n create_target_group(reg, array_tgs)\n print(\"[RESULT] OF STEP 2\")\n pp.pprint(dict_region_tgarn)\n\n # 3/ - create elb\n dict_region_elb2arn.clear()\n elb2_name = ''.join('s' + str(i) + '-' + ID_DOMAIN_NAME + '-' + reg)\n array_dns_hostedzone = create_elb2(reg, elb2_name)\n print(\"[RESULT] OF STEP 3\")\n pp.pprint(dict_region_elb2arn)\n\n # 4/ - create listener\n dict_region_ListenerArn.clear()\n create_listener(reg, dict_region_elb2arn, dict_region_sslcerts, dict_region_tgarn)\n print(\"[RESULT] OF STEP 4\")\n pp.pprint(dict_region_ListenerArn)\n\n # 5/ - create one more rule for the current listener\n host_header_value = ''.join('ws.s' + str(i) + '.' + BASE_DOMAIN_NAME)\n create_rule(reg, dict_region_ListenerArn, dict_region_tgarn, dict_region_elb2arn, host_header_value)\n\n # 6/ - register explorer instances into the target group\n register_explorers(reg, array_instance_id, dict_region_tgarn)\n\n # 7/ - create entries on Route 53\n array_record_set = create_name_record_set(i, BASE_DOMAIN_NAME)\n create_dns_entries(HOSTED_ZONE_ID, array_record_set, array_dns_hostedzone)", "def create_groups(groups):\n for group_name in groups:\n try:\n Group.objects.get_or_create(name=group_name)\n except Exception as e:\n raise CouldNotCreateGroup(group_name, e)", "def test_aws_service_api_vm_security_group_put(self):\n pass", "def load_species_groups():\n\n print(\"Species groups\")\n\n SpeciesGroup.query.delete()\n\n with open(\"seed_data/species_group_seed.psv\") as species:\n for row in species:\n species_group_id, species_group_name = row.strip().split(\"|\")\n\n group = SpeciesGroup(species_group_id = species_group_id,\n species_group = species_group_name)\n\n db.session.add(group)\n\n db.session.commit()", "def pre_security_group_create(self, resource_dict):\n pass", "def create_infrastructure(aws_key, aws_secret):\n ec2_client, s3_client, iam_client, redshift_client = create_clients(\n aws_key, aws_secret\n )\n role_arn = create_iam_role(iam_client)\n create_redshift_cluster(redshift_client, role_arn)\n # Loop until the cluster status becomes \"Available\"\n status = \"\"\n while status.lower() != \"available\":\n cluster_properties = get_cluster_properties(redshift_client)\n status = cluster_properties['ClusterStatus']\n print('Cluster status is %s' % status)\n time.sleep(30)\n set_vpc_properties(ec2_client, cluster_properties['VpcId'])\n print_cluster_properties(redshift_client)", "def test_system_group_create(audreyvars, tunnel_requested, system_groups):\n server = audreyvars[\"KATELLO_HOST\"]\n login = audreyvars.get(\"KATELLO_USER\", \"admin\")\n org = audreyvars.get(\"KATELLO_ORG\", \"redhat\")\n password = audreyvars.get(\"KATELLO_PASS\", \"admin\")\n\n # If using a tunnel to access ec2, an alternative port is needed\n if tunnel_requested:\n port = audreyvars.get(\"SSH_TUNNEL_KATELLO_PORT\", 1443)\n else:\n port = audreyvars.get(\"KATELLO_PORT\", 443)\n\n # Query existing system groups\n current_group_names = [g.get('name') for g in common.katello.system_group_query(server, port, org, login, password)]\n\n # Determine whether groups were created\n new_group_ids = []\n for group_name in system_groups:\n if group_name not in current_group_names:\n result_dict = common.katello.system_group_create(server, port, org, login, 
password, group_name)\n new_group_ids.append(result_dict.get('id'))\n\n if len(new_group_ids) == 0:\n pytest.skip(msg=\"System groups already exist, no groups created\")", "def create_acl(self, context, sg):\n self.security_group_driver.create_acl(context, sg)", "def crea_grupo(self):\r\n \r\n self.comprueba_casos_seleccionados()", "def process_security_group ( ec2_conn, vpc, base_name, params, secgrp_type = None, secgrp_description = None ) :\n if not secgrp_type :\n secgrp_type = params[ 'type' ]\n if not secgrp_description :\n secgrp_description = params[ 'description' ]\n\n secgrp = find_group( ec2_conn, base_name, secgrp_type )\n if not secgrp :\n if params.get( 'create', 'NO' ) == 'YES' :\n secgrp_name = get_secgrp_name( base_name, secgrp_type )\n print \"Creating security group with name: \" + secgrp_name\n secgrp = create_secgrp( ec2_conn, vpc, secgrp_name, secgrp_description )\n else :\n print \"ERROR: Could not find group with name \" + get_secgrp_name( base_name, secgrp_type )\n sys.exit( 1 )\n\n print \"Prepping rules for security group \" + secgrp.name\n remove_all_rules( ec2_conn, [ secgrp ], True, base_name )\n\n # Reload group to retrieve new object with no rules.\n secgrp = find_group( ec2_conn, base_name, secgrp_type )\n\n is_public = params.get( 'public' ) == 'YES'\n if is_public :\n nat_secgrp = None\n if params.get( 'os-update' ) == 'YES' :\n ec2_conn.authorize_security_group_egress( group_id = secgrp.id,\n ip_protocol = \"tcp\",\n from_port = 80,\n to_port = 80,\n cidr_ip = all_ip_cidr )\n if params.get( 'public-tcp-ports' ) :\n public_ports = params[ 'public-tcp-ports' ]\n for port in public_ports :\n secgrp.authorize( ip_protocol = \"tcp\",\n from_port = port,\n to_port = port,\n cidr_ip = all_ip_cidr )\n\n if params.get( 'incoming-cidr-rules' ) :\n for incoming_rule in params[ 'incoming-cidr-rules' ] :\n start_port = incoming_rule.get( 'port' )\n end_port = start_port\n\n protocol = get_secgrp_protocol_param( incoming_rule )\n cidr_list = get_cidr_param( incoming_rule[ 'cidr' ] )\n\n secgrp.authorize( ip_protocol = protocol,\n from_port = start_port,\n to_port = end_port,\n cidr_ip = cidr_list )\n\n else :\n nat_secgrp = find_group( ec2_conn, base_name, 'NAT' )\n # Grant NAT access to login to the machine\n if not nat_secgrp :\n print \"ERROR: Could not find NAT security group!\"\n sys.exit( 1 )\n grant_ssh_access( ec2_conn, [ secgrp ], nat_secgrp )\n if params.get( 'os-update' ) == 'YES' :\n grant_cidr_access( ec2_conn, all_ip_cidr, [ secgrp ], 80, nat_secgrp )\n # Need to reload secgrp so it contains latest rules\n secgrp = find_group( ec2_conn, base_name, secgrp_type )\n # Need to reload NAT secgrp so it contains latest rules\n nat_secgrp = find_group( ec2_conn, base_name, 'NAT' )\n\n if params.get( 'outgoing-cidr-rules' ) :\n for outgoing_rule in params[ 'outgoing-cidr-rules' ] :\n start_port = outgoing_rule.get( 'port' )\n end_port = start_port\n protocol = get_secgrp_protocol_param( outgoing_rule )\n cidr_list = get_cidr_param( outgoing_rule[ 'cidr' ] )\n\n for cidr in cidr_list :\n grant_cidr_access( ec2_conn, cidr, [ secgrp ], start_port, nat_secgrp, protocol )\n\n if params.get( 'outgoing-group-rules' ) :\n for outgoing_rule in params[ 'outgoing-group-rules' ] :\n start_port = outgoing_rule.get( 'port' )\n end_port = start_port\n protocol = get_secgrp_protocol_param( outgoing_rule )\n target_secgrp_type = outgoing_rule[ 'group-type' ]\n target_secgrp = find_group( ec2_conn, base_name, target_secgrp_type )\n grant_grp_access( ec2_conn, [ secgrp ], 
target_secgrp, start_port, protocol )\n \n if params.get( 'incoming-group-rules' ) :\n for incoming_rule in params[ 'incoming-group-rules' ] :\n start_port = incoming_rule.get( 'port' )\n end_port = start_port\n protocol = get_secgrp_protocol_param( incoming_rule )\n incoming_secgrp_type = incoming_rule[ 'group-type' ]\n incoming_secgrp = find_group( ec2_conn, base_name, incoming_secgrp_type )\n grant_grp_access( ec2_conn, [ incoming_secgrp ], secgrp, start_port, protocol )\n\n if params.get( 'self-rules' ) :\n for self_rule in params[ 'self-rules' ] :\n start_port = self_rule.get( 'port' )\n end_port = start_port\n\n if not start_port :\n start_port = self_rule[ 'port-range' ][ 'start' ]\n end_port = self_rule[ 'port-range' ][ 'end' ]\n\n protocol = get_secgrp_protocol_param( self_rule )\n\n grant_grp_self_access( ec2_conn, secgrp, start_port, end_port, protocol )\n\n # Reload the security group so it contains all the latest rules\n secgrp = find_group( ec2_conn, base_name, secgrp_type )\n return ( secgrp_type, secgrp )", "def available_groups(cls):\n raise NotImplementedError", "def post_security_group_create(self, resource_dict):\n pass", "def load_all_groups(self):\n for _, group in self.scopes.items():\n group.update()", "def add_hosts_all_subnets(client, parsed_args):\n for host in client.hosts():\n if host.category == \"linux\":\n if host.server_id:\n EXISTING.append(host)\n if host.server_id is None:\n create_agentless(host, client, parsed_args)", "def _sync_all(cursor):\n _print_info('Syncing all privileges.')\n\n all_namespace_permissions = _fetch_all_namespace_permissions(cursor)\n\n for namespace_permission in all_namespace_permissions:\n namespace = namespace_permission['namespace']\n users = namespace_permission['users']\n\n _print_info('Working on namespace: \\'{}\\''.format(namespace))\n for user in users:\n _grant_select_privilege(cursor, user, namespace)", "def create_security_group(self, body=None):\r\n return self.post(self.security_groups_path, body=body)", "def createSG(ec2,name,rules):\n\t# check if the security group exists\n\tgroup = None\n\tsgGroups = [sg for sg in ec2.get_all_security_groups() if sg.name == name]\n\tif sgGroups:\n\t\tgroup = sgGroups[0]\n\t\tec2.delete_security_group(name=name, group_id=group)\t\n\tprint \"Creating %s Security Group\" % name\n\tgroup = ec2.create_security_group(name, 'group for %s' % name)\n\tif group:\n\t\t# Set the inbound rules\n\t\tfor rule in rules:\n\t\t\tif rule.src_group_name:\n\t\t\t\tgroup.authorize(ip_protocol=rule.ip_protocol,from_port=rule.from_port,to_port=rule.to_port,cidr_ip=rule.cidr_ip,src_group=group)\n\t\t\telse:\n\t\t\t\tgroup.authorize(ip_protocol=rule.ip_protocol,from_port=rule.from_port,to_port=rule.to_port,cidr_ip=rule.cidr_ip,src_group=None)\n\t\treturn True\n\telse:\n\t\tlogError('Error during '+name+' Security Group update')\n\t\treturn False", "def copy_to_region(self, region, name=None):\r\n if region.name == self.region:\r\n raise BotoClientError('Unable to copy to the same Region')\r\n conn_params = self.connection.get_params()\r\n rconn = region.connect(**conn_params)\r\n sg = rconn.create_security_group(name or self.name, self.description)\r\n source_groups = []\r\n for rule in self.rules:\r\n grant = rule.grants[0]\r\n for grant in rule.grants:\r\n if grant.name:\r\n if grant.name not in source_groups:\r\n source_groups.append(grant.name)\r\n sg.authorize(None, None, None, None, grant)\r\n else:\r\n sg.authorize(rule.ip_protocol, rule.from_port, rule.to_port,\r\n grant.cidr_ip)\r\n return sg", 
"def setup_group_workspaces(context):\n if context.readDataFile(\"marker.txt\") is None:\n return\n\n portal = context.getSite()\n if \"groups\" not in portal.objectIds():\n\n groups = portal[\n portal.invokeFactory(\"Folder\",id=\"groups\")]\n\n # set default properties\n groups.setTitle(\"groups\")\n groups.setDescription(\"Group workspaces container.\")\n groups._getWorkflowTool().doActionFor(groups, \"publish\" \"\")\n groups.setExcludeFromNav(True)\n groups.update() \n logger.info(\"Groups container created.\")", "def create_default_groups():\n from flaskbb.fixtures.groups import fixture\n result = []\n for key, value in fixture.items():\n group = Group(name=key)\n\n for k, v in value.items():\n setattr(group, k, v)\n\n group.save()\n result.append(group)\n return result", "def dvs_instances_one_group(self):\n self.show_step(1)\n self.env.revert_snapshot(\"dvs_vcenter_systest_setup\")\n\n cluster_id = self.fuel_web.get_last_created_cluster()\n\n os_ip = self.fuel_web.get_public_vip(cluster_id)\n os_conn = os_actions.OpenStackActions(\n os_ip, SERVTEST_USERNAME,\n SERVTEST_PASSWORD,\n SERVTEST_TENANT)\n\n default_net = os_conn.nova.networks.find(label=self.inter_net_name)\n\n # Create security group with rules for ssh and ping\n security_group = os_conn.create_sec_group_for_ssh()\n\n # Get max count of instances which we can create according to\n # resource limit\n vm_count = min(\n [os_conn.nova.hypervisors.resource_class.to_dict(h)['vcpus']\n for h in os_conn.nova.hypervisors.list()])\n\n self.show_step(2)\n self.show_step(3)\n openstack.create_instances(os_conn=os_conn,\n nics=[{'net-id': default_net.id}],\n vm_count=vm_count,\n security_groups=[security_group.name])\n openstack.verify_instance_state(os_conn)\n\n self.show_step(4)\n srv_list = os_conn.nova.servers.list()\n fip = openstack.create_and_assign_floating_ips(os_conn, srv_list)\n ip_pair = dict.fromkeys(fip)\n for key in ip_pair:\n ip_pair[key] = [value for value in fip if key != value]\n openstack.check_connection_vms(ip_pair)\n\n self.show_step(5)\n for srv in srv_list:\n os_conn.nova.servers.delete(srv)\n logger.info(\"Check that instance was deleted.\")\n os_conn.verify_srv_deleted(srv)", "def test_08_security_group(self):\n # Validate the following:\n # 1. Create a project\n # 2. Assign some security groups to that project\n # 3. 
Verify the security groups can only be assigned to VM belonging\n # to that project.\n\n security_group = SecurityGroup.create(\n self.apiclient,\n self.services[\"security_group\"],\n projectid=self.project.id\n )\n self.debug(\"Created security group with ID: %s\" % security_group.id)\n # Default Security group should not have any ingress rule\n sercurity_groups = SecurityGroup.list(\n self.apiclient,\n projectid=self.project.id\n )\n self.assertEqual(\n isinstance(sercurity_groups, list),\n True,\n \"Check for list security groups response\"\n )\n\n self.assertNotEqual(\n len(sercurity_groups),\n 0,\n \"Check List Security groups response\"\n )\n # Authorize Security group to SSH to VM\n ingress_rule = security_group.authorize(\n self.apiclient,\n self.services[\"security_group\"],\n projectid=self.project.id\n )\n self.assertEqual(\n isinstance(ingress_rule, dict),\n True,\n \"Check ingress rule created properly\"\n )\n\n self.debug(\n \"Authorizing ingress rule for sec group ID: %s for ssh access\"\n % security_group.id)\n self.virtual_machine = VirtualMachine.create(\n self.apiclient,\n self.services[\"server\"],\n serviceofferingid=self.service_offering.id,\n securitygroupids=[security_group.id],\n projectid=self.project.id\n )\n self.debug(\"Deployed VM (ID: %s) in project: %s\" % (\n self.virtual_machine.id,\n self.project.id\n ))\n self.assertEqual(\n self.virtual_machine.state,\n 'Running',\n \"VM state should be running after deployment\"\n )\n # Deploy another VM with same security group outside the project\n self.debug(\n \"Deploying VM with security group: %s outside project:%s\" % (\n security_group.id,\n self.project.id\n ))\n with self.assertRaises(Exception):\n VirtualMachine.create(\n self.apiclient,\n self.services[\"server\"],\n serviceofferingid=self.service_offering.id,\n accountid=self.account.name,\n domainid=self.account.domainid,\n securitygroupids=[security_group.id],\n )\n return", "def get_all_in_region(self, cloud_account_id: str, region_id: str) -> List[Dict]:\n\t\tquery_parameters = {'cloudAccountId': cloud_account_id, 'regionId': region_id}\n\t\treturn self._get(route=AWSSecurityGroupConsts.CLOUD_SECURITY_GROUP.value, params=query_parameters)", "async def start_collect_securities():\n\n await collect_securities.cast()", "def dvs_vcenter_multiple_nics(self):\n self.show_step(1)\n self.env.revert_snapshot(\"dvs_vcenter_systest_setup\")\n cluster_id = self.fuel_web.get_last_created_cluster()\n\n os_ip = self.fuel_web.get_public_vip(cluster_id)\n os_conn = os_actions.OpenStackActions(\n os_ip, SERVTEST_USERNAME,\n SERVTEST_PASSWORD,\n SERVTEST_TENANT)\n\n tenant = os_conn.get_tenant(SERVTEST_TENANT)\n networks = []\n router = os_conn.get_router(os_conn.get_network(self.ext_net_name))\n\n self.show_step(2)\n self.show_step(3)\n for net in self.net_data:\n network = os_conn.create_network(network_name=net.keys()[0],\n tenant_id=tenant.id)['network']\n\n logger.info('Create subnet {}'.format(net.keys()[0]))\n subnet = os_conn.create_subnet(subnet_name=net.keys()[0],\n network_id=network['id'],\n cidr=net[net.keys()[0]],\n ip_version=4)\n\n # Check that network is created.\n assert_true(\n os_conn.get_network(network['name'])['id'] == network['id'])\n os_conn.add_router_interface(\n router_id=router[\"id\"],\n subnet_id=subnet[\"id\"])\n networks.append(network)\n\n nics = [{'net-id': network['id']} for network in networks]\n\n # Add rules for ssh and ping\n os_conn.goodbye_security()\n\n _s_groups = os_conn.neutron.list_security_groups()\n _srv_tenant_id = 
os_conn.get_tenant(SERVTEST_TENANT).id\n default_sg = [sg for sg in _s_groups['security_groups']\n if sg['tenant_id'] == _srv_tenant_id and\n sg['name'] == 'default'][0]\n\n self.show_step(4)\n instances = openstack.create_instances(\n os_conn=os_conn,\n nics=nics,\n security_groups=[default_sg['name']])\n openstack.verify_instance_state(os_conn)\n\n self.show_step(5)\n for instance in instances:\n for net in networks:\n assert_true(os_conn.get_nova_instance_ip(\n instance, net_name=net['name']) is not None)\n\n net_1_name = self.net_data[0].keys()[0]\n net_2_name = self.net_data[1].keys()[0]\n ips = {\n net_1_name: {'ips': [], 'access_point_ip': ''},\n net_2_name: {'ips': [], 'access_point_ip': ''}\n }\n\n for net in networks:\n ips[net['name']]['ips'] = map(\n (lambda x:\n os_conn.get_nova_instance_ip(x, net_name=net['name'])),\n instances)\n access_point, access_point_ip = openstack.create_access_point(\n os_conn=os_conn,\n nics=[{'net-id': net['id']}],\n security_groups=[default_sg['name']])\n ips[net['name']]['access_point_ip'] = access_point_ip\n\n logger.info(pretty_log(ips))\n\n self.show_step(6)\n cmds = [\"sudo /bin/ip link set up dev eth1\",\n \"sudo /sbin/cirros-dhcpc up eth1\"]\n access_point_ip = ips[net_1_name]['access_point_ip']\n for ip in ips[net_1_name]['ips']:\n openstack.remote_execute_command(access_point_ip, ip, cmds[0])\n openstack.remote_execute_command(access_point_ip, ip, cmds[1])\n\n self.show_step(7)\n for net in networks:\n inst_ips = ips[net['name']]['ips']\n access_point_ip = ips[net['name']]['access_point_ip']\n ip_pair = {ip: [v for v in inst_ips if v != ip] for ip in inst_ips}\n openstack.check_connection_through_host(access_point_ip,\n ip_pair,\n timeout=60 * 5,\n interval=10)", "def test_create_role_for_all_namespaces(self):\n pass", "def make_groups(self):\n for g in self.groups:\n self.add_group(groupname=g['groupname'],\n grouptitle=g['grouptitle'],\n path_to_group=g['path'])", "def list_autoscaling_group(region, filter_by_kwargs):\n conn = boto.ec2.autoscale.connect_to_region(region)\n groups = conn.get_all_groups()\n return lookup(groups, filter_by=filter_by_kwargs)", "def add_groups(self, resolvables):\n for g in [self._resolve_group(group) for group in resolvables]:\n self._client.group_memberships.create({\n 'account': self,\n 'group': g,\n })", "def get_all_groups(self):\n self.cursor.execute(\"select * from groups\")\n self.connection.commit()\n return self.cursor.fetchall()", "def test_get_resource_group_list(self):\n pass", "def security_group_create(auth=None, **kwargs):\n cloud = get_operator_cloud(auth)\n kwargs = _clean_kwargs(keep_name=True, **kwargs)\n return cloud.create_security_group(**kwargs)", "def insert_default_user_groups(apps, schema_editor):\n pass\n # print apps.app_configs.keys()\n # Group = apps.get_model('auth', 'Group')\n # Group.objects.create(name='Displayable Users')\n # Group.objects.create(name='Commissioners')", "async def generate_groups_impl(config):\n async with create_sessionmaker(config)() as dbsession:\n item_stmt = select(Item).filter(Item.group_id is None)\n count_stmt = select(func.count(Item.id)).filter(Item.group_id is None)\n count = await dbsession.execute(count_stmt)\n result = await dbsession.execute(item_stmt)\n categories = []\n with click.progressbar(\n result.scalars(), length=count.scalar_one(), label=\"Generating potential groups\"\n ) as progress:\n for item in progress:\n for category in item.attributes[\"_categories\"]:\n categories.append(category.lower())\n counts = [(cat, count) for cat, 
count in Counter(categories).most_common() if count >= 15] # noqa: PLR2004\n counts.sort(key=lambda c: c[1])\n max_groups = len(counts)\n with click.progressbar(length=max_groups, label=\"Generating groups\") as progress:\n while counts:\n category = counts[0][0]\n group_stmt = select(Group).filter(Group.value == category)\n result = await dbsession.execute(group_stmt)\n group = result.scalars().first()\n if group is None:\n group = Group(value=category, label=category[0].upper() + category[1:], split=\"basic\")\n dbsession.add(group)\n result = await dbsession.execute(item_stmt)\n for item in result.scalars():\n if category in item.attributes[\"_categories\"]:\n item.group = group\n await dbsession.commit()\n categories = []\n result = await dbsession.execute(item_stmt)\n for item in result.scalars():\n for category in item.attributes[\"_categories\"]:\n categories.append(category.lower())\n old_counts = len(counts)\n counts = [\n (cat, count) for cat, count in Counter(categories).most_common() if count >= 15 # noqa: PLR2004\n ]\n counts.sort(key=lambda c: c[1])\n progress.update(old_counts - len(counts))\n await dbsession.commit()", "def security_groups(self, oid):\n try:\n path = u'/servers/%s/os-security-groups' % oid\n res = self.client.call(path, u'GET', data=u'', \n token=self.manager.identity.token)\n self.logger.debug(u'Get openstack server security groups: %s' % truncate(res))\n return res[0][u'security_groups']\n except Exception as error:\n self.logger.error(error, exc_info=True)\n data = []\n return res", "def register_secgroup_event_handler(self):\n\n LOG.info(\"Security groups feature is enabled\")\n\n # NB security group rules cannot be updated, and security\n # groups themselves have no forwarder state in them, so we\n # don't need the update events\n\n # register pre-commit events if they're available\n if PRECOMMIT:\n # security group precommit events\n registry.subscribe(self.process_secgroup_commit,\n resources.SECURITY_GROUP,\n events.PRECOMMIT_CREATE)\n registry.subscribe(self.process_secgroup_commit,\n resources.SECURITY_GROUP,\n events.PRECOMMIT_DELETE)\n # security group rule precommit events\n registry.subscribe(self.process_secgroup_commit,\n resources.SECURITY_GROUP_RULE,\n events.PRECOMMIT_CREATE)\n registry.subscribe(self.process_secgroup_commit,\n resources.SECURITY_GROUP_RULE,\n events.PRECOMMIT_DELETE)\n\n # register post-commit events\n # security group post commit events\n registry.subscribe(self.process_secgroup_after,\n resources.SECURITY_GROUP,\n events.AFTER_CREATE)\n registry.subscribe(self.process_secgroup_after,\n resources.SECURITY_GROUP,\n events.AFTER_DELETE)\n # security group rule post commit events\n registry.subscribe(self.process_secgroup_after,\n resources.SECURITY_GROUP_RULE,\n events.AFTER_CREATE)\n registry.subscribe(self.process_secgroup_after,\n resources.SECURITY_GROUP_RULE,\n events.AFTER_DELETE)\n\n if not PRECOMMIT:\n # Liberty requires a BEFORE_DELETE hack\n registry.subscribe(self.process_secgroup_commit,\n resources.SECURITY_GROUP_RULE,\n events.BEFORE_DELETE)", "def test_aws_service_api_vm_security_group_delete(self):\n pass", "def list_secgroups(self, name=None):\n groups = self.cloudman.network.security_groups()\n\n # print (\"TTTTT\")\n # for g in groups:\n # pprint(g)\n\n if name is not None:\n for entry in groups:\n\n if entry['name'] == name:\n groups = [entry]\n break\n\n return self.get_list(\n groups,\n kind=\"secgroup\")", "def update_cluster_security_group(ec2, cluster_props):\n vpc = 
ec2.Vpc(id=cluster_props['VpcId'])\n\n # The first Security group should be the default one\n defaultSg = list(vpc.security_groups.all())[0]\n print(\"Default Security group:\", defaultSg)\n\n # Authorize access\n try:\n defaultSg.authorize_ingress(GroupName=defaultSg.group_name,\n CidrIp='0.0.0.0/0',\n IpProtocol='TCP',\n FromPort=int(DWH_PORT),\n ToPort=int(DWH_PORT)\n )\n print(\"Access authorized\")\n except botocore.exceptions.ClientError as e:\n print(\"ClientError:\", e)\n except Exception as e:\n print(\"Error:\", e)", "def dvs_instances_batch_mix_sg(self):\n self.show_step(1)\n self.env.revert_snapshot(\"dvs_vcenter_systest_setup\")\n\n cluster_id = self.fuel_web.get_last_created_cluster()\n\n os_ip = self.fuel_web.get_public_vip(cluster_id)\n os_conn = os_actions.OpenStackActions(\n os_ip, SERVTEST_USERNAME,\n SERVTEST_PASSWORD,\n SERVTEST_TENANT)\n tenant = os_conn.get_tenant(SERVTEST_TENANT)\n\n self.show_step(2)\n net_1 = os_conn.create_network(\n network_name=self.net_data[0].keys()[0],\n tenant_id=tenant.id)['network']\n\n subnet = os_conn.create_subnet(\n subnet_name=net_1['name'],\n network_id=net_1['id'],\n cidr=self.net_data[0][self.net_data[0].keys()[0]],\n ip_version=4)\n\n # Check that network is created\n assert_true(os_conn.get_network(net_1['name'])['id'] == net_1['id'])\n\n # Create Router_01, set gateway and add interface to external network\n router_1 = os_conn.create_router('router_1', tenant=tenant)\n\n # Add net_1 to router_1\n os_conn.add_router_interface(router_id=router_1[\"id\"],\n subnet_id=subnet[\"id\"])\n\n # Get max count of instances which we can create according to\n # resource limit\n vm_count = min(\n [os_conn.nova.hypervisors.resource_class.to_dict(h)['vcpus']\n for h in os_conn.nova.hypervisors.list()])\n\n self.show_step(3)\n self.show_step(4)\n sg1 = os_conn.nova.security_groups.create('SG1', \"descr\")\n sg2 = os_conn.nova.security_groups.create('SG2', \"descr\")\n\n self.icmp[\"security_group_rule\"][\"security_group_id\"] = sg1.id\n self.icmp[\"security_group_rule\"][\"remote_group_id\"] = sg1.id\n self.icmp[\"security_group_rule\"][\"direction\"] = \"ingress\"\n os_conn.neutron.create_security_group_rule(self.icmp)\n self.icmp[\"security_group_rule\"][\"direction\"] = \"egress\"\n os_conn.neutron.create_security_group_rule(self.icmp)\n\n for sg in [sg1, sg2]:\n self.tcp[\"security_group_rule\"][\"security_group_id\"] = sg.id\n self.tcp[\"security_group_rule\"][\"remote_group_id\"] = sg.id\n self.tcp[\"security_group_rule\"][\"direction\"] = \"ingress\"\n os_conn.neutron.create_security_group_rule(self.tcp)\n self.tcp[\"security_group_rule\"][\"direction\"] = \"egress\"\n os_conn.neutron.create_security_group_rule(self.tcp)\n\n # Add rules for ssh and ping\n os_conn.goodbye_security()\n _s_groups = os_conn.neutron.list_security_groups()['security_groups']\n _srv_tenant_id = os_conn.get_tenant(SERVTEST_TENANT).id\n default_sg = [sg for sg in _s_groups\n if sg['tenant_id'] == _srv_tenant_id and\n sg['name'] == 'default'][0]\n\n for step in (5, 9):\n self.show_step(step) # step 5, 9\n step += 1\n self.show_step(step) # step 6, 10\n step += 1\n\n openstack.create_instances(\n os_conn=os_conn,\n nics=[{'net-id': net_1['id']}],\n vm_count=vm_count,\n security_groups=[sg1.name, sg2.name, default_sg['name']])\n openstack.verify_instance_state(os_conn)\n\n self.show_step(step) # step 7, 11\n step += 1\n\n instances = os_conn.nova.servers.list()\n fip = openstack.create_and_assign_floating_ips(os_conn, instances)\n ip_pair = dict.fromkeys(fip)\n for 
key in ip_pair:\n ip_pair[key] = [value for value in fip if key != value]\n openstack.check_connection_vms(ip_pair,\n timeout=60 * 5,\n interval=10)\n\n self.show_step(step) # step 8, 12\n step += 1\n\n for instance in instances:\n os_conn.nova.servers.delete(instance)\n logger.info(\"Check that instance was deleted.\")\n os_conn.verify_srv_deleted(instance)", "def create_predefined_roles(sender, **kwargs):\n from django.contrib.contenttypes.models import ContentType\n from w2s.defaults import TaskDefaults\n from users.models import Roles\n\n if ContentType.objects.filter(app_label='auth', model='group').exists() and ContentType.objects.filter(app_label='users', model='roles').exists():\n predefined_roles = TaskDefaults.get_predefined_roles()\n for role_alias, role_name in predefined_roles.items():\n group_model = ContentType.objects.filter(app_label='auth', model='group')[0].model_class()\n\n if not group_model.objects.filter(name=role_name).exists():\n access_specifiers = TaskDefaults.get_predefined_role_access_specifiers(role_alias=role_alias)\n allowed_permissions_sets = [\n TaskDefaults.get_access_specifier_permissions(specifier)[0] for specifier in access_specifiers]\n allowed_permissions = list(set([item for sublist in allowed_permissions_sets for item in sublist]))\n\n # Creating Group\n group_instance = group_model.objects.create(name=role_name)\n group_instance.permissions.set(allowed_permissions)\n if group_instance.save() is None:\n print('\\033[0;37;42m Generated new role \"%s\", Applying details... \\033[0m' % role_alias)\n\n # Creating Role detail\n role_instance = Roles.objects.create(\n group = group_instance,\n alias = role_alias,\n accesses = ','.join(access_specifiers),\n description = 'Predefined role for %s' % role_alias\n )\n\n if role_instance.save() is None:\n print('\\033[0;37;42m Details applied for role: %s \\033[0m' % role_alias)\n else:\n print('---- Error while generating predefined roles ---')\n print(' -Either auth.group or users.roles model does not exists !!!')", "def index(self, req, tenant_id):\n LOG.debug(\"Index() called with %s\" % (tenant_id))\n\n sec_groups = models.SecurityGroup().find_all(tenant_id=tenant_id,\n deleted=False)\n\n # Construct the mapping from Security Groups to Security Group Rules\n rules_map = dict([(g.id, g.get_rules()) for g in sec_groups])\n\n return wsgi.Result(\n views.SecurityGroupsView(sec_groups,\n rules_map,\n req, tenant_id).list(), 200)", "def set_up_groups(self):\n groups = []\n groups.append({'groupname': 'th',\n 'grouptitle': 'TH',\n 'path': '/'})\n groups.append({'groupname': 'neutronics',\n 'grouptitle': 'Neutronics',\n 'path': '/'})\n groups.append({'groupname': 'metadata',\n 'grouptitle': 'Simulation Metadata',\n 'path': '/'})\n return groups", "def assign_licenses_to_service_groups(self, cr, uid, ids, context=None):\n\n if context is None:\n context = {}\n\n user_obj = self.pool.get('res.users')\n service_group_obj = self.pool.get('postlogistics.service.group')\n for config in self.browse(cr, uid, ids, context=context):\n company = config.company_id\n web_service = PostlogisticsWebService(company)\n\n relations = {}\n for license in company.postlogistics_license_ids:\n service_groups = self._get_allowed_service_group_codes(\n web_service, company, license, context=context)\n group_ids = service_group_obj.search(\n cr, uid, [('group_extid', 'in', service_groups)],\n context=context)\n for group_id in group_ids:\n if group_id in relations:\n relations[group_id].append(license.id)\n else:\n relations[group_id] = 
[license.id]\n for group_id, license_ids in relations.iteritems():\n vals = {'postlogistics_license_ids': [(6, 0, license_ids)]}\n service_group_obj.write(cr, uid, group_id, vals,\n context=context)\n\n\n return True", "def compute_server_groups(self):\n path = '/os-server-groups'\n res = self.compute.call(path, 'GET', data='', \n token=self.manager.identity.token)\n self.logger.debug('Get openstack server groups: %s' % truncate(res))\n return res[0]['server_groups']", "def capacitygroup_group():", "def create_users(self):\n if self.gl is None:\n print(\"No config found, please run connect first.\")\n exit(1)\n else:\n print(\"Starting Users creation.\")\n gl = self.gl\n config = self.config\n for username in config[\"users\"]:\n i = 0\n count = int(config[\"users\"][username][\"count\"])\n pw = config[\"users\"][username][\"pass\"]\n groups = config[\"users\"][username][\"groups\"]\n while i < count:\n i += 1\n print(\"creating user: \" + username + '-' + str(i) + \" ...\", end=' ')\n user = gl.users.create({'email': username + str(i) + '@example.com',\n 'password': pw,\n 'username': username + '-' + str(i),\n 'name': username + '-' + str(i),\n 'skip_confirmation': True})\n self.users.append(user)\n self.usergroups[user.id] = groups\n print(\"done.\")\n print(\"All Users created!\")", "def _test_multiple_ports_secgroup_inheritance(self):\n # create a security group and make it loginable and pingable\n secgrp = self._create_security_group('secgrp')\n self.create_loginable_secgroup_rule(\n secgroup_id=secgrp['id'])\n self.create_pingable_secgroup_rule(\n secgroup_id=secgrp['id'])\n if self.stateless_sg:\n self.create_ingress_metadata_secgroup_rule(\n secgroup_id=secgrp['id'])\n # create two ports with fixed IPs and the security group created\n ports = []\n for i in range(2):\n ports.append(self.create_port(\n self.network, fixed_ips=[{'subnet_id': self.subnets[0]['id']}],\n security_groups=[secgrp['id']]))\n # spawn instances with the ports created\n server_ssh_clients, fips, servers = self.create_vm_testing_sec_grp(\n ports=ports)\n # verify ICMP reachability and ssh connectivity\n for fip in fips:\n self.ping_ip_address(fip['floating_ip_address'])\n self.check_connectivity(fip['floating_ip_address'],\n CONF.validation.image_ssh_user,\n self.keypair['private_key'])", "def create_seurity_group(self):\n return True", "def create_sec_group(self, conn, name, project):\n sec_group = conn.create_security_group(\n name=name, description=\"Security Group\",\n project_id=project.id)\n conn.create_security_group_rule(sec_group)\n return sec_group", "def _node_groups(self, node, log=None):\n hostgroups = set(self.settings.MON_ZABBIX_HOSTGROUPS_NODE)\n hostgroups.update(node.monitoring_hostgroups)\n\n return self._get_or_create_hostgroups(self._node_kwargs(node), self.settings.MON_ZABBIX_HOSTGROUP_NODE, None,\n hostgroups=hostgroups, log=log)", "def create_all(self):\n for name in self.app.config['SIMPLE_DOMAINS']:\n self.connection.create_domain(name)", "def test_add_and_remove_privilege(self):\n\n self.create_common_users_and_groups()\n\n sgp = SetGroupPrivilegesAPI(\n tsurl=TS_URL,\n username=TS_USER,\n password=TS_PASSWORD,\n disable_ssl=True,\n )\n sgp.add_privilege(\n groups=[\"Group 1\", \"Group 2\"], privilege=Privileges.CAN_USE_SPOTIQ\n )", "def test_get_device_groups(self):\n pass", "def test_create_group(self):\n pass", "def test_create_group(self):\n pass", "def create_student_groups(allcourses, student_list):\n\n\t# for each course\n\tfor course in allcourses:\n\n\t\t# check all 
students\n\t\tfor student in student_list:\n\n\t\t\t# if student is attenting course\n\t\t\tif course.name in student.courses:\n\n\t\t\t\t# add student to course class\n\t\t\t\tcourse.add_student(student.last_name)\n\n\t\t# if course has seminars\n\t\tif course.seminars > 0:\n\n\t\t\t# count and add amount to course class\n\t\t\tnumofseminars = math.ceil(course.students/course.maxstudentssem)\n\t\t\tcourse.add_seminar(numofseminars)\n\n\t\t# if course has practicals\n\t\tif course.practicals > 0:\n\n\t\t\t# count and add to course class\n\t\t\tnumofpracticals = math.ceil(course.students/course.maxstudentsprac)\n\t\t\tcourse.add_practical(numofpracticals)\n\n\n\t\t#* divide students over groups *#\n\n\t\t# start with group \"1\"\n\t\tsem = 1\n\n\t\t# if course has seminars\n\t\tif course.seminars > 0:\n\n\t\t\t# iterate over students in course with steps of max amount of students\n\t\t\tfor i in range(0, len(course.studentnames), course.maxstudentssem):\n\n\t\t\t\t# create list with names of students\n\t\t\t\tstudentlist = course.studentnames[i: i + course.maxstudentssem]\n\n\t\t\t\t# add studentlist to course class\n\t\t\t\tcourse.create_seminar_group(sem, studentlist)\n\n\t\t\t\t# go on to the next group\n\t\t\t\tsem += 1\n\n\t\t# same for practical\n\t\tprac = 1\n\t\tif course.practicals > 0:\n\t\t\tfor i in range(0, len(course.studentnames), course.maxstudentsprac):\n\t\t\t\tstudentlist = course.studentnames[i: i + course.maxstudentsprac]\n\t\t\t\tcourse.create_practical_group(prac, studentlist)\n\t\t\t\tprac += 1\n\n\n\treturn allcourses, student_list", "def delete_groups(self, roles):\n security_group_names = self._get_all_group_names()\n\n for role in roles:\n role_group_name = self.group_name_for_role(role)\n if role_group_name in security_group_names:\n self.ec2Connection.delete_security_group(role_group_name)\n cluster_group_name = self.get_cluster_group_name()\n if cluster_group_name in security_group_names:\n self.ec2Connection.delete_security_group(cluster_group_name)", "def all_hosts(self):\n ...", "def grant_ssh_access ( ec2_conn, tgt_grps, nat_grp ) :\n for grp in tgt_grps :\n grant_grp_access( ec2_conn, [ nat_grp ], grp, 22 )", "def test_create_cluster_role(self):\n pass", "def test_ipam_vlan_groups_create(self):\n pass", "def cli_run_all(endpoint, email, password, org_name, grp_name):\n knex = Knex(endpoint)\n User(knex, email, password).login()\n org = Organization(knex, org_name).get()\n grp = org.sample_group(grp_name).get()\n auto_metadata(list(grp.get_samples()), lambda x: click.echo(x, err=True))\n run_group(grp, lambda x: click.echo(x, err=True))\n for sample in grp.get_samples():\n try:\n run_sample(sample, lambda x: click.echo(x, err=True))\n except Exception as e:\n click.echo(f'Sample {sample.name} failed with exception: {e}')", "def create_instances_request(nodes, placement_groups=None, exclusive=False):\n assert len(nodes) > 0\n assert len(nodes) <= BULK_INSERT_LIMIT\n # model here indicates any node that can be used to describe the rest\n model = next(iter(nodes))\n partition = lkp.node_partition(model)\n template = lkp.node_template(model)\n region = lkp.node_region(model)\n\n body = NSDict()\n body.count = len(nodes)\n if not exclusive:\n body.minCount = 1\n\n # source of instance properties\n body.sourceInstanceTemplate = template\n\n # overwrites properties accross all instances\n body.instanceProperties = instance_properties(partition, model)\n\n # key is instance name, value overwrites properties\n body.perInstanceProperties = {\n k: 
per_instance_properties(k, placement_groups) for k in nodes\n }\n\n zones = {\n **{\n f\"zones/{zone}\": {\"preference\": \"ALLOW\"}\n for zone in partition.zone_policy_allow or []\n },\n **{\n f\"zones/{zone}\": {\"preference\": \"DENY\"}\n for zone in partition.zone_policy_deny or []\n },\n }\n if zones:\n body.locationPolicy = {\"locations\": zones}\n\n request = util.compute.regionInstances().bulkInsert(\n project=cfg.project, region=region, body=body.to_dict()\n )\n\n if log.isEnabledFor(logging.DEBUG):\n log.debug(\n f\"new request: endpoint={request.methodId} nodes={to_hostlist(nodes)}\"\n )\n log_api_request(request)\n return request", "def _test_overlapping_sec_grp_rules(self):\n initial_security_groups = []\n if self.stateless_sg:\n md_secgrp = self._create_security_group('metadata_secgrp')\n self.create_ingress_metadata_secgroup_rule(\n secgroup_id=md_secgrp['id'])\n initial_security_groups.append(\n {'name': md_secgrp['name']})\n client_ssh, _, vms = self.create_vm_testing_sec_grp(\n num_servers=2, security_groups=initial_security_groups)\n tmp_ssh, _, tmp_vm = self.create_vm_testing_sec_grp(\n num_servers=1, security_groups=initial_security_groups)\n srv_ssh = tmp_ssh[0]\n srv_vm = tmp_vm[0]\n srv_port = self.client.list_ports(network_id=self.network['id'],\n device_id=srv_vm['server']['id'])['ports'][0]\n srv_ip = srv_port['fixed_ips'][0]['ip_address']\n secgrps = []\n for i, vm in enumerate(vms):\n sg = self._create_security_group('secgrp-%d' % i)\n self.create_loginable_secgroup_rule(secgroup_id=sg['id'])\n port = self.client.list_ports(network_id=self.network['id'],\n device_id=vm['server']['id'])['ports'][0]\n self.client.update_port(port['id'], security_groups=[sg['id']])\n secgrps.append(sg)\n tcp_port = 3000\n rule_list = [{'protocol': constants.PROTO_NUM_TCP,\n 'direction': constants.INGRESS_DIRECTION,\n 'port_range_min': tcp_port,\n 'port_range_max': tcp_port,\n 'remote_group_id': secgrps[0]['id']},\n {'protocol': constants.PROTO_NUM_TCP,\n 'direction': constants.INGRESS_DIRECTION,\n 'port_range_min': tcp_port,\n 'port_range_max': tcp_port + 2,\n 'remote_group_id': secgrps[1]['id']}]\n self.client.update_port(srv_port['id'],\n security_groups=[secgrps[0]['id'], secgrps[1]['id']])\n self.create_secgroup_rules(rule_list, secgroup_id=secgrps[0]['id'])\n\n if self.stateless_sg:\n # NOTE(slaweq): in case of stateless SG, client needs to have also\n # rule which will explicitly accept ingress TCP connections which\n # will be replies from the TCP server so it will use random\n # destination port (depends on the src port choosen by client while\n # establishing connection)\n self.create_security_group_rule(\n security_group_id=secgrps[0]['id'],\n protocol=constants.PROTO_NAME_TCP,\n direction=constants.INGRESS_DIRECTION)\n self.create_security_group_rule(\n security_group_id=secgrps[1]['id'],\n protocol=constants.PROTO_NAME_TCP,\n direction=constants.INGRESS_DIRECTION)\n\n # The conntrack entries are ruled by the OF definitions but conntrack\n # status can change the datapath. Let's check the rules in two\n # attempts\n for _ in range(2):\n with utils.StatefulConnection(\n client_ssh[0], srv_ssh, srv_ip, tcp_port) as con:\n con.test_connection()\n for port in range(tcp_port, tcp_port + 3):\n with utils.StatefulConnection(\n client_ssh[1], srv_ssh, srv_ip, port) as con:\n con.test_connection()", "def main():\n\n # Setup `pysc` to use BASIC auth, with a username, and password. 
Also sets the endpoint to use.\n setup_sensorcloud_basic(CONSTS['SC_USERNAME'], CONSTS['SC_PASSWORD'],\n CONSTS['SC_ENDPOINT'], CONSTS['PYSC_DEBUG'])\n\n org_id = CONSTS['ORG_ID']\n\n # Ensure the organisation exists on the SensorCloud endpoint.\n try:\n organisation = pysc.models.Organisation.single(org_id)\n except KeyError:\n raise RuntimeWarning(\"\"\"The organisation named {:s} was not found.\\n\"\"\"\n \"\"\"Although the `pysc` api has functionality to create an organisation, it cannot \"\"\"\n \"\"\"do so on the sensor-cloud.io instance on AWS.\"\"\".format(org_id))\n # Ensure sanity, check we got the organisation that we asked for.\n assert (org_id == organisation.id)\n\n # Here we use the Group.resolve_all helper with organisation_id param to filter groups based on id\n # The resolve_all command is similar to .index() however it also calls .follow() on found link automatically,\n # _and_ it converts the resulting HAL objects into real valid `pysc` Group() objects.\n org_groups = pysc.models.Group.resolve_all(params={'organisation_id': org_id})\n # We are not likely to have more than 1000 groups, so we don't need to do return doc pagination here.\n for g in org_groups:\n group_id = g.id\n print(\"Found group: {:s}\".format(group_id))\n\n print(\"Found a total of {:d} groups for {:s} on that SensorCloud endpoint.\".format(len(org_groups), org_id))", "def test_list_projects_for_groups(self):\n domain1 = unit.new_domain_ref()\n self.resource_api.create_domain(domain1['id'], domain1)\n domain2 = unit.new_domain_ref()\n self.resource_api.create_domain(domain2['id'], domain2)\n project1 = unit.new_project_ref(domain_id=domain1['id'])\n project1 = self.resource_api.create_project(project1['id'], project1)\n project2 = unit.new_project_ref(domain_id=domain1['id'])\n project2 = self.resource_api.create_project(project2['id'], project2)\n project3 = unit.new_project_ref(domain_id=domain1['id'])\n project3 = self.resource_api.create_project(project3['id'], project3)\n project4 = unit.new_project_ref(domain_id=domain2['id'])\n project4 = self.resource_api.create_project(project4['id'], project4)\n group_list = []\n role_list = []\n for _ in range(7):\n group = unit.new_group_ref(domain_id=domain1['id'])\n group = self.identity_api.create_group(group)\n group_list.append(group)\n\n role = unit.new_role_ref()\n self.role_api.create_role(role['id'], role)\n role_list.append(role)\n\n # Assign the roles - one inherited and one non-inherited on Domain1,\n # plus one on Project1 and Project2\n self.assignment_api.create_grant(group_id=group_list[0]['id'],\n domain_id=domain1['id'],\n role_id=role_list[0]['id'])\n self.assignment_api.create_grant(group_id=group_list[1]['id'],\n domain_id=domain1['id'],\n role_id=role_list[1]['id'],\n inherited_to_projects=True)\n self.assignment_api.create_grant(group_id=group_list[2]['id'],\n project_id=project1['id'],\n role_id=role_list[2]['id'])\n self.assignment_api.create_grant(group_id=group_list[3]['id'],\n project_id=project2['id'],\n role_id=role_list[3]['id'])\n\n # ...and a few of spoiler assignments to Domain2/Project4\n self.assignment_api.create_grant(group_id=group_list[4]['id'],\n domain_id=domain2['id'],\n role_id=role_list[4]['id'])\n self.assignment_api.create_grant(group_id=group_list[5]['id'],\n domain_id=domain2['id'],\n role_id=role_list[5]['id'],\n inherited_to_projects=True)\n self.assignment_api.create_grant(group_id=group_list[6]['id'],\n project_id=project4['id'],\n role_id=role_list[6]['id'])\n\n group_id_list = [group_list[1]['id'], 
group_list[2]['id'],\n group_list[3]['id']]\n\n # With inheritance on, we should also get back the Project3 due to the\n # inherited role from its owning domain.\n project_refs = (\n self.assignment_api.list_projects_for_groups(group_id_list))\n\n self.assertThat(project_refs, matchers.HasLength(3))\n self.assertIn(project1, project_refs)\n self.assertIn(project2, project_refs)\n self.assertIn(project3, project_refs)", "def auth_secgroup(self, args):\n region = args[\"Region\"]\n sgid = args[\"Security-group-ID\"]\n protocol = args[\"protocol\"]\n from_port = int(args[\"FromPort\"])\n to_port = int(args[\"ToPort\"])\n ip_range = args[\"IpRange\"]\n message = MessageClass()\n\n # Boto3 client creation by providing the access_id and access_secret\n ec2 = boto3.client(service_name='ec2', region_name=region, api_version=None, use_ssl=True,\n verify=None, endpoint_url=None, aws_access_key_id=self.aws_access_key,\n aws_secret_access_key=self.aws_secret_token, aws_session_token=None,\n config=None)\n\n data = ec2.authorize_security_group_ingress(\n GroupId=sgid,\n IpPermissions=[\n {'IpProtocol': protocol,\n 'FromPort': from_port,\n 'ToPort': to_port,\n 'IpRanges': [{'CidrIp': ip_range}]}\n ])\n attachment = MessageAttachmentsClass()\n attachment.title = data\n message.message_text = \"Ingress Successfully Set :\"\n message.attach(attachment)\n\n return message.to_json()", "def make_groups(df, radius=34):\n # df = pd.read_excel(path, sheet_name=\"Main\")\n coords = df[[\"x\", \"y\"]]\n coords = np.array(coords)\n X = coords\n\n # perform DBSCAN grouping\n clustering = DBSCAN(eps=radius, min_samples=1).fit(X)\n\n # add column db to df\n df[\"db\"] = clustering.labels_\n\n # get the number of groups in df\n size = len(df.groupby(\"db\"))\n\n # Adjacency groups will be the same color, make new column\n df[\"color\"] = df.db.apply(rand_color)\n\n # order groups\n ordered_groups_copies = df.db.tolist()\n group_order = []\n for g in ordered_groups_copies:\n if g in group_order:\n pass\n else:\n group_order.append(g)\n\n df[\"group_order\"] = df[\"db\"].apply(lambda x: sort_groups(x, group_order=group_order))\n return df", "def get_groups(self):\n\n if not self.check_prereqs():\n raise StopIteration\n\n db = self.env.get_db_cnx()\n cursor = db.cursor()\n\n query=self.create_query(self.sql_get_groups_query+\" ORDER BY $groupname_field$\",{'groupname_field':self.sql_groupname_field})\n self.log.debug(\"sqlflexibleauthstore: get_groups: %s\" % (query,))\n\n cursor.execute(query)\n desc=[i[0] for i in cursor.description]\n for row in cursor:\n dictrow=dict(zip(desc,row))\n yield dictrow[self.sql_groupname_field]", "def create_secgroup(self, args):\n message = MessageClass()\n region = args[\"Region\"]\n sgid = args[\"Group-Name\"]\n desc = args[\"Description\"]\n\n # Boto3 client creation by providing the access_id and access_secret\n ec2 = boto3.client(service_name='ec2', region_name=region, api_version=None, use_ssl=True,\n verify=None, endpoint_url=None, aws_access_key_id=self.aws_access_key,\n aws_secret_access_key=self.aws_secret_token, aws_session_token=None,\n config=None)\n response = ec2.describe_vpcs()\n vpc_id = response.get('Vpcs', [{}])[0].get('VpcId', '')\n\n response = ec2.create_security_group(GroupName=sgid,\n Description=desc,\n VpcId=vpc_id)\n attachment = MessageAttachmentsClass()\n d = response[\"GroupId\"]\n attachment.title = d\n message.message_text = \"Security group created:\"\n message.attach(attachment)\n\n return message.to_json()", "def _generate_elb_instances_and_sg(resource, 
session):\n for instance in resource.describe_load_balancers()[\"LoadBalancers\"]:\n for security_group in instance.get(\"SecurityGroups\", []):\n yield instance, security_group, _get_sg_name(security_group, session)" ]
[ "0.65404814", "0.641292", "0.6315235", "0.63037133", "0.62225246", "0.611194", "0.6016119", "0.589653", "0.58127975", "0.5788823", "0.57725734", "0.5730021", "0.571415", "0.56577706", "0.56387746", "0.55405027", "0.54687124", "0.5450472", "0.54221886", "0.5418091", "0.5405399", "0.5370526", "0.5361652", "0.5351294", "0.53435224", "0.5334952", "0.5327787", "0.5292629", "0.5288266", "0.52800274", "0.5274075", "0.52344733", "0.5232889", "0.5228884", "0.5225208", "0.5222685", "0.52155125", "0.5205776", "0.5187235", "0.51817876", "0.51723045", "0.51684344", "0.51662564", "0.51479286", "0.514696", "0.5142785", "0.51369077", "0.51304936", "0.510927", "0.51066864", "0.5102322", "0.50958633", "0.5091529", "0.50655377", "0.5063023", "0.50623566", "0.5060393", "0.5057666", "0.50479376", "0.50403905", "0.5027703", "0.5027432", "0.5026532", "0.50170505", "0.50114", "0.49990937", "0.49944174", "0.4992453", "0.4980485", "0.49743256", "0.4974132", "0.49722028", "0.497152", "0.49710086", "0.49693528", "0.49663118", "0.4963738", "0.49593353", "0.49568817", "0.49563602", "0.4950831", "0.4945306", "0.4937257", "0.4937257", "0.4936396", "0.49342105", "0.49262664", "0.4925549", "0.49231428", "0.4921836", "0.49172556", "0.49152905", "0.49131468", "0.49074867", "0.4904328", "0.49039942", "0.48977447", "0.48955032", "0.4895305", "0.4892658" ]
0.71990424
0
Create all key pairs in all regions
def createAllKP(): if not os.path.exists(keysDir): os.makedirs(keysDir) for info in conf_HVM: keyName = 'Key-'+info['region']+'-'+info['zone'] try: os.remove(keysDir+'/'+keyName+'.pem') except OSError: pass print "Key creation :",keyName ec2 = boto.ec2.connect_to_region(info['region']+'-'+info['zone']) # check if the key pair exists kps = [kp for kp in ec2.get_all_key_pairs() if kp.name == keyName] if kps: ec2.delete_key_pair(keyName) key = ec2.create_key_pair(keyName) key.save(keysDir)
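For comparison, a minimal boto3 sketch of the same task — a sketch only, not the author's method. The region list, the 'Key-<region>' naming pattern, and keys_dir below are assumptions standing in for the conf_HVM and keysDir globals that the legacy-boto, Python 2 snippet above relies on.

import os
import boto3

# Assumed stand-ins for the original conf_HVM / keysDir globals (not from the source).
keys_dir = 'keys'
regions = boto3.session.Session().get_available_regions('ec2')  # may include regions not enabled for the account

def create_all_key_pairs():
    # Make sure the local directory for the .pem files exists.
    os.makedirs(keys_dir, exist_ok=True)
    for region in regions:
        key_name = 'Key-' + region
        ec2 = boto3.client('ec2', region_name=region)
        # Delete any existing key pair with the same name, then recreate it,
        # mirroring the delete-then-create flow of the original snippet.
        existing = ec2.describe_key_pairs(
            Filters=[{'Name': 'key-name', 'Values': [key_name]}])['KeyPairs']
        if existing:
            ec2.delete_key_pair(KeyName=key_name)
        key = ec2.create_key_pair(KeyName=key_name)
        pem_path = os.path.join(keys_dir, key_name + '.pem')
        with open(pem_path, 'w') as fh:
            fh.write(key['KeyMaterial'])
        os.chmod(pem_path, 0o600)  # private key files should not be world-readable
        print('Key creation:', key_name)

Writing the PEM material explicitly (instead of the legacy key.save() helper) keeps the sketch independent of boto 2 and makes the 0600 file permission explicit.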
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def gen_keys():", "def createAllSG():\n\tfor info in conf_HVM:\n\t\tec2 = boto.ec2.connect_to_region(info['region']+'-'+info['zone'])\n\t\tcreateSG(ec2,'SG-Cassandra-'+info['region']+'-'+info['zone'],CASSANDRA_RULES)", "def setup_space_keys(cls):\n if cls.KEYS:\n return\n\n from pkg_resources import iter_entry_points\n\n for entry_point in iter_entry_points(group=cls.CATKIN_SPACES_GROUP):\n ep_dict = entry_point.load()\n cls.STORED_KEYS.append(entry_point.name + '_space')\n cls.SPACES[entry_point.name] = ep_dict\n cls._create_space_methods(entry_point.name)\n\n cls.KEYS = cls.STORED_KEYS + cls.EXTRA_KEYS", "def init_region ( aws, region_name, aws_account_type, init_params ) :\n ec2_conn = aws.ec2_conn( )\n keypair_savedir = os.environ[ 'PWD' ]\n print \"Creating new keypairs for region \" + region_name\n for keytype in init_params.get( 'keypairs', [] ) :\n keypair_name = get_keypair_name( aws_account_type, region_name, keytype )\n keypair = ec2_conn.get_key_pair( keypair_name )\n if keypair :\n print 'Keypair ' + keypair_name + ' already exists. Skipping.'\n else :\n keypair = ec2_conn.create_key_pair( keypair_name )\n keypair.save( keypair_savedir )\n keypair_filename = keypair_savedir + '/' + keypair_name + '.pem'\n print 'Created keypair ' + keypair_filename\n store_keypair( s3_infra_conn = aws.s3_infrastructure_conn( ),\n region_name = region_name,\n aws_account_type = aws_account_type,\n keypair_name = get_keypair_keypath( aws_account_type ) + keypair_name,\n keypair_filename = keypair_filename )\n print 'Stored keypair in S3 at: ' + get_keypair_keypath( aws_account_type )\n os.remove( keypair_filename )\n\n if init_params.get( 'init-deployment', 'YES' ) == 'YES' :\n print \"Creating Deployment security group.\"\n deploy_secgrp = ec2_conn.create_security_group( get_deployment_secgrp_name( ),\n \"Used by the deployment server.\" )\n deploy_secgrp.authorize( ip_protocol = \"tcp\",\n from_port = 22,\n to_port = 22,\n cidr_ip = hbo_cidr_list ) \n deploy_secgrp.authorize( ip_protocol = \"tcp\",\n from_port = 8080,\n to_port = 8080,\n cidr_ip = hbo_cidr_list ) \n\n deploy_secgrp.authorize( ip_protocol = \"tcp\",\n from_port = 22,\n to_port = 22,\n cidr_ip = build_server_cidr ) \n deploy_secgrp.authorize( ip_protocol = \"tcp\",\n from_port = 8080,\n to_port = 8080,\n cidr_ip = build_server_cidr ) \n\n if init_params.get( 'init-ami-update', 'YES' ) == 'YES' :\n print \"Creating ami-update security group.\"\n amiupdate_secgrp = ec2_conn.create_security_group( get_amiupdate_secgrp_name( ),\n \"Used by the ami update instances.\" )\n amiupdate_secgrp.authorize( ip_protocol = \"tcp\",\n from_port = 22,\n to_port = 22,\n cidr_ip = hbo_cidr_list ) \n amiupdate_secgrp.authorize( ip_protocol = \"tcp\",\n from_port = 8080,\n to_port = 8080,\n cidr_ip = hbo_cidr_list )", "def keysAll():", "def genKeys():\r\n (pub, priv) = rsa.newkeys(256)\r\n context = {\r\n 'pub': pub,\r\n 'priv': priv\r\n }\r\n return context", "def create_key ():", "def createAllDictionaries(self):\r\n self.makeSentenceLengths()\r\n self.makeWords()\r\n self.makeStems()\r\n self.makeGerund()\r\n self.makeWordLengths()", "def create_keys(i):\n sk = elgamal.create_sk()\n secret_keys.append(sk)\n\n keys = [0, 0]\n\n keys[x[i]] = elgamal.gen(sk)\n keys[1 - x[i]] = elgamal.o_gen()\n\n public_keys.append(keys)", "def create_all_taxonomic_keys(point_locations: dict, location_species: dict, location_range_species: dict,\n trait_data: dict, all_taxa_data: dict) -> dict:\n\n all_keys = {}\n\n # find all unique sets of species\n 
species_sets = set()\n for p in point_locations:\n loc = point_locations[p]\n all_species = set()\n all_species |= location_species[loc.name]\n if loc.n_direct_children() > 0:\n for c in loc.direct_children():\n all_species |= fetch_child_data(c, location_species)\n\n range_species = set(find_species_by_name(s) for s in location_range_species[loc])\n all_species |= range_species\n if len(all_species) > 0:\n species_sets.add(frozenset(all_species))\n\n # create keys for each unique set of species\n warnings = set()\n for sp_set in species_sets:\n taxa_data = {}\n for s in sp_set:\n try:\n taxa_data[\"Male \" + s.binomial()] = all_taxa_data[\"♂ Male {{\" + s.species + \"}}\"]\n taxa_data[\"Female \" + s.binomial()] = all_taxa_data[\"♀ Female {{\" + s.species + \"}}\"]\n except KeyError:\n report_error(\"Missing taxonomic key data: \" + s.species)\n\n all_keys[sp_set], new_warning = TMB_TaxKeyGen.generate_taxonomic_key(trait_data, taxa_data, verbose=False)\n warnings |= new_warning\n\n # global key for all species\n all_keys[\"all\"], new_warning = TMB_TaxKeyGen.generate_taxonomic_key(trait_data, all_taxa_data, verbose=False)\n warnings |= new_warning\n\n for w in sorted(warnings):\n report_error(w)\n\n return all_keys", "def test_aws_service_api_keypair_generate_post(self):\n pass", "def _create_keys(bucket_name, keys=[]):\n bucket = connection.create_bucket(bucket_name)\n\n for s in keys:\n key = bucket.new_key(s)\n key.set_contents_from_string(s)\n\n return bucket", "def make_consistent(self):\r\n\r\n for key in self.get_keys():\r\n self.eliminate_key(key)\r\n\r\n for i_temp in self.indexes(): #i will be a note index\r\n for j_temp in self.get_keys_from_note(i_temp):\r\n if self.key_dict_contains(j_temp):\r\n self.add_key(j_temp,Index(i_temp))\r\n## self.key_dict[j_temp].add(str(Index(i_temp)))\r\n else:\r\n self.initiate_new_key(j_temp,Index(i_temp))", "def Dictionary_create(nMarkers, markerSize):\n pass", "def secondary_keys_dicts(self):", "def _init_keys(self):\n\n basic_constraints = crypto.X509Extension('basicConstraints'.encode('ascii'), True,\n 'CA:TRUE, pathlen:0'.encode('ascii'))\n serial = self._get_serial()\n pkey = self._create_pkey(self.commonname, serial)\n self._create_cert(pkey, self.commonname, serial, [basic_constraints], expire=30*365)", "def initialize(self, keys: List[str]):", "def __initialSigningKeys(self) -> None:\n seedStr = '0' * 31\n seedNum = ['1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f']\n seedList = []\n for i in range(15):\n seed = seedStr + seedNum[i]\n seedList.append(seed.encode('utf-8'))\n\n for seed in seedList:\n self.signingKeysList.append(SigningKey(seed))\n log.info(\"15 signing keys have been generated successfully\")", "def create_inbound(self, keys):", "def make_s3_keys(self):\n # Write the data twice:\n for fmt in (VERSION_FMT, LATEST_FMT):\n yield make_s3_keys(self, fmt)", "def write_regions(pathfolder, key_firms, regions, methodvalues):\n ## Generate namefile\n namefile = generate_namefile(pathfolder, methodvalues)\n\n ## Writting\n db = shelve.open(namefile)\n db['nif'] = key_firms\n db['regions'] = regions\n db['methodvalues'] = methodvalues\n db.close()", "def generate_map(self):\n\n # Create main streets first\n self.create_main_streets()\n\n # Then create the commercial buildings in the center of town\n self.create_commercial_center()\n\n # Then create the neighborhoods that populate the rest of the city\n while(self.create_neighborhood()):\n pass\n\n # Clean up any invalid buildings that were created\n 
self.delete_inaccessible_buildings()", "def create_ssh_keys(self):\n self.random_ssh()\n\n return self.keys", "def create_and_fill_bucket(self):\n EmrProcessing.bucket = \\\n self.s3_handle.create_bucket(EmrProcessing.bucket_name)\n key = EmrProcessing.bucket.new_key('input/test.csv')\n input_file_path = '../data/test.csv'\n key.set_contents_from_filename(input_file_path)\n key.set_acl('public-read')\n\n key = EmrProcessing.bucket.new_key('mapper/mapper.py')\n input_file_path = '../src/mapper/mapper.py'\n key.set_contents_from_filename(input_file_path)\n key.set_acl('public-read')", "def _create_sections(self):\n self._SECTIONS = {}", "def create_keys(self):\n crypto_tool = CryptoTools()\n # creating RSA keys for the signer user\n public_key, private_key = crypto_tool.create_key_with_entropy()\n self.priv_key = crypto_tool.get_pem_format(private_key).decode(\"utf-8\")\n self.pub_key = crypto_tool.get_pem_format(public_key).decode(\"utf-8\")", "def create_endpoints_new_network():\n\n for i in range(NUM_OF_SHARDS):\n key_explorer = \"explorers_\" + str(i)\n array_instance_ip = parse_network_config(key_explorer)\n array_instance_id = retrieve_instance_id(array_instance_ip)\n\n # 0/ - detect region of explorers\n reg = retrieve_instance_region(array_instance_ip[0])\n # all nodes registered for the same endpoints should be located in the same region, if not, gracefully exit\n # verify_nodes_same_region(reg, array_instance_ip)\n\n print(\"\\n######################################### Creating complete pipeline for shard\", str(i),\n \"in AWS region: \", reg, \"#########################################\\n\")\n # 1/ - request certificates\n print(\"\\n==== step 1: request SSL certificates, CertificateArn will be stored into dict_region_sslcerts \\n\")\n domain_name = ''.join(['api.s', str(i), \".\", BASE_DOMAIN_NAME])\n dict_existing_certs = get_existing_certs(reg, domain_name)\n dict_region_sslcerts.clear()\n if dict_existing_certs[domain_name]:\n print(\"[INFO] SSL certificate of\", domain_name, \"exists, skipping..\")\n dict_region_sslcerts[reg].append(dict_existing_certs[domain_name][0])\n else:\n print(\"[INFO] SSL certificate of\", domain_name, \"does NOT exist, requesting..\")\n request_ssl_certificates(reg, domain_name)\n\n print(\"[RESULT] OF STEP 1\")\n pp.pprint(dict_region_sslcerts)\n\n # 2/ - create target group\n dict_region_tgarn.clear()\n array_tgs = create_name_target_group(i, ID_DOMAIN_NAME)\n pp.pprint(array_tgs)\n create_target_group(reg, array_tgs)\n print(\"[RESULT] OF STEP 2\")\n pp.pprint(dict_region_tgarn)\n\n # 3/ - create elb\n dict_region_elb2arn.clear()\n elb2_name = ''.join('s' + str(i) + '-' + ID_DOMAIN_NAME + '-' + reg)\n array_dns_hostedzone = create_elb2(reg, elb2_name)\n print(\"[RESULT] OF STEP 3\")\n pp.pprint(dict_region_elb2arn)\n\n # 4/ - create listener\n dict_region_ListenerArn.clear()\n create_listener(reg, dict_region_elb2arn, dict_region_sslcerts, dict_region_tgarn)\n print(\"[RESULT] OF STEP 4\")\n pp.pprint(dict_region_ListenerArn)\n\n # 5/ - create one more rule for the current listener\n host_header_value = ''.join('ws.s' + str(i) + '.' 
+ BASE_DOMAIN_NAME)\n create_rule(reg, dict_region_ListenerArn, dict_region_tgarn, dict_region_elb2arn, host_header_value)\n\n # 6/ - register explorer instances into the target group\n register_explorers(reg, array_instance_id, dict_region_tgarn)\n\n # 7/ - create entries on Route 53\n array_record_set = create_name_record_set(i, BASE_DOMAIN_NAME)\n create_dns_entries(HOSTED_ZONE_ID, array_record_set, array_dns_hostedzone)", "def Dictionary_create_from(nMarkers, markerSize, baseDictionary):\n pass", "def generate_keys(self):\n\n # TODO: Store keys encrypted\n rsa1 = RsaPrivateKey.Generate()\n self.sign_private = str(rsa1)\n self.sign_public = str(rsa1.public_key)\n\n rsa2 = RsaPrivateKey.Generate()\n self.crypt_private = str(rsa2)\n self.crypt_public = str(rsa2.public_key)", "def keys(self) -> None:\r\n path = Path('./config/key')\r\n global key\r\n # If the file path does not exist, create one \r\n if not path.exists():\r\n os.makedirs(path)\r\n while True:\r\n # read key.key file\r\n try:\r\n file = open(path / 'key.key', 'rb')\r\n key = file.read()\r\n file.close\r\n # when key.key file does not exist. Create one\r\n except FileNotFoundError:\r\n key = Fernet.generate_key()\r\n file = open(path / 'key.key', 'wb')\r\n file.write(key)\r\n file.close()\r\n continue\r\n break", "def _makeimap(self):\n self.map_[\"source\"] = \"nasa\"\n self.map_[\"instrument\"] = \"goes\"\n self.map_[\"physobs\"] = \"irradiance\"\n self.map_[\"provider\"] = \"sdac\"", "def generate(self) -> Dict[str, Any]:\n raise NotImplementedError", "def initialize_region(self):\n self.new_region_name = \"\"\n self.map.regions.create_new_region()", "def new(num_buckets=256):\n aMap=[]", "def generate_keys(self):\n self.keys = []\n key = string_to_bit_array(self.passwd)\n key = self.permutation(key, CP_1) # Perform initial permutation on the key\n g, d = split_into_n(key, 28) # Split into g (LEFT) & d (RIGHT)\n for i in range(16): # Apply the 16 rounds\n g, d = self.shift(g, d, ROUND_KEY_SHIFT[i]) # Shift the key according to the round\n tmp = g + d # Merge them\n self.keys.append(self.permutation(tmp, CP_2)) # Perform the permutation to get the Ki", "def _reset_primary_keys_generators(self):\n self.primary_key = dict()\n self.remaining_primary_key = dict()", "def new(num_buckets=256):#用空列表初始化字典\n\taMap=[]\n\tfor i in range(num_buckets):\n\t\taMap.append([])\n\treturn aMap", "def keys():", "def create_keypair(address_type, addresses_path, address_prefix, name):\n vkey_file = get_vkey_file(addresses_path, address_prefix, name)\n skey_file = get_skey_file(addresses_path, address_prefix, name)\n\n if(path.exists(vkey_file)) :\n print(address_prefix, \"key pair already exists for\", name)\n return\n \n makedirs(path.dirname(vkey_file), mode=0o777, exist_ok=True)\n\n run_params = ['cardano-cli', address_type, 'key-gen', '--verification-key-file', vkey_file, '--signing-key-file', skey_file]\n subprocess_run(run_params, capture_output=False, text=True)\n return", "def create(self):\n\n for sample_name, sample_info in self.samples.items():\n if not sample_info[\"use_it\"]:\n continue\n process_name = sample_info[\"process_name_specific\"]\n sample_category = sample_info[\"sample_category\"]\n is_mc = (sample_info[\"type\"] == \"mc\")\n\n logging.info(\"Building dictionaries for sample %s...\" % process_name)\n for charge_selection in self.charge_selections:\n central_or_shift_extensions = [\"\", \"hadd\", \"addBackgrounds\"]\n central_or_shifts_extended = central_or_shift_extensions + self.central_or_shifts\n for 
central_or_shift_or_dummy in central_or_shifts_extended:\n process_name_extended = [ process_name, \"hadd\" ]\n for process_name_or_dummy in process_name_extended:\n if central_or_shift_or_dummy in [ \"hadd\" ] and process_name_or_dummy in [ \"hadd\" ]:\n continue\n if central_or_shift_or_dummy != \"central\" and central_or_shift_or_dummy not in central_or_shift_extensions:\n if not is_mc:\n continue\n if not self.accept_central_or_shift(central_or_shift_or_dummy, sample_info):\n continue\n\n key_dir = getKey(process_name_or_dummy, charge_selection, central_or_shift_or_dummy)\n for dir_type in [ DKEY_CFGS, DKEY_HIST, DKEY_LOGS, DKEY_RLES ]:\n initDict(self.dirs, [ key_dir, dir_type ])\n if dir_type in [ DKEY_CFGS, DKEY_LOGS ]:\n self.dirs[key_dir][dir_type] = os.path.join(self.get_dir_type(dir_type), dir_type, self.channel,\n \"_\".join([ charge_selection ]), process_name_or_dummy, central_or_shift_or_dummy)\n else:\n self.dirs[key_dir][dir_type] = os.path.join(self.outputDir, dir_type, self.channel,\n \"_\".join([ charge_selection ]), process_name_or_dummy)\n for subdirectory in [ \"comp_jetToTauFakeRate\", \"makePlots\" ]:\n key_dir = getKey(subdirectory)\n for dir_type in [ DKEY_CFGS, DKEY_HIST, DKEY_LOGS, DKEY_DCRD, DKEY_PLOT ]:\n initDict(self.dirs, [ key_dir, dir_type ])\n if dir_type in [ DKEY_CFGS, DKEY_LOGS, DKEY_DCRD, DKEY_PLOT ]:\n self.dirs[key_dir][dir_type] = os.path.join(self.get_dir_type(dir_type), dir_type, self.channel, subdirectory)\n else:\n self.dirs[key_dir][dir_type] = os.path.join(self.outputDir, dir_type, self.channel, subdirectory)\n for dir_type in [ DKEY_CFGS, DKEY_SCRIPTS, DKEY_HIST, DKEY_LOGS, DKEY_DCRD, DKEY_PLOT, DKEY_HADD_RT ]:\n initDict(self.dirs, [ dir_type ])\n if dir_type in [ DKEY_CFGS, DKEY_SCRIPTS, DKEY_LOGS, DKEY_DCRD, DKEY_PLOT, DKEY_HADD_RT ]:\n self.dirs[dir_type] = os.path.join(self.get_dir_type(dir_type), dir_type, self.channel)\n else:\n self.dirs[dir_type] = os.path.join(self.outputDir, dir_type, self.channel)\n\n numDirectories = 0\n for key in self.dirs.keys():\n if type(self.dirs[key]) == dict:\n numDirectories += len(self.dirs[key])\n else:\n numDirectories += 1\n logging.info(\"Creating directory structure (numDirectories = %i)\" % numDirectories)\n numDirectories_created = 0;\n frac = 1\n for key in self.dirs.keys():\n if type(self.dirs[key]) == dict:\n for dir_type in self.dirs[key].keys():\n create_if_not_exists(self.dirs[key][dir_type])\n numDirectories_created += len(self.dirs[key])\n else:\n create_if_not_exists(self.dirs[key])\n numDirectories_created = numDirectories_created + 1\n while 100*numDirectories_created >= frac*numDirectories:\n logging.info(\" %i%% completed\" % frac)\n frac = frac + 1\n logging.info(\"Done.\")\n\n inputFileLists = {}\n for sample_name, sample_info in self.samples.items():\n if not sample_info[\"use_it\"]:\n continue\n logging.info(\"Checking input files for sample %s\" % sample_info[\"process_name_specific\"])\n inputFileLists[sample_name] = generateInputFileList(sample_info, self.max_files_per_job)\n\n self.inputFileIds = {}\n for sample_name, sample_info in self.samples.items():\n if not sample_info[\"use_it\"]:\n continue\n\n process_name = sample_info[\"process_name_specific\"]\n inputFileList = inputFileLists[sample_name]\n\n logging.info(\"Creating configuration files to run '%s' for sample %s\" % (self.executable_analyze, process_name))\n\n is_mc = (sample_info[\"type\"] == \"mc\")\n sample_category = sample_info[\"sample_category\"]\n\n for charge_selection in self.charge_selections:\n for 
central_or_shift in self.central_or_shifts:\n \n if central_or_shift != \"central\" and not is_mc:\n continue\n if not self.accept_central_or_shift(central_or_shift, sample_info):\n continue\n\n # build config files for executing analysis code\n key_analyze_dir = getKey(process_name, charge_selection, central_or_shift)\n\n for jobId in inputFileList.keys():\n\n analyze_job_tuple = (process_name, charge_selection, central_or_shift, jobId)\n key_analyze_job = getKey(*analyze_job_tuple)\n ntupleFiles = inputFileList[jobId]\n if len(ntupleFiles) == 0:\n logging.warning(\"No input ntuples for %s --> skipping job !!\" % (key_analyze_job))\n continue\n\n cfgFile_modified_path = os.path.join(self.dirs[key_analyze_dir][DKEY_CFGS], \"analyze_%s_%s_%s_%i_cfg.py\" % analyze_job_tuple)\n logFile_path = os.path.join(self.dirs[key_analyze_dir][DKEY_LOGS], \"analyze_%s_%s_%s_%i.log\" % analyze_job_tuple)\n histogramFile_path = os.path.join(self.dirs[key_analyze_dir][DKEY_HIST], \"analyze_%s_%s_%s_%i.root\" % analyze_job_tuple)\n rleOutputFile_path = os.path.join(self.dirs[key_analyze_dir][DKEY_RLES], \"rle_%s_%s_%s_%i.txt\" % analyze_job_tuple) \\\n if self.select_rle_output else \"\"\n\n self.jobOptions_analyze[key_analyze_job] = {\n 'ntupleFiles' : ntupleFiles,\n 'cfgFile_modified' : cfgFile_modified_path,\n 'histogramFile' : histogramFile_path,\n 'logFile' : logFile_path,\n 'chargeSelection' : charge_selection,\n 'jet_minPt' : self.jet_minPt,\n 'jet_maxPt' : self.jet_maxPt,\n 'jet_minAbsEta' : self.jet_minAbsEta,\n 'jet_maxAbsEta' : self.jet_maxAbsEta,\n 'hadTau_selection_tight' : self.hadTau_selection_tight,\n 'hadTauSelection_denominator' : self.hadTau_selection_denominator,\n 'hadTauSelections_numerator' : self.hadTau_selections_numerator,\n 'trigMatchingOptions' : self.trigMatchingOptions,\n 'selEventsFileName_output' : rleOutputFile_path,\n 'absEtaBins' : self.absEtaBins,\n 'decayModes' : self.decayModes,\n 'central_or_shift' : central_or_shift,\n 'central_or_shifts_local' : [],\n 'apply_hlt_filter' : self.hlt_filter,\n }\n self.createCfg_analyze(self.jobOptions_analyze[key_analyze_job], sample_info)\n\n # initialize input and output file names for hadd_stage1\n key_hadd_stage1_dir = getKey(process_name, charge_selection)\n hadd_stage1_job_tuple = (process_name, charge_selection)\n key_hadd_stage1_job = getKey(*hadd_stage1_job_tuple)\n if not key_hadd_stage1_job in self.inputFiles_hadd_stage1:\n self.inputFiles_hadd_stage1[key_hadd_stage1_job] = []\n self.inputFiles_hadd_stage1[key_hadd_stage1_job].append(self.jobOptions_analyze[key_analyze_job]['histogramFile'])\n self.outputFile_hadd_stage1[key_hadd_stage1_job] = os.path.join(self.dirs[key_hadd_stage1_dir][DKEY_HIST],\n \"hadd_stage1_%s_%s.root\" % hadd_stage1_job_tuple)\n\n # initialize input and output file names for hadd_stage2\n key_hadd_stage1_job = getKey(process_name, charge_selection)\n key_hadd_stage2_dir = getKey(\"hadd\", charge_selection)\n key_hadd_stage2_job = getKey(charge_selection)\n if not key_hadd_stage2_job in self.inputFiles_hadd_stage2:\n self.inputFiles_hadd_stage2[key_hadd_stage2_job] = []\n self.inputFiles_hadd_stage2[key_hadd_stage2_job].append(self.outputFile_hadd_stage1[key_hadd_stage1_job])\n self.outputFile_hadd_stage2[key_hadd_stage2_job] = os.path.join(self.dirs[key_hadd_stage2_dir][DKEY_HIST],\n \"hadd_stage2_%s.root\" % charge_selection)\n\n logging.info(\"Creating configuration files for executing 'comp_jetToTauFakeRate'\")\n for charge_selection in self.charge_selections:\n charge_key = \"comp_%s\" % 
charge_selection\n self.comp_input_files[charge_key] = []\n for trigMatchingOption in self.trigMatchingOptions:\n key_hadd_stage2_job = getKey(charge_selection)\n key_comp_jetToTauFakeRate_dir = getKey(\"comp_jetToTauFakeRate\")\n key_comp_jetToTauFakeRate_job = getKey(charge_selection, trigMatchingOption)\n self.jobOptions_comp_jetToTauFakeRate[key_comp_jetToTauFakeRate_job] = {\n 'inputFile' : self.outputFile_hadd_stage2[key_hadd_stage2_job],\n 'cfgFile_modified' : os.path.join(\n self.dirs[DKEY_CFGS], \"comp_jetToTauFakeRate_%s_%s_cfg.py\" % (charge_selection, trigMatchingOption)),\n 'outputFile' : os.path.join(\n self.dirs[DKEY_HIST], \"comp_jetToTauFakeRate_%s_%s.root\" % (charge_selection, trigMatchingOption)),\n 'logFile' : os.path.join(\n self.dirs[DKEY_LOGS], \"comp_jetToTauFakeRate_%s_%s.log\" % (charge_selection, trigMatchingOption)),\n 'looseRegion' : \"jetToTauFakeRate_%s_%s/denominator/\" % (charge_selection, trigMatchingOption),\n 'tightRegion' : \"jetToTauFakeRate_%s_%s/numerator/\" % (charge_selection, trigMatchingOption),\n 'absEtaBins' : self.absEtaBins,\n 'ptBins' : self.ptBins,\n 'decayModes' : self.decayModes,\n 'hadTauSelections' : self.hadTau_selections_numerator,\n 'trigMatchingOption' : trigMatchingOption,\n 'plots_outputFileName' : os.path.join(self.dirs[key_comp_jetToTauFakeRate_dir][DKEY_PLOT], \"comp_jetToTauFakeRate_%s.png\" % trigMatchingOption)\n }\n self.createCfg_comp_jetToTauFakeRate(self.jobOptions_comp_jetToTauFakeRate[key_comp_jetToTauFakeRate_job])\n comp_output = self.jobOptions_comp_jetToTauFakeRate[key_comp_jetToTauFakeRate_job]['outputFile']\n self.targets.append(comp_output)\n self.comp_input_files[charge_key].append(comp_output)\n self.comp_output_files[charge_key] = os.path.join(self.dirs[DKEY_HIST], \"comp_jetToTauFakeRate_%s.root\" % charge_selection)\n\n logging.info(\"Creating configuration files to run 'makePlots'\")\n for charge_selection in self.charge_selections:\n key_hadd_stage2_job = getKey(charge_selection)\n key_makePlots_dir = getKey(\"makePlots\")\n key_makePlots_job = getKey(charge_selection) \n self.jobOptions_make_plots[key_makePlots_job] = {\n 'executable' : self.executable_make_plots,\n 'inputFile' : self.outputFile_hadd_stage2[key_hadd_stage2_job],\n 'cfgFile_modified' : os.path.join(\n self.dirs[key_makePlots_dir][DKEY_CFGS], \"makePlots_%s_cfg.py\" % self.channel),\n 'outputFile' : os.path.join(\n self.dirs[key_makePlots_dir][DKEY_PLOT], \"makePlots_%s.png\" % self.channel),\n 'histogramDir' : \"jetToTauFakeRate_%s\" % charge_selection,\n 'label' : None,\n 'make_plots_backgrounds' : self.make_plots_backgrounds\n }\n self.createCfg_makePlots(self.jobOptions_make_plots[key_makePlots_job])\n for trigMatchingOption in self.trigMatchingOptions:\n self.cfgFile_make_plots = self.cfgFile_make_plots_denominator\n for absEtaBin in [ \"absEtaLt1_5\", \"absEta1_5to9_9\" ]:\n key_hadd_stage2_job = getKey(charge_selection)\n key_makePlots_job = getKey(charge_selection, trigMatchingOption, absEtaBin, \"denominator\") \n self.jobOptions_make_plots[key_makePlots_job] = {\n 'executable' : self.executable_make_plots,\n 'inputFile' : self.outputFile_hadd_stage2[key_hadd_stage2_job],\n 'cfgFile_modified' : os.path.join(\n self.dirs[key_makePlots_dir][DKEY_CFGS], \"makePlots_%s_%s_%s_denominator_%s_cfg.py\" % \\\n (self.channel, charge_selection, trigMatchingOption, absEtaBin)),\n 'outputFile' : os.path.join(\n self.dirs[key_makePlots_dir][DKEY_PLOT], \"makePlots_%s_%s_%s_denominator_%s.png\" % (self.channel, charge_selection, 
trigMatchingOption, absEtaBin)),\n 'histogramDir' : \"jetToTauFakeRate_%s_%s/denominator/%s\" % (charge_selection, trigMatchingOption, absEtaBin),\n 'label' : None,\n 'make_plots_backgrounds' : self.make_plots_backgrounds\n }\n self.createCfg_makePlots(self.jobOptions_make_plots[key_makePlots_job])\n for hadTau_selection_numerator in self.hadTau_selections_numerator:\n key_hadd_stage2_job = getKey(charge_selection)\n key_makePlots_job = getKey(charge_selection, trigMatchingOption, absEtaBin, \"numerator\", hadTau_selection_numerator)\n self.jobOptions_make_plots[key_makePlots_job] = {\n 'executable' : self.executable_make_plots,\n 'inputFile' : self.outputFile_hadd_stage2[key_hadd_stage2_job],\n 'cfgFile_modified' : os.path.join(\n self.dirs[key_makePlots_dir][DKEY_CFGS], \"makePlots_%s_%s_%s_numerator_%s_%s_cfg.py\" % \\\n (self.channel, charge_selection, trigMatchingOption, hadTau_selection_numerator, absEtaBin)),\n 'outputFile' : os.path.join(\n self.dirs[key_makePlots_dir][DKEY_PLOT], \"makePlots_%s_%s_%s_numerator_%s_%s.png\" % \\\n (self.channel, charge_selection, trigMatchingOption, hadTau_selection_numerator, absEtaBin)),\n 'histogramDir' : \"jetToTauFakeRate_%s_%s/numerator/%s/%s\" % (charge_selection, trigMatchingOption, hadTau_selection_numerator, absEtaBin),\n 'label' : None,\n 'make_plots_backgrounds' : self.make_plots_backgrounds\n }\n self.createCfg_makePlots(self.jobOptions_make_plots[key_makePlots_job])\n\n self.sbatchFile_analyze = os.path.join(self.dirs[DKEY_SCRIPTS], \"sbatch_analyze_%s.py\" % self.channel)\n self.sbatchFile_comp_jetToTauFakeRate = os.path.join(self.dirs[DKEY_SCRIPTS], \"sbatch_comp_jetToTauFakeRate.py\")\n if self.is_sbatch:\n logging.info(\"Creating script for submitting '%s' jobs to batch system\" % self.executable_analyze)\n self.createScript_sbatch_analyze(self.executable_analyze, self.sbatchFile_analyze, self.jobOptions_analyze)\n logging.info(\"Creating script for submitting '%s' jobs to batch system\" % self.executable_comp_jetToTauFakeRate)\n self.createScript_sbatch(self.executable_comp_jetToTauFakeRate, self.sbatchFile_comp_jetToTauFakeRate, self.jobOptions_comp_jetToTauFakeRate)\n\n lines_makefile = []\n self.addToMakefile_analyze(lines_makefile)\n self.addToMakefile_hadd_stage1(lines_makefile)\n self.addToMakefile_hadd_stage2(lines_makefile, make_dependency = \"phony_hadd_stage1\", max_mem = '4096M')\n self.addToMakefile_comp_jetToTauFakeRate(lines_makefile)\n self.addToMakefile_comp_hadd(lines_makefile)\n self.addToMakefile_make_plots(lines_makefile)\n self.createMakefile(lines_makefile)\n\n logging.info(\"Done.\")\n\n return self.num_jobs", "def add_all_regions():\n gene_id = request.json['gene_id']\n panel_id = request.json['panel_id']\n tx_id = request.json['tx_id']\n gene_name = request.json['gene_name']\n project_id = get_project_id_by_panel_id(s, panel_id)\n\n add_preftxs_to_panel(s, project_id, [{\"gene\": gene_name, \"tx_id\": tx_id}, ])\n add_genes_to_panel_with_ext(s, panel_id, gene_id)\n return jsonify({\"genes\": [gene_id, ]})", "def initiate_new_key (self,key,index):\r\n\r\n #with shelf\r\n if self.using_shelf:\r\n\r\n\r\n self.key_dict[key] = {str(index)}\r\n\r\n #with database\r\n if self.using_database:\r\n\r\n value_tuple = (notebookname, key,)\r\n db_cursor.execute(\"INSERT OR REPLACE \"\r\n +\"INTO all_keys (keyword, notebook)\"\r\n +\" VALUES (?,?);\",\r\n value_tuple)\r\n value_tuple = (notebookname, key, str(index))\r\n db_cursor.execute(\"INSERT OR REPLACE \"\r\n +\"INTO keys_to_indexes\"\r\n +\" (notebook, keyword, 
note_index)\"\r\n +\" VALUES (?,?,?);\",\r\n value_tuple)", "def generate_keystream(self):", "def creating_nodes_for_regions(name_list, object_list, dict_of_neighbors_per_region):\n\n for name in name_list:\n new_node = Node(name) # Create an instance of Node\n object_list.append(new_node) # Adds to the object list\n # This for loop assigns to each Node its neighbors' dictionary\n for node in object_list:\n for region, neighbors in dict_of_neighbors_per_region.items():\n if node.name == region:\n node.neighbors = dict_of_neighbors_per_region[region]", "def keys(self, data, installer_context):", "def create_keypair(self, username):\n msg = \"create_keypair not implemented\"\n raise NotImplementedError(msg)", "def generateAllRegionVectors():\n\tregionVectors = []\n\tfor i in range(NUM_REGION_VECTORS):\n\t\tregionVectors.append('{0:04x}'.format(i))\n\treturn regionVectors", "def gen_parameters(self):\n\n print \"\\t* Adding parameters to compute template\"\n # get all the server client\n servers = self.novaclient.servers.list()\n\n # add all key_pair_names\n self.gen_key_name_parameters(servers)\n\n # add all images\n self.gen_image_parameters(servers)\n\n # add all flavors\n self.gen_flavor_parameters(servers)\n\n # add all networks\n self.gen_network_parameters()", "def create_pem_keys(self):\n self.random_rsa()\n\n return self.keys", "def _makeimap(self):\n self.map_['source'] = 'GOES'\n self.map_['provider'] = 'NOAA'\n self.map_['instrument'] = 'SUVI'\n self.map_['physobs'] = 'flux'", "def create_keypair(econfig_file=None, region=None, keyname=\"bcbio\"):\n import boto\n import boto.ec2\n if econfig_file:\n keypair_dir = os.path.dirname(econfig_file).replace(\"elasticluster\", \"aws_keypairs\")\n else:\n keypair_dir = os.path.join(os.getcwd(), \"aws_keypairs\")\n if not os.path.exists(keypair_dir):\n os.makedirs(keypair_dir)\n private_key = os.path.join(os.path.join(keypair_dir, keyname))\n new_key = not os.path.exists(private_key)\n if new_key:\n cmd = [\"ssh-keygen\", \"-t\", \"rsa\", \"-N\", \"\", \"-f\", private_key, \"-C\", \"bcbio_aws_keypair\"]\n subprocess.check_call(cmd)\n public_key = private_key + \".pub\"\n if region:\n ec2 = boto.ec2.connect_to_region(region)\n else:\n ec2 = boto.connect_ec2()\n key = ec2.get_key_pair(keyname)\n if key and new_key:\n print(\"Non matching key %s found in AWS, removing.\" % keyname)\n ec2.delete_key_pair(keyname)\n key = None\n if not key:\n print(\"Key %s not found in AWS, importing created key\" % keyname)\n with open(public_key) as in_handle:\n body = in_handle.read()\n try:\n ec2.import_key_pair(keyname, body)\n except TypeError as e:\n body = body.encode('utf-8')\n ec2.import_key_pair(keyname, body)\n return {\"user_key_name\": keyname, \"user_key_private\": private_key,\n \"user_key_public\": public_key}", "def init(x_in):\n global public_keys, secret_keys, x\n x = func.get_bits(x_in)\n\n public_keys, secret_keys = [], []\n\n elgamal.init_g_p_q()\n for i in range(3):\n create_keys(i)", "def copy_to_region(self, region):\r\n if region.name == self.region:\r\n raise BotoClientError('Unable to copy to the same Region')\r\n conn_params = self.connection.get_params()\r\n rconn = region.connect(**conn_params)\r\n kp = rconn.create_key_pair(self.name)\r\n return kp", "def createIndivitual(self) -> Dict[str, Any]:\n ind = {\n \"genome\": {\n key: numpy.random.randint(0, len(value), size=self.ref_count[key]) for (\n key, value) in self.grammar.items()\n },\n \"fitness\": None,\n \"fenotype\": None,\n }\n return ind", "def setup_units():\n # The 
number of tetrodes, probes, etc - any kind of grouping\n num_groups = 8\n\n # The region that each group belongs to\n regions = [\"SUB\"] * num_groups\n\n # A group number for each group, for example the tetrode number\n groups = [1, 2, 3, 4, 9, 10, 11, 12]\n\n output_dict = {\n \"num_groups\": num_groups,\n \"region\": regions,\n \"group\": groups,\n }\n\n return output_dict", "def generate_folds(self) -> list[dict]:\n pass", "def create_dicts():\n load_data_for_dict('data/atis/train/seq.in', 'data/atis/voc/vocabulary.json')\n load_data_for_dict('data/atis/valid/seq.in', 'data/atis/voc/vocabulary.json')\n load_data_for_dict('data/atis/test/seq.in', 'data/atis/voc/vocabulary.json') \n load_data_for_dict('data/atis/train/seq.out', 'data/atis/voc/slot_vocabulary.json')", "def _makeimap(self):\n self.map_['source'] = 'NAOJ'\n self.map_['provider'] = 'NRO'\n self.map_['instrument'] = 'NORH'\n self.map_['phyobs'] = ''", "def regions_dict(self):\n regions_dict = dict()\n for i, r in enumerate(self.regions):\n regions_dict[getattr(r, 'ix', i)] = r\n return regions_dict", "def gen_keys(lname,dsa=False):\n d = 'keys'\n if not os.path.isdir(d):\n os.mkdir(d)\n for n in lname:\n if not os.path.isfile('%s/%s.pem'%(d,n)):\n key = Crypto.PublicKey.DSA.generate(512, os.urandom) if dsa else Crypto.PublicKey.RSA.generate(1024,os.urandom)\n open('%s/%s.pem'%(d,n),'w').write(key.exportKey('PEM'))", "def create_keys(self, overwrite=None, out=None):\n # Find keys that have to be generated\n keys = self.policy_parser.get_keys(out)\n\n # Check whether keys exist\n if not overwrite:\n keys_exist = False\n for pair in keys:\n if pair.key_type is KeyType.signing:\n keys_exist = keys_exist | os.path.isfile(pair.json_key_path)\n keys_exist = keys_exist | os.path.isfile(pair.pem_key_path)\n elif pair.key_type is KeyType.encryption:\n keys_exist = keys_exist | os.path.isfile(pair.json_key_path)\n if keys_exist:\n if overwrite is None:\n answer = input('Keys directory is not empty. Overwrite? 
(y/n): ')\n if answer.lower() != 'y':\n return\n elif overwrite is False:\n return\n\n # Generate keys\n for pair in keys:\n if pair.key_type is KeyType.signing:\n args = [\n '--kid', pair.key_id,\n '--jwk', pair.json_key_path,\n '--pem-priv', pair.pem_key_path\n ]\n elif pair.key_type is KeyType.encryption:\n args = [\n '--aes', pair.json_key_path\n ]\n else:\n continue\n\n logger.debug(f'Starting key generation with arguments: {args}')\n try:\n keygen.main(args)\n except SystemExit as e:\n if e.code != 0:\n logger.error(f'An error occurred while running keygen with arguments: {args}')", "def gen_key_name_parameters(self, servers):\n\n self.set_of_keys = set(map(lambda server: server.key_name, servers))\n key_idx = \"\"\n for idx, key_pair in enumerate(self.set_of_keys):\n data = {\"type\": \"string\",\n \"description\": \"Name of keypair to assign to servers\",\n \"default\": key_pair}\n self.compute_data[\"parameters\"][\"key_name%s\" % key_idx] = data\n if len(self.set_of_keys) >= 1:\n key_idx = str(1+idx)", "def generatePolygons():", "def populate_instances(self):\n print \"Populating instances info...\"\n instances = self.get_all_instances()\n for i in instances:\n self.spreadsheet[i.id] = dict(Name_tag=self.get_name_tag(i), id=i.id, KEEP_tag=self.get_keep_tag(i),\n PROD_tag=self.is_production(i), instance_type=i.instance_type,\n state=i.state, launched=i.launch_time, region=i.region.name)", "def generate_signing_keys():\n return SigningKey.generate(curve=SECP256k1)", "def initpridict(cls):\n for i in range(len(clslist)):\n instcls = clslist[i]\n prilist = cls.pristage(instcls)\n configlist = cls.getConfigStages()\n tmpdict = dict()\n for j in range(len(configlist)):\n tmpdict.update(dict({configlist[j]: prilist[j]}))\n pridict.update(dict({instcls: tmpdict}))", "def create_infrastructure(aws_key, aws_secret):\n ec2_client, s3_client, iam_client, redshift_client = create_clients(\n aws_key, aws_secret\n )\n role_arn = create_iam_role(iam_client)\n create_redshift_cluster(redshift_client, role_arn)\n # Loop until the cluster status becomes \"Available\"\n status = \"\"\n while status.lower() != \"available\":\n cluster_properties = get_cluster_properties(redshift_client)\n status = cluster_properties['ClusterStatus']\n print('Cluster status is %s' % status)\n time.sleep(30)\n set_vpc_properties(ec2_client, cluster_properties['VpcId'])\n print_cluster_properties(redshift_client)", "def secondary_keys(self):", "def create(self):\n self.initialize()\n\n if not self.__keypair:\n logger.info('Creating keypair %s...' 
% self.keypair_settings.name)\n\n if self.keypair_settings.public_filepath and os.path.isfile(\n self.keypair_settings.public_filepath):\n logger.info(\"Uploading existing keypair\")\n self.__keypair = nova_utils.upload_keypair_file(\n self._nova, self.keypair_settings.name,\n self.keypair_settings.public_filepath)\n\n if self.keypair_settings.delete_on_clean is not None:\n delete_on_clean = self.keypair_settings.delete_on_clean\n self.__delete_keys_on_clean = delete_on_clean\n else:\n self.__delete_keys_on_clean = False\n else:\n logger.info(\"Creating new keypair\")\n keys = nova_utils.create_keys(self.keypair_settings.key_size)\n self.__keypair = nova_utils.upload_keypair(\n self._nova, self.keypair_settings.name,\n nova_utils.public_key_openssh(keys))\n file_utils.save_keys_to_files(\n keys, self.keypair_settings.public_filepath,\n self.keypair_settings.private_filepath)\n\n if self.keypair_settings.delete_on_clean is not None:\n delete_on_clean = self.keypair_settings.delete_on_clean\n self.__delete_keys_on_clean = delete_on_clean\n else:\n self.__delete_keys_on_clean = True\n elif self.__keypair and not os.path.isfile(\n self.keypair_settings.private_filepath):\n logger.warn(\"The public key already exist in OpenStack \\\n but the private key file is not found ..\")\n\n return self.__keypair", "def __enter__(self):\n self.key = self.make_new_key()\n self.sec_grp, self.group_id = self.make_new_group()\n\n printy(\"New key {} created\".format(self.key.name))\n printy(\"new security group {} created\".format(self.sec_grp.group_name))\n\n return self", "def island_procreate(self):\n for y in self.island_map:\n for cell in y:\n cell.procreate()", "def __init__(self):\n # better to be a prime number, less collision\n self.key_space = 2069\n self.hash_table = [Bucket() for i in range(self.key_space)]", "def _define_generators(self):\n\t\treturn {\n\t\t \"transaction_id\" : Mgcp._generate_uint32,\n\t\t \"connection_id\" : Mgcp._generate_uint32,\n\t\t \"request_id\" : Mgcp._generate_uint32,\n\t\t \"timestamp\" : Mgcp._generate_timestamp\n\t\t}", "def setKeys():\n keywords['c++'] = {}\n with open('cppkeywords.txt', 'r') as f:\n for i in f:\n i = i.strip('\\n')\n words = map(str, i.split())\n key = words[0]\n words.pop(0)\n keywords['c++'][key] = list(words)\n for j in words:\n MyDict.insert(j)\n keywords['py'] = {}\n with open('pykeywords.txt', 'r') as f:\n for i in f:\n i = i.strip('\\n')\n words = map(str, i.split())\n key = words[0]\n words.pop(0)\n keywords['py'][key] = list(words)\n for j in words:\n MyDict.insert(j)", "def add_key(self, key_list: list) -> None:\n\n for key, funct, desc in key_list:\n # Force keys to be lowercase\n key = key.lower()\n \n self.key_functs[key] = funct\n self.key_satified[key] = False\n self.key_description[key] = desc\n self.key_values[key] = None", "def generate_keymap(self):\n self.keymap.update({\n \"KEY_UP\":lambda: self.move_up(),\n \"KEY_DOWN\":lambda: self.move_down(),\n \"KEY_PAGEUP\":lambda: self.page_up(),\n \"KEY_PAGEDOWN\":lambda: self.page_down(),\n \"KEY_KPENTER\":lambda: self.select_entry(),\n \"KEY_ENTER\":lambda: self.select_entry()\n })\n if self.exitable:\n self.keymap[\"KEY_LEFT\"] = lambda: self.deactivate()", "def setup_cassiopeia(api_key, region=\"NA\"):\n\n cass.set_riot_api_key(api_key) # Set the api key for cassiopeia to use\n \n try:\n # Try to set the region of cassiopeia to our provided region\n cass.set_default_region(region)\n\n except:\n # Catch errors when the region is not valid, and print the valid list of \n # supported 
regions\n print(\"Region was not valid. Please enter a valid region from the list\", \n \"below:\")\n region_list = [\"NA\", \"BR\", \"EUNE\", \"LAN\", \"LAS\", \"OCE\", \"RU\", \"TR\", \"JP\", \n \"PH\", \"SG\", \"TW\", \"VN\", \"TH\", \"KR\", \"CN\", \"PBE\"]\n\n # Cycle through the region list and print each region with a dash in front\n for region in region_list:\n print(\"-\", region)\n\n #Exit the program, since there was error\n os._exit(1)", "def setup(self): \n self.suburbs_dict = dict()\n self.raw_proIds_dict = dict()\n self.propertyIds_dict = dict()\n self.valuations = dict()", "def create_all(self, change_dict={}):\n logging.info(\"#####################\")\n logging.info(\"Create objects\")\n logging.info(\"#####################\")\n\n # update according to argparse\n self.params_dict = self.maybe_modify_params_dict(self.params_dict, change_dict)\n\n # update according to general_setting in link_config\n if self.general_setting is not None:\n self.params_dict = self.maybe_modify_params_dict(\n self.params_dict, self.general_setting\n )\n\n self.maybe_assert_params_dict(self.params_dict)\n self.objects_dict = self.get_objects_dict(self.params_dict, self.link_config) \n return self.objects_dict", "def generate_keys(self, p, q, e):\n d = EucAlg(p, q)\n for i in d:\n if i == 0:\n raise Exception(\"p and q are not relatively prime.\")\n\n n = p*q\n phi_n = (p-1)*(q-1)\n d = EucAlg(e, phi_n)\n\n self._private_key = (d[0],n)\n self.public_key = (e,n)", "def create_init(dict, number_of_nodes):\n\n x_init = dict[\"x_init\"]\n u_init = dict[\"u_init\"]\n v_init = []\n for k in range(number_of_nodes - 1):\n v_init += x_init\n v_init += u_init\n if \"tf_init\" in dict:\n tf_init = dict[\"tf_init\"]\n v_init.append(tf_init)\n v_init += x_init\n\n return vertcat(*v_init)", "def create_param_grid ( param_grid: Dict ):\n \n return (\n dict ( zip ( param_grid.keys(), instance ) )\n for instance in product ( * param_grid.values() )\n ) # End create_param_grid", "def create_param_grid ( param_grid: Dict ):\n \n return (\n dict ( zip ( param_grid.keys(), instance ) )\n for instance in product ( * param_grid.values() )\n ) # End create_param_grid", "def createMap(self):\n map = {}\n for rows in xrange(0,(size[1]/50)):\n for columns in xrange(0,(size[0]/50)):\n if rows == (size[1]/50)-1 or rows == 0 or columns== (size[0]/50)-1 or columns==0:\n map.update({(rows,columns):\"block\"})\n elif(rows%3 == 0):\n map.update({(rows,columns):random.choice(map_options)})\n else:\n map.update({(rows,columns):random.choice(map_options[:1])})\n\n self.map = map", "def _construct_keytab(keytabs):\n temp_keytabs = []\n file_keytabs = []\n for kt_uri in keytabs:\n if kt_uri.startswith('zookeeper:'):\n zkpath = kt_uri[len('zookeeper:'):]\n with tempfile.NamedTemporaryFile(mode='wb', delete=False) as temp:\n ktab, _metadata = context.GLOBAL.zk.conn.get(zkpath)\n temp.write(ktab)\n temp_keytabs.append(temp.name)\n\n if kt_uri.startswith('file:'):\n file_keytabs.append(kt_uri[len('file:'):])\n\n kt_target = os.environ.get('KRB5_KTNAME')\n cmd_line = ['kt_add', kt_target] + temp_keytabs + file_keytabs\n subproc.check_call(cmd_line)\n\n for temp_keytab in temp_keytabs:\n fs.rm_safe(temp_keytab)", "def generatePreKeys(start, count):\n results = []\n start -= 1\n for i in range(0, count):\n preKeyId = ((start + i) % (Medium.MAX_VALUE - 1)) + 1\n results.append(PreKeyRecord(preKeyId, Curve.generateKeyPair()))\n\n return results", "def create(self):\n\n for sample_name, sample_info in self.samples.items():\n if not 
sample_info[\"use_it\"] or sample_info[\"sample_category\"] in [ \"additional_signal_overlap\", \"background_data_estimate\" ]:\n continue\n process_name = sample_info[\"process_name_specific\"]\n for charge_selection in self.charge_selections:\n key_dir = getKey(process_name, charge_selection)\n for dir_type in [ DKEY_CFGS, DKEY_HIST, DKEY_LOGS, DKEY_RLES ]:\n initDict(self.dirs, [ key_dir, dir_type ])\n if dir_type in [ DKEY_CFGS, DKEY_LOGS ]:\n self.dirs[key_dir][dir_type] = os.path.join(self.configDir, dir_type, self.channel,\n \"_\".join([ charge_selection ]), process_name)\n else:\n self.dirs[key_dir][dir_type] = os.path.join(self.outputDir, dir_type, self.channel,\n \"_\".join([ charge_selection ]), process_name)\n for dir_type in [ DKEY_CFGS, DKEY_SCRIPTS, DKEY_HIST, DKEY_LOGS, DKEY_DCRD, DKEY_PLOT, DKEY_HADD_RT ]:\n initDict(self.dirs, [ dir_type ])\n if dir_type in [ DKEY_CFGS, DKEY_SCRIPTS, DKEY_LOGS, DKEY_DCRD, DKEY_PLOT, DKEY_HADD_RT ]:\n self.dirs[dir_type] = os.path.join(self.configDir, dir_type, self.channel)\n else:\n self.dirs[dir_type] = os.path.join(self.outputDir, dir_type, self.channel)\n ##print \"self.dirs = \", self.dirs\n\n for key in self.dirs.keys():\n if type(self.dirs[key]) == dict:\n for dir_type in self.dirs[key].keys():\n create_if_not_exists(self.dirs[key][dir_type])\n else:\n create_if_not_exists(self.dirs[key])\n\n inputFileLists = {}\n for sample_name, sample_info in self.samples.items():\n if not sample_info[\"use_it\"] or sample_info[\"sample_category\"] in [ \"additional_signal_overlap\", \"background_data_estimate\" ]:\n continue\n logging.info(\"Checking input files for sample %s\" % sample_info[\"process_name_specific\"])\n inputFileLists[sample_name] = generateInputFileList(sample_name, sample_info, self.max_files_per_job, self.debug)\n \n self.inputFileIds = {}\n for sample_name, sample_info in self.samples.items():\n if not sample_info[\"use_it\"] or sample_info[\"sample_category\"] in [ \"additional_signal_overlap\", \"background_data_estimate\" ]:\n continue\n\n process_name = sample_info[\"process_name_specific\"]\n\n logging.info(\"Creating configuration files to run '%s' for sample %s\" % (self.executable_analyze, process_name)) \n\n is_mc = (sample_info[\"type\"] == \"mc\")\n lumi_scale = 1. 
if not (self.use_lumi and is_mc) else sample_info[\"xsection\"] * self.lumi / sample_info[\"nof_events\"]\n apply_genWeight = sample_info[\"apply_genWeight\"] if (is_mc and \"apply_genWeight\" in sample_info.keys()) else False\n sample_category = sample_info[\"sample_category\"]\n triggers = sample_info[\"triggers\"]\n apply_trigger_bits = (is_mc and (self.era == \"2015\" or (self.era == \"2016\" and sample_info[\"reHLT\"]))) or not is_mc\n\n for charge_selection in self.charge_selections:\n for central_or_shift in self.central_or_shifts:\n\n inputFileList = inputFileLists[sample_name]\n for jobId in inputFileList.keys():\n if central_or_shift != \"central\" and not is_mc:\n continue\n if central_or_shift.startswith(\"CMS_ttHl_thu_shape_ttH\") and sample_category != \"signal\":\n continue\n if central_or_shift.startswith(\"CMS_ttHl_thu_shape_ttW\") and sample_category != \"TTW\":\n continue\n if central_or_shift.startswith(\"CMS_ttHl_thu_shape_ttZ\") and sample_category != \"TTZ\":\n continue\n\n # build config files for executing analysis code\n key_dir = getKey(process_name, charge_selection)\n key_analyze_job = getKey(process_name, charge_selection, central_or_shift, jobId)\n\n ntupleFiles = inputFileList[jobId]\n if len(ntupleFiles) == 0:\n print \"Warning: ntupleFiles['%s'] = %s --> skipping job !!\" % (key_job, ntupleFiles)\n continue\n self.jobOptions_analyze[key_analyze_job] = {\n 'ntupleFiles' : ntupleFiles,\n 'cfgFile_modified' : os.path.join(self.dirs[key_dir][DKEY_CFGS], \"analyze_%s_%s_%s_%s_%i_cfg.py\" % \\\n (self.channel, process_name, charge_selection, central_or_shift, jobId)),\n 'histogramFile' : os.path.join(self.dirs[key_dir][DKEY_HIST], \"%s_%s_%s_%i.root\" % \\\n (process_name, charge_selection, central_or_shift, jobId)),\n 'logFile' : os.path.join(self.dirs[key_dir][DKEY_LOGS], \"analyze_%s_%s_%s_%s_%i.log\" % \\\n (self.channel, process_name, charge_selection, central_or_shift, jobId)),\n 'sample_category' : sample_category,\n 'triggers' : sample_info[\"triggers\"],\n 'charge_selection' : charge_selection,\n 'jet_minPt' : self.jet_minPt,\n 'jet_maxPt' : self.jet_maxPt,\n 'jet_minAbsEta' : self.jet_minAbsEta,\n 'jet_maxAbsEta' : self.jet_maxAbsEta,\n 'hadTau_selection_denominator' : self.hadTau_selection_denominator,\n 'hadTau_selections_numerator' : self.hadTau_selections_numerator,\n 'absEtaBins' : self.absEtaBins,\n ##'use_HIP_mitigation_mediumMuonId' : sample_info[\"use_HIP_mitigation_mediumMuonId\"],\n 'use_HIP_mitigation_mediumMuonId' : True,\n 'is_mc' : is_mc,\n 'central_or_shift' : central_or_shift,\n 'lumi_scale' : 1. 
if not (self.use_lumi and is_mc) else sample_info[\"xsection\"] * self.lumi / sample_info[\"nof_events\"],\n 'apply_genWeight' : sample_info[\"genWeight\"] if (is_mc and \"genWeight\" in sample_info.keys()) else False,\n 'apply_trigger_bits' : (is_mc and (self.era == \"2015\" or (self.era == \"2016\" and sample_info[\"reHLT\"]))) or not is_mc,\n }\n self.createCfg_analyze(self.jobOptions_analyze[key_analyze_job])\n\n # initialize input and output file names for hadd_stage1\n key_hadd_stage1 = getKey(process_name, charge_selection)\n if not key_hadd_stage1 in self.inputFiles_hadd_stage1:\n self.inputFiles_hadd_stage1[key_hadd_stage1] = []\n self.inputFiles_hadd_stage1[key_hadd_stage1].append(self.jobOptions_analyze[key_analyze_job]['histogramFile'])\n self.outputFile_hadd_stage1[key_hadd_stage1] = os.path.join(self.dirs[DKEY_HIST], \"histograms_harvested_stage1_%s_%s_%s.root\" % \\\n (self.channel, process_name, charge_selection))\n\n # initialize input and output file names for hadd_stage2\n key_hadd_stage1 = getKey(process_name, charge_selection)\n key_hadd_stage2 = getKey(charge_selection)\n if not key_hadd_stage2 in self.inputFiles_hadd_stage2:\n self.inputFiles_hadd_stage2[key_hadd_stage2] = []\n self.inputFiles_hadd_stage2[key_hadd_stage2].append(self.outputFile_hadd_stage1[key_hadd_stage1])\n self.outputFile_hadd_stage2[key_hadd_stage2] = os.path.join(self.dirs[DKEY_HIST], \"histograms_harvested_stage2_%s_%s.root\" % \\\n (self.channel, charge_selection))\n\n logging.info(\"Creating configuration files for executing 'comp_jetToTauFakeRate'\")\n for charge_selection in self.charge_selections:\n key_comp_jetToTauFakeRate_job = getKey(charge_selection)\n key_hadd_stage2 = getKey(charge_selection)\n self.jobOptions_comp_jetToTauFakeRate[key_comp_jetToTauFakeRate_job] = {\n 'inputFile' : self.outputFile_hadd_stage2[key_hadd_stage2],\n 'cfgFile_modified' : os.path.join(\n self.dirs[DKEY_CFGS], \"comp_jetToTauFakeRate_%s_cfg.py\" % charge_selection),\n 'outputFile' : os.path.join(\n self.dirs[DKEY_HIST], \"comp_jetToTauFakeRate_%s.root\" % charge_selection),\n 'logFile' : os.path.join(\n self.dirs[DKEY_LOGS], \"comp_jetToTauFakeRate_%s.log\" % charge_selection),\n 'looseRegion' : \"jetToTauFakeRate_%s/denominator/\" % charge_selection,\n 'tightRegion' : \"jetToTauFakeRate_%s/numerator/\" % charge_selection,\n 'absEtaBins' : self.absEtaBins,\n 'ptBins' : self.ptBins\n }\n self.createCfg_comp_jetToTauFakeRate(self.jobOptions_comp_jetToTauFakeRate[key_comp_jetToTauFakeRate_job])\n self.targets.append(self.jobOptions_comp_jetToTauFakeRate[key_comp_jetToTauFakeRate_job]['outputFile'])\n\n logging.info(\"Creating configuration files to run 'makePlots'\")\n for charge_selection in self.charge_selections:\n key_makePlots_job = getKey(charge_selection)\n key_hadd_stage2 = getKey(charge_selection)\n self.jobOptions_make_plots[key_makePlots_job] = {\n 'executable' : self.executable_make_plots,\n 'inputFile' : self.outputFile_hadd_stage2[key_hadd_stage2],\n 'cfgFile_modified' : os.path.join(\n self.dirs[DKEY_CFGS], \"makePlots_%s_cfg.py\" % self.channel),\n 'outputFile' : os.path.join(\n self.dirs[DKEY_PLOT], \"makePlots_%s.png\" % self.channel),\n 'histogramDir' : \"jetToTauFakeRate_%s\" % charge_selection,\n 'label' : None,\n 'make_plots_backgrounds' : [ \"TT\", \"TTW\", \"TTZ\", \"EWK\", \"Rares\" ],\n }\n self.createCfg_makePlots(self.jobOptions_make_plots[key_makePlots_job])\n self.cfgFile_make_plots = self.cfgFile_make_plots_denominator\n for absEtaBin in [ \"absEtaLt1_5\", \"absEta1_5to9_9\" 
]:\n key_makePlots_job = getKey(charge_selection, absEtaBin, \"denominator\")\n key_hadd_stage2 = getKey(charge_selection)\n self.jobOptions_make_plots[key_makePlots_job] = {\n 'executable' : self.executable_make_plots,\n 'inputFile' : self.outputFile_hadd_stage2[key_hadd_stage2],\n 'cfgFile_modified' : os.path.join(\n self.dirs[DKEY_CFGS], \"makePlots_%s_%s_denominator_%s_cfg.py\" % (self.channel, charge_selection, absEtaBin)),\n 'outputFile' : os.path.join(\n self.dirs[DKEY_PLOT], \"makePlots_%s_%s_denominator_%s.png\" % (self.channel, charge_selection, absEtaBin)),\n 'histogramDir' : \"jetToTauFakeRate_%s/denominator/%s\" % (charge_selection, absEtaBin),\n 'label' : None,\n 'make_plots_backgrounds' : [ \"TT\", \"TTW\", \"TTZ\", \"EWK\", \"Rares\" ],\n }\n self.createCfg_makePlots(self.jobOptions_make_plots[key_makePlots_job])\n for hadTau_selection_numerator in self.hadTau_selections_numerator:\n key_makePlots_job = getKey(charge_selection, absEtaBin, \"numerator\", hadTau_selection_numerator)\n key_hadd_stage2 = getKey(charge_selection)\n self.jobOptions_make_plots[key_makePlots_job] = {\n 'executable' : self.executable_make_plots,\n 'inputFile' : self.outputFile_hadd_stage2[key_hadd_stage2],\n 'cfgFile_modified' : os.path.join(\n self.dirs[DKEY_CFGS], \"makePlots_%s_%s_numerator_%s_%s_cfg.py\" % (self.channel, charge_selection, hadTau_selection_numerator, absEtaBin)),\n 'outputFile' : os.path.join(\n self.dirs[DKEY_PLOT], \"makePlots_%s_%s_numerator_%s_%s.png\" % (self.channel, charge_selection, hadTau_selection_numerator, absEtaBin)),\n 'histogramDir' : \"jetToTauFakeRate_%s/numerator/%s/%s\" % (charge_selection, hadTau_selection_numerator, absEtaBin),\n 'label' : None,\n 'make_plots_backgrounds' : [ \"TT\", \"TTW\", \"TTZ\", \"EWK\", \"Rares\" ],\n }\n self.createCfg_makePlots(self.jobOptions_make_plots[key_makePlots_job])\n\n if self.is_sbatch:\n logging.info(\"Creating script for submitting '%s' jobs to batch system\" % self.executable_analyze)\n self.sbatchFile_analyze = os.path.join(self.dirs[DKEY_SCRIPTS], \"sbatch_analyze_%s.py\" % self.channel)\n self.createScript_sbatch_analyze(self.executable_analyze, self.sbatchFile_analyze, self.jobOptions_analyze)\n logging.info(\"Creating script for submitting '%s' jobs to batch system\" % self.executable_comp_jetToTauFakeRate)\n self.sbatchFile_comp_jetToTauFakeRate = os.path.join(self.dirs[DKEY_SCRIPTS], \"sbatch_comp_jetToTauFakeRate.py\")\n self.createScript_sbatch(self.executable_comp_jetToTauFakeRate, self.sbatchFile_comp_jetToTauFakeRate, self.jobOptions_comp_jetToTauFakeRate)\n\n lines_makefile = []\n self.addToMakefile_analyze(lines_makefile)\n self.addToMakefile_hadd_stage1(lines_makefile)\n self.addToMakefile_hadd_stage2(lines_makefile)\n self.addToMakefile_comp_jetToTauFakeRate(lines_makefile)\n self.addToMakefile_make_plots(lines_makefile)\n self.createMakefile(lines_makefile)\n \n logging.info(\"Done\")", "def generate_all_locations(grid, shape):", "def _generate_table(self):\n for i in xrange(32):\n dest = [0]\n gw = [0]\n self._table.append(\n {'destination': dest, 'gateway': gw}\n )", "def pre_config_root_create(self, resource_dict):\n pass", "def build(self):\n states = WOFRegion.query.filter(WOFRegion.country_iso=='US')\n\n logger.info('Indexing US states.')\n\n for row in tqdm(states):\n\n # Key -> id(s)\n for key in map(keyify, state_key_iter(row)):\n self.add_key(key, row.wof_id)\n\n # ID -> state\n self.add_location(row.wof_id, StateMatch(row))", "def generate_keys(self):\n\n\t\tmin_ext = 1 << self.size_ext - 
1\n\t\tmax_ext = 1 << self.size_ext\n\t\t\n\t\t\t\n\t\t# step 1 : chose random primary numbers p and q\n\t\tn = generate_prime(self.min_bound,self.max_bound)\n\t\tself._p = n\n\t\tn = generate_prime(self.min_bound,self.max_bound)\n\t\twhile(n == self._p):\n\t\t\tn = generate_prime(self.min_bound,self.max_bound)\n\t\tself._q = n\n\n\t\t#step 2 : compute n = pq\n\t\tself.n = self._p * self._q\n\n\t\t#step 3 : compute phi(n)\n\t\tself._phi = (self._p - 1) * (self._q - 1)\n\n\t\t#step 4 : chose the exponent\n\t\textension = randint(min_ext,max_ext) << math.ceil(math.log2(self.D))\n\t\textension = extension + self.D\n\t\twhile (gcd(self._phi,n) != 1):\n\t\t\textension = randint(min_ext,max_ext) << math.ceil(math.log2(self.D))\n\t\tself._d = extension\n\n\t\t#step 5 : compute d (private key)\n\t\tself.e = euclide_algorithm(self._d, self._phi)[\"U\"] % self._phi\n\n\t\tprint(\"p = \", self._p)\n\t\tprint(\"q = \", self._q)\n\t\tprint(\"d = \", self._d)", "def get_parkey_map(self):\n pkmap = {}\n for selection in self.selections.normal_values():\n for parkey, choices in selection.get_parkey_map().items():\n if parkey not in pkmap:\n pkmap[parkey] = set()\n pkmap[parkey] |= set(choices)\n for parkey, choices in pkmap.items():\n pkmap[parkey] = list(pkmap[parkey])\n if \"CORR\" not in parkey:\n pkmap[parkey].sort()\n return pkmap", "def make_cache_keys(self, identifiers):\n\n raise NotImplementedError", "def populate(self):\n\n NUM_COUNTRIES = 2 # random.randint(1, 4)\n\n # find a suitable hex\n with Timer(\"Creating initial data\", debug=self.debug):\n\n for i in range(NUM_COUNTRIES):\n country, provinces, pops = create_country(self, self.map)\n country.determine_tax_policy()\n self.countries.append(country)", "def _prepare_encryption_table():\n seed = 0x00100001\n crypt_table = {}\n\n for i in range(256):\n index = i\n for j in range(5):\n seed = (seed * 125 + 3) % 0x2AAAAB\n temp1 = (seed & 0xFFFF) << 0x10\n\n seed = (seed * 125 + 3) % 0x2AAAAB\n temp2 = (seed & 0xFFFF)\n\n crypt_table[index] = (temp1 | temp2)\n\n index += 0x100\n\n return crypt_table", "def _build_keys(self, slug, date=None, granularity='all'):\n slug = slugify(slug) # Ensure slugs have a consistent format\n if date is None:\n date = datetime.utcnow()\n patts = self._build_key_patterns(slug, date)\n if granularity == \"all\":\n return list(patts.values())\n return [patts[granularity]]", "def create_city():\n city = {}\n city['biysk'] = {}\n city['biysk']['barnaul'] = 9\n city['biysk']['novosibirsk'] = 11\n city['biysk']['belokurikha'] = 8\n city['barnaul'] = {}\n city['barnaul']['tomsk'] = 4\n city['belokurikha'] = {}\n city['belokurikha']['novosibirsk'] = 2\n city['novosibirsk'] = {}\n city['novosibirsk']['barnaul'] = 2\n city['novosibirsk']['tomsk'] = 5\n city['novosibirsk']['omsk'] = 20\n city['tomsk'] = {}\n city['tomsk']['krasnoyarsk'] = 6\n city['krasnoyarsk'] = {}\n city['krasnoyarsk']['omsk'] = 7\n city['omsk'] = {}\n return city", "def make_doi_regions(author_list: AuthorList):\n\n regions = {}\n for author in author_list:\n inst = author.institution\n if inst.region not in regions:\n regions[inst.region] = {\n \"identifier\": inst.region,\n \"name\": inst.region,\n \"types\": [\"Region\"],\n \"country\": None,\n \"country_code\": None,\n \"country_code_2\": None,\n \"region\": inst.region,\n \"subregion\": None,\n \"coordinates\": None,\n \"count\": 0,\n \"members\": {inst.subregion},\n \"rors\": {inst.ror_id},\n }\n else:\n regions[inst.region][\"members\"].add(inst.subregion)\n 
regions[inst.region][\"rors\"].add(inst.ror_id)\n\n return to_affiliations_list(regions)", "def create_all_tables(self):\n pass" ]
[ "0.65445393", "0.6132496", "0.60415924", "0.5918604", "0.59134144", "0.587338", "0.5822073", "0.5657623", "0.5650972", "0.5635661", "0.5604821", "0.5563397", "0.5509014", "0.5504165", "0.5494566", "0.54587173", "0.5435119", "0.54258394", "0.54082614", "0.5393367", "0.5384728", "0.5372961", "0.532285", "0.532167", "0.5318754", "0.5316679", "0.5307809", "0.5272147", "0.5269188", "0.52466214", "0.52363205", "0.5223402", "0.5215443", "0.51926243", "0.5184496", "0.51683277", "0.514504", "0.51122534", "0.5108816", "0.50966984", "0.50898886", "0.50830954", "0.50724626", "0.50673836", "0.50657773", "0.50556076", "0.50555104", "0.5043245", "0.50422174", "0.5040133", "0.50392073", "0.50360715", "0.50231916", "0.5020617", "0.5014766", "0.5013766", "0.50058293", "0.49807742", "0.49762323", "0.49747193", "0.49743047", "0.49733374", "0.49717516", "0.49693236", "0.49679178", "0.4966886", "0.49496076", "0.4942623", "0.4939805", "0.49359784", "0.4932408", "0.4928857", "0.49159712", "0.49140924", "0.49041948", "0.49033737", "0.48963067", "0.4893164", "0.48931354", "0.4890649", "0.4889567", "0.4888141", "0.4888141", "0.48857135", "0.48853007", "0.48819226", "0.48785236", "0.48771298", "0.48726442", "0.48700243", "0.48671094", "0.48634368", "0.48578343", "0.48495153", "0.48452428", "0.4833018", "0.48315683", "0.482938", "0.48232457", "0.48183456" ]
0.7191877
0
Constructor for the ManagementAccessToken class
def __init__(self, access_token=None, token_type=None, error=None): # Initialize members of the class self.access_token = access_token self.token_type = token_type self.error = error
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self, access_token):\n self.access_token = access_token", "def __init__(self, access_token=None):\n self.access_token = access_token", "def __init__(self, access_token):\n self._access_token = access_token", "def __init__(self, access_token_cache, account_id, credentials):\n super(AccessTokenStore, self).__init__(lock=None)\n self._access_token_cache = access_token_cache\n self._account_id = account_id\n self._credentials = credentials", "def __init__(self, access_token, endpoint='/me',\r\n version='2.5'):\r\n self.access_token = access_token\r\n self.endpoint = endpoint", "def __init__(self, token_path):\n self._accessToken = None\n self._tokenPath = token_path", "def __init__(self, host='192.168.45.45', username='admin', password='Admin123', verify_cert=False):\n logging.debug(\"In the Token __init__() class method.\")\n\n self.__host = host\n self.__username = username\n self.__password = password\n self.verify_cert = verify_cert\n self.token_expiry = None\n self.token_refreshes = 0\n self.access_token = None\n self.uuid = None\n self.refresh_token = None\n self.generate_tokens()", "def __init__(self, oauth_consumer_token=None, oauth_access_token=None):\n self.consumer_token = oauth_consumer_token\n self.access_token = oauth_access_token", "def __init__(self, access_token=None, access_token_secret=None, consumer_key=None, consumer_secret=None, header_auth=None):\r\n if access_token is not None:\r\n self.token = Token(access_token, access_token_secret)\r\n else:\r\n self.token = None\r\n\r\n if consumer_key is None and consumer_secret is None:\r\n consumer_key = self.consumer_key\r\n consumer_secret = self.consumer_secret\r\n\r\n if header_auth is not None:\r\n self.header_auth = header_auth\r\n\r\n self.consumer = Consumer(consumer_key, consumer_secret)", "def __init__(self, consumer_key, consumer_secret, access_token,\n access_token_secret, **kwargs):\n self.consumer_key = consumer_key\n self.consumer_secret = consumer_secret\n self.access_token = access_token\n self.access_token_secret = access_token_secret\n super().__init__(**kwargs)", "def __init__(self):\n self.authurl = Config().auth\n self.baseurl = Config().api\n self.s = Session()\n self.s.headers = {'Accept': 'application/json'}\n data = {\"grant_type\": \"client_credentials\", \"scope\": \"/read-public\", \"client_id\": Config().client_id,\n \"client_secret\": Config().client_secret}\n r = self.s.request(method=\"post\", url=self.authurl, data=data)\n self.s.headers = {'Accept': 'application/json', \"Access token\": r.json()[\"access_token\"]}", "def __call__(self, access_token):", "def __init__(self, access_token=None):\r\n self.access_token = access_token\r\n\r\n self.add_filter(self.add_auth)", "def __init__(self, access_key, secret_key, **kwargs):\r\n pass", "def __init__(self):\n self.application_id = None\n self.secret = None\n self.token = {}", "def __init__(self, client_id=None, client_secret=None):\n self.client_id = client_id\n self.client_secret = client_secret\n self.access_token = None\n self.refresh_token = None\n self.token_expiration_time = None", "def __init__(self, metadata_service):\n\n super(MetadataCredentials, self).__init__(\n access_token=metadata_service.auth_token,\n client_id=None,\n client_secret=None,\n refresh_token=None,\n token_expiry=None,\n token_uri=None,\n user_agent=None\n )\n self._metadata_service = metadata_service", "def __init__(self, access_token, app_id, timeout=5):\n self._timeout = timeout\n\n if isinstance(access_token, basestring):\n self._access_token = 
str(access_token)\n else:\n raise TypeError(\"Access token should be a string\")\n\n if isinstance(app_id, (int,long)):\n self._app_id = app_id\n else:\n raise TypeError(\"App ID should be an integer number\")\n\n self.uid = None\n self.uid, self.email, self.passwd = self._create_uid()\n self.session = self._auth_session()", "def __init__(self, client_id: str, client_secret: str, access_token_publish_url: str, access_token: str = None):\n\n self.client_id = client_id\n self.client_secret = client_secret\n self.access_token_publish_url = access_token_publish_url\n self.api_base_url = 'https://api.ce-cotoha.com/api/dev/'\n\n if access_token is not None:\n self.access_token = access_token\n else:\n self.access_token = self.update_access_token()", "def __init__(self, authenticator, access_token, expires_in, scope):\n super(ImplicitAuthorizer, self).__init__(authenticator)\n self._expiration_timestamp = time.time() + expires_in\n self.access_token = access_token\n self.scopes = set(scope.split(' '))", "def __init__(self, name=None, auth_token_provider_title=None, auth_token_provider_default_claims=None, auth_token_provider_endpoint=None, auth_access_token_request=None, auth_token_provider_keypair_alias=None, auth_token_provider_conn_timeout=None, auth_token_provider_so_timeout=None, auth_token_provider_client_id=None, auth_token_provider_scope=None, auth_token_provider_reuse_access_token=None, auth_token_provider_relaxed_ssl=None, token_request_customizer_type=None, auth_token_validator_type=None): # noqa: E501 # noqa: E501\n\n self._name = None\n self._auth_token_provider_title = None\n self._auth_token_provider_default_claims = None\n self._auth_token_provider_endpoint = None\n self._auth_access_token_request = None\n self._auth_token_provider_keypair_alias = None\n self._auth_token_provider_conn_timeout = None\n self._auth_token_provider_so_timeout = None\n self._auth_token_provider_client_id = None\n self._auth_token_provider_scope = None\n self._auth_token_provider_reuse_access_token = None\n self._auth_token_provider_relaxed_ssl = None\n self._token_request_customizer_type = None\n self._auth_token_validator_type = None\n self.discriminator = None\n\n if name is not None:\n self.name = name\n if auth_token_provider_title is not None:\n self.auth_token_provider_title = auth_token_provider_title\n if auth_token_provider_default_claims is not None:\n self.auth_token_provider_default_claims = auth_token_provider_default_claims\n if auth_token_provider_endpoint is not None:\n self.auth_token_provider_endpoint = auth_token_provider_endpoint\n if auth_access_token_request is not None:\n self.auth_access_token_request = auth_access_token_request\n if auth_token_provider_keypair_alias is not None:\n self.auth_token_provider_keypair_alias = auth_token_provider_keypair_alias\n if auth_token_provider_conn_timeout is not None:\n self.auth_token_provider_conn_timeout = auth_token_provider_conn_timeout\n if auth_token_provider_so_timeout is not None:\n self.auth_token_provider_so_timeout = auth_token_provider_so_timeout\n if auth_token_provider_client_id is not None:\n self.auth_token_provider_client_id = auth_token_provider_client_id\n if auth_token_provider_scope is not None:\n self.auth_token_provider_scope = auth_token_provider_scope\n if auth_token_provider_reuse_access_token is not None:\n self.auth_token_provider_reuse_access_token = auth_token_provider_reuse_access_token\n if auth_token_provider_relaxed_ssl is not None:\n self.auth_token_provider_relaxed_ssl = auth_token_provider_relaxed_ssl\n 
if token_request_customizer_type is not None:\n self.token_request_customizer_type = token_request_customizer_type\n if auth_token_validator_type is not None:\n self.auth_token_validator_type = auth_token_validator_type", "def __init__(self, token, api_key):\n self.token = token\n self.api_key = api_key", "def __init__(self, username, passwordresettoken, passwordresetexpires, password, email, phone, facebook, google, linkin, group_id):\n\n self.username = username\n self.passwordresettoken = passwordresettoken\n self.passwordresetexpires = passwordresetexpires\n self.password = password\n self.email = email\n self.phone = phone\n self.facebook = facebook\n self.google = google\n self.linkin = linkin\n self.createdate = datetime.now()\n self.updatedate = datetime.now()\n self.group_id = group_id", "def __init__(self, token, arn) -> None:\n self.token = token\n self.arn = arn", "def __init__(self, access_token=None):\r\n self.access_token = access_token\r\n\r\n self.add_filter(self.add_auth)\r\n self.add_filter(self.set_format)", "def __init__(self):\n\n self._authorization = None\n self._last_used = datetime.utcnow() - timedelta(hours=10)\n\n self._resource_owner_key = None\n self._resource_owner_secret = None\n\n self._consumer_key = etrade_config.oauth_consumer_key\n self._consumer_secret = etrade_config.oath_consumer_secret\n\n self._auth_file_path = etrade_config.auth_file_path\n self._user_name = etrade_config.user_name\n self._user_pwd = etrade_config.user_pwd", "def __init__(\n self,\n uri,\n audience,\n token_type,\n get_token,\n **kwargs\n ):\n self.sasl = SASLAnonymousCredential()\n self.uri = uri\n self.audience = audience\n self.token_type = token_type\n self.get_token = get_token\n self.expires_in = kwargs.pop(\"expires_in\", AUTH_DEFAULT_EXPIRATION_SECONDS)\n self.expires_on = kwargs.pop(\"expires_on\", None)", "def __init__(self, setting):\n self.setting.update(setting)\n self.base = (self.setting.get('base') + '/').lower()\n self.client = Box(self.setting.get('access_token'))\n self.client.users_get_current_account()", "def __init__(self, username, password, referrer, expiration=60,\n root_uri=_DEFAULT_ROOT_URI):\n self.username = username\n self.password = password\n self.expiration = expiration\n self.referrer = referrer\n self.root_uri = root_uri\n self._token = None\n self._request_token()", "def __init__(self, token, connection):\n self.__token = token\n self.__connection = connection\n self.__message = None\n self.__admin_id = None\n self.__username = None", "def __init__(self, access_token, base_url=\"https://api.crowdstrike.com\"):\n self.headers = {'Authorization': 'Bearer {}'.format(access_token)}\n self.base_url = base_url", "def __init__(self, authenticator):\n self._authenticator = authenticator\n self._clear_access_token()\n self._validate_authenticator()", "def __init__(self,consumer_key,consumer_secret):\n\t\tself.CONSUMER_KEY = consumer_key\n\t\tself.CONSUMER_SECRET = consumer_secret\n\t\tself.token=None\n\t\tself.secret=None\n\t\tself.resource_owner_key=None\n\t\tself.resource_owner_secret=None", "def __init__(self, authtoken, organization_id):\n self.headers = {\n 'Authorization': 'Zoho-oauthtoken ' + authtoken,\n }\n self.details = {\n 'organization_id': organization_id\n }", "def __init__(self, api_token):\n self.api_token = api_token", "def __init__(self, access_token, base_url='https://api.crowdstrike.com'):\n self.headers = { 'Authorization': 'Bearer {}'.format(access_token) }\n self.base_url = base_url", "def __init__(self, access_token=None):\r\n 
self.access_token = access_token\r\n\r\n self.add_filter(self.add_auth)\r\n self.add_filter(self.use_json)", "def __init__(self, access_token=None):\r\n self.access_token = access_token\r\n\r\n self.add_filter(self.add_auth)\r\n self.add_filter(self.use_json)", "def __init__(self):\n self.api = Api(consumer_key=credentials[\"consumer_key\"],\n consumer_secret=credentials[\"consumer_secret\"],\n access_token_key=credentials[\"access_token_key\"],\n access_token_secret=credentials[\"access_token_secret\"])", "def __init__(self):\n self.token = None\n self.login()", "def __init__(self, token):\n\n self.token = token", "def __init__(self, password, mfa_token=None):\n self._password = password\n self._mfa_token = mfa_token", "def __init__(self, client_id, token, scope=[\"activity\", \"heartrate\", \"location\", \"nutrition\", \"profile\", \"settings\", \"sleep\", \"social\", \"weight\"]):\n\n\t\tif token['access_token'] == \"\":\n\t\t\t# We need to fetch a token for the user.\n\t\t\tprint(\"Note: looks like we don't have an access token yet. Let's fetch one.\")\n\n\t\t\tself.client = MobileApplicationClient(client_id)\n\t\t\tself.fitbit = OAuth2Session(client_id, client=self.client, scope=scope)\n\n\t\t\tauthorization_base_url = \"https://www.fitbit.com/oauth2/authorize\"\n\n\t\t\tauthorization_url, state = self.fitbit.authorization_url(authorization_base_url)\n\n\t\t\tprint(\"Please go to the following authorization URL: {}\".format(authorization_url))\n\n\t\t\traw_callback_url = input(\"Paste callback URL you get back here: \")\n\n\t\t\tself.fitbit.token_from_fragment(raw_callback_url)\n\t\t\tself.token = self.fitbit.token['access_token']\n\n\t\t\tprint(self.fitbit.token)\n\n\t\telse:\n\t\t\t# We've got an access token, and we'll use it.\n\t\t\tself.client = MobileApplicationClient(client_id)\n\t\t\tself.fitbit = OAuth2Session(client_id, client=self.client, scope=scope, token=token)\n\t\t\tself.token = token['access_token']", "def __init__(self, authorization_url, token_url, token_refresh_url, client_id, xapi_key,\n local_server_address=LOCAL_OAUTH_SERVER_URL):\n\n # Grab the client info needed\n self.local_server_address = local_server_address\n self.authorization_url = authorization_url\n self.token_url = token_url\n self.token_refresh_url = token_refresh_url\n self.client_id = client_id\n self.xapi_key = xapi_key", "def __init__(self, host='localhost', password='agent'):\n self._host = host\n self._password = password\n self._auth_url = \"http://%s:5000/v2.0/tokens\" % self._host\n self._token = None\n self._vsm_url = None", "def __init__(self, token):\n self.token = token", "def __init__(self, token):\n self.token = token", "def __init__(self, token):\n self.token = token", "def __init__(self, temboo_session):\n super(GetTokenDetails, self).__init__(temboo_session, '/Library/Utilities/TokenStorage/GetTokenDetails')", "def __init__(self, address='192.168.1.1', port=443, username=\"admin\", password=\"Admin123\", version='latest'):\n # stash connectivity info for login call\n self.server_address = address\n self.server_port = port\n self.username = username\n self.password = password\n\n # access_token is used to save the current access token this could be either a normal login token or a custom\n # login token\n self.access_token = None\n # original_access_token is where we cache the normal 30 minute token obtained with admin credentials\n self.original_access_token = None\n # original_custom_token is where we store the custom token\n self.original_custom_token = None\n\n # WARNINGS\n 
requests.packages.urllib3.disable_warnings()\n # swagger doesn't like 'also_return_response' sent from FDM\n warnings.filterwarnings(\n 'ignore', 'config also_return_response is not a recognized config key')\n\n # The following version is the API version that will be used\n if version == 'latest':\n self.version = str(version)\n else:\n self.version = 'v'+str(version)", "def create_access_token(self):\n\t\t# Wraper for also caching invalid results\n #def getMetadataRofs(path):\n #\ttry:\n # \treturn self.client.metadata(path)\n # except Exception, e:\n # log.write('Exception at getMetadataRofs for path '+ path + '\\n')\n # pprint(e, log)\n # return False\n\n\t\ttry:\n\t\t\trequest_token = self.session.obtain_request_token()\n\t\t\turl = self.session.build_authorize_url(request_token)\n\t\t\tprint url\n\t\t\traw_input()\n\t\t\taccess_token = self.session.obtain_access_token(request_token)\n\t\t\tself.client = client.DropboxClient(self.session)\n\t\t\t\n\t\t\t# Build cache for metadata querying\n\n\t\t\t# Wraper for also caching invalid results\n\t\t\tdef getMetadataRofs(path):\n\t\t\t\ttry:\n\t\t\t\t\treturn self.client.metadata(path)\n\t\t\t\texcept Exception, e:\n\t\t\t\t\tlogger.error('Exception at getMetadataRofs for path '+ path + '\\n')\n\t\t logger.debug(sys.exc_info()[0])\n\t\t\t\t\treturn False\n\n\t\t\tself.cache_metadata = Cache(getMetadataRofs)\n\t\t\tself.cache_files = {}\n\n\t\texcept Exception, e:\n\t\t\tlogger.error('Exception %s at create_access_token' % (sys.exc_info()[0]))\n\t\t\tlogger.debug(pformat(sys.exc_info()))", "def __init__(self, access_token, base_url=SHEERID_ENDPOINT_SANDBOX,\n target_version=\"0.5\", verbose=False, insecure=False):\n self.access_token = access_token\n self.base_url = base_url\n self.verbose = verbose\n self.target_version = target_version\n self.insecure = insecure", "def __init__(self, token):\r\n self.apiroot = 'https://api-ssl.bitly.com/v3'\r\n\r\n self.access_token = token\r\n self.add_filter(self.add_authorization)", "def __init__(self, authtoken, portal_id):\n self.details = { \n 'authtoken': authtoken\n }\n self.portal_id = portal_id", "def __init__(self, authtoken, portal_id):\n self.details = { \n 'authtoken': authtoken\n }\n self.portal_id = portal_id", "def __init__(self, user_id=None, access_token=None):\n default_attr = dict(user_id=str(),\n access_token=str())\n self.user_id = user_id\n self.access_token = access_token\n self._set_default_attr(default_attr)", "def __init__(self, adapter, config, createToken=None):\r\n self._adapter = adapter\r\n self.oauthToken = None\r\n\r\n #make sure their request implementation matches our adapter\r\n if not hasattr(adapter, \"getRequest\"):\r\n raise TypeError(\"Your http request implementation is missing the getRequest method\")\r\n if not hasattr(adapter, \"postRequest\"):\r\n raise TypeError(\"Your http request implementation is missing the postRequest method\")\r\n if not hasattr(adapter, \"deleteRequest\"):\r\n raise TypeError(\"Your http request implementation is missing the deleteRequest method\")\r\n if not hasattr(adapter, \"putRequest\"):\r\n raise TypeError(\"Your http request implementation is missing the putRequest method\")\r\n\r\n self._config = config\r\n self._oauth = OAuth(config, self._adapter)\r\n\r\n if createToken is not None:\r\n self.createAccessToken = createToken\r\n else:\r\n self.createAccessToken = self.createAccessTokenReplacement()", "def __init__(self, client_id, client_secret=None, access_token=None,\n refresh_token=None, verify=True):\n self.is_authenticated 
= False\n self.access_token = access_token\n self.client_id = client_id\n self.client_secret = client_secret\n self.DEFAULT_LIMIT = 100\n self.ratelimit_clientlimit = None\n self.ratelimit_clientremaining = None\n self.ratelimit_userlimit = None\n self.ratelimit_userremaining = None\n self.ratelimit_userreset = None\n self.refresh_token = refresh_token\n self.verify = verify", "def __init__(self, url, username, password):\n if url[-1] == \"/\":\n self._url = url[:-1]\n else:\n self._url = url\n\n params = 'username=%s&password=%s' % (username, password)\n r = requests.post(urljoin(self._url, Client._auth_resource) + params)\n \n if r.status_code != requests.status_codes.codes.ok:\n raise ClientAuthenticationFailed()\n\n try:\n data = r.json()\n token_id = data['tokenId']\n except Exception, e:\n raise ClientException(r.status_code, \n \"Some error has ocurred getting the token value from %s\" % r.text)\n\n self._token_id = token_id\n self._username = username", "def __init__(self,\n token_storage_type, token_type,\n token_lifetime, refresh_token_lifetime,\n app, development = False):\n self.token_type = token_type\n self.token_storage = token_storage_type()\n self.refresh_token_storage = token_storage_type()\n self.token_lifetime = token_lifetime\n self.refresh_token_lifetime = refresh_token_lifetime\n self.init_app(app)\n self.development = development\n\n log.info(\"Auth object initialized. Token: %s, Storage: %s, token lifetime %d, refresh token lifetime %d, development: %r\",\n self.token_type.__name__,\n token_storage_type.__name__,\n self.token_lifetime,\n self.refresh_token_lifetime,\n self.development,)", "def __init__(self, host, access_key, secret_key):\n self._host = host\n self._access_key = access_key\n self._secret_key = secret_key", "def __init__(self, host, access_key, secret_key):\n self._host = host\n self._access_key = access_key\n self._secret_key = secret_key", "def __init__(self, host, access_key, secret_key):\n self._host = host\n self._access_key = access_key\n self._secret_key = secret_key", "def __init__(self, account, password, proxy=None):\n self.account = account\n self.password = password\n self._proxy = proxy\n self._token = None", "def __init__(\n self,\n uri,\n audience,\n username,\n password,\n **kwargs\n ):\n self.username = username\n self.password = password\n expires_in = kwargs.pop(\"expires_in\", AUTH_DEFAULT_EXPIRATION_SECONDS)\n expires_on = kwargs.pop(\"expires_on\", None)\n expires_in, expires_on = self._set_expiry(expires_in, expires_on)\n self.get_token = partial(_generate_sas_access_token, uri, username, password, expires_in)\n super(SASTokenAuth, self).__init__(\n uri,\n audience,\n kwargs.pop(\"token_type\", TOKEN_TYPE_SASTOKEN),\n self.get_token,\n expires_in=expires_in,\n expires_on=expires_on\n )", "def __init__(self, public_key, private_key, token, token_secret, base_url='http://api.telldus.com'):\n self.public_key = public_key\n self.private_key = private_key\n self.token = token\n self.token_secret = token_secret\n\n self.base_url = base_url\n\n self.oauth = self.generate_temp_session()", "def __init__(self, client_id, client_secret):\n self.client_id = client_id\n self.client_secret = client_secret\n self.token = None\n self.request_time = None\n self._initialized = False", "def __init__(self, oauth2_access_token, locale=None, rest_client=None):\n if rest_client is None: rest_client = RESTClient\n if isinstance(oauth2_access_token, basestring):\n if not _OAUTH2_ACCESS_TOKEN_PATTERN.match(oauth2_access_token):\n raise 
ValueError(\"invalid format for oauth2_access_token: %r\"\n % (oauth2_access_token,))\n self.session = DropboxOAuth2Session(oauth2_access_token, locale)\n elif isinstance(oauth2_access_token, DropboxSession):\n # Backwards compatibility with OAuth 1\n if locale is not None:\n raise ValueError(\"The 'locale' parameter to DropboxClient is only useful \"\n \"when also passing in an OAuth 2 access token\")\n self.session = oauth2_access_token\n else:\n raise ValueError(\"'oauth2_access_token' must either be a string or a DropboxSession\")\n self.rest_client = rest_client", "def test_create_o_auth_access_token(self):\n pass", "def __init__(self, authtoken: str):\n super().__init__(authtoken)\n self.person = Person(authtoken)\n self.device = Device(authtoken)\n self.flexschedulerule = FlexSchedulerule(authtoken)\n self.notification = Notification(authtoken)\n self.schedulerule = Schedulerule(authtoken)\n self.zone = Zone(authtoken)", "def __init__(self, url, token):\n super().__init__(url, token)", "def __init__(self, provider=None, config_file=None, profile_name=None,\n tenant_id=None, user_id=None, fingerprint=None,\n private_key=None, pass_phrase=None,\n duration_seconds=MAX_ENTRY_LIFE_TIME,\n refresh_ahead=DEFAULT_REFRESH_AHEAD):\n CheckValue.check_int_gt_zero(duration_seconds, 'duration_seconds')\n CheckValue.check_int_gt_zero(refresh_ahead, 'refresh_ahead')\n if duration_seconds > SignatureProvider.MAX_ENTRY_LIFE_TIME:\n raise IllegalArgumentException(\n 'Access token cannot be cached longer than ' +\n str(SignatureProvider.MAX_ENTRY_LIFE_TIME) + ' seconds.')\n\n #\n # This class depends on the oci package\n #\n if oci is None:\n raise ImportError('Package \\'oci\\' is required; please install')\n\n try:\n if provider is not None:\n if not isinstance(\n provider,\n (oci.signer.Signer,\n oci.auth.signers.InstancePrincipalsSecurityTokenSigner)):\n raise IllegalArgumentException(\n 'provider should be an instance of oci.signer.Signer' +\n 'or oci.auth.signers.' 
+\n 'InstancePrincipalsSecurityTokenSigner.')\n self._provider = provider\n elif (tenant_id is None or user_id is None or fingerprint is None or\n private_key is None):\n CheckValue.check_str(config_file, 'config_file', True)\n CheckValue.check_str(profile_name, 'profile_name', True)\n if config_file is None and profile_name is None:\n # Use default user profile and private key from default path\n # of configuration file ~/.oci/config.\n config = oci.config.from_file()\n elif config_file is None and profile_name is not None:\n # Use user profile with given profile name and private key\n # from default path of configuration file ~/.oci/config.\n config = oci.config.from_file(profile_name=profile_name)\n elif config_file is not None and profile_name is None:\n # Use user profile with default profile name and private key\n # from specified configuration file.\n config = oci.config.from_file(file_location=config_file)\n else: # config_file is not None and profile_name is not None\n # Use user profile with given profile name and private key\n # from specified configuration file.\n config = oci.config.from_file(\n file_location=config_file, profile_name=profile_name)\n self._provider = oci.signer.Signer(\n config['tenancy'], config['user'], config['fingerprint'],\n config['key_file'], config.get('pass_phrase'),\n config.get('key_content'))\n else:\n CheckValue.check_str(tenant_id, 'tenant_id')\n CheckValue.check_str(user_id, 'user_id')\n CheckValue.check_str(fingerprint, 'fingerprint')\n CheckValue.check_str(private_key, 'private_key')\n CheckValue.check_str(pass_phrase, 'pass_phrase', True)\n if path.isfile(private_key):\n key_file = private_key\n key_content = None\n else:\n key_file = None\n key_content = private_key\n self._provider = oci.signer.Signer(\n tenant_id, user_id, fingerprint, key_file, pass_phrase,\n key_content)\n except AttributeError:\n raise ImportError('Package \\'oci\\' is required; please install')\n self._signature_cache = Memoize(duration_seconds)\n self._refresh_interval_s = (duration_seconds - refresh_ahead if\n duration_seconds > refresh_ahead else 0)\n\n # Refresh timer.\n self._timer = None\n self._service_host = None\n self._logger = None\n self._logutils = LogUtils()\n self._sess = Session()\n self._request_utils = RequestUtils(self._sess, self._logutils)", "def get_access_token(self, *args, **kwargs):\n raise NotImplementedError('Subclasses must implement this method.')", "def __init__(self):\n #print (\"Object created\")\n self.apikey='acc_4fc1a435b3188b5'\n self.secret = 'f49c4be14a048d5de7e7f6c564b52022'\n self.fileToIdMap = {}", "def __init__(self, channel_access_token):\n self._headers = {\n 'Authorization': 'Bearer {0}'.format(channel_access_token),\n 'Content-Type': 'application/json',\n }", "def __init__(self, client_id=None, access_token=None):\r\n if not client_id and not access_token:\r\n raise TypeError('__init__() must be passed at least one '\r\n 'of client_id, access_token')\r\n\r\n self.apiroot = 'https://api.instagram.com/v1'\r\n\r\n self.client_id = client_id\r\n self.access_token = access_token\r\n self.add_filter(self.add_authorization)", "def __init__(self, consumer_key,\n consumer_secret,\n request_token_url,\n access_token_url,\n authorize_url,\n callback_url='oob',\n version='1.0',\n token=None):\n self.__consumer_key = consumer_key\n self.__signature_method = 'HMAC-SHA1'\n self.__version = version\n self.__consumer_secret = consumer_secret\n self.__signing_key = None\n self.__signature_base_string = None\n self.__parameter_string = None\n 
self.__auth_headers = None\n self.__token = token\n self.__signature = None\n self.__access_token_url = access_token_url\n self.__request_token_url = request_token_url\n self.__authorize_url = authorize_url\n self.__callback_url = callback_url\n self.__response = None\n self.__request = None", "def access_token(self):\n return self.access_token_str", "def auth_token(self):", "def __init__(self, consumer_key, consumer_secret, access_token, access_token_secret):\n self.api = self.getAPI(consumer_key, consumer_secret, access_token, access_token_secret)", "def __init__(self, authenticator, refresh_token=None):\n super(Authorizer, self).__init__(authenticator)\n self.refresh_token = refresh_token", "def __init__(self, access_token: str):\n self._access_token = access_token\n self._history = collections.deque(maxlen=20)\n self._schema: graphql.GraphQLSchema = MISSING\n self._localstorage = None\n self._contextstorage = None\n self._transport = None\n self._query_type = \"query\"\n self._required_query_output: Optional[str] = None\n self._service_endpoint: Optional[str] = None\n self._caller: Optional[str] = None", "def _authenticate(self):\n url = self.endpoint + \"/tokens\"\n h = httplib2.Http()\n response, rawcontent = h.request(\n url, \n method=\"POST\",\n headers={ \"Content-Type\":\"application/json\" },\n body=json.dumps(self.credentials()))\n content = json.loads(rawcontent)\n self.token = content['access']['token']['id']\n #TODO: this needs to convert the ISO8601 string to a timestamp\n self.expiration = content['access']['token']['expires']\n self.catalog = content['access']['serviceCatalog']", "def __init__(\n self,\n uri,\n audience,\n get_token,\n **kwargs\n ):\n super(JWTTokenAuth, self).__init__(uri, audience, kwargs.pop(\"token_type\", TOKEN_TYPE_JWT), get_token)\n self.get_token = get_token", "def _set_access_token(self):\n integration_context = demisto.getIntegrationContext()\n access_token = integration_context.get(ACCESS_TOKEN_CONST)\n valid_until = integration_context.get(EXPIRES_IN)\n if access_token and valid_until:\n if int(time.time()) < valid_until:\n self.access_token = access_token\n self.api_url = integration_context.get(API_URL_CONST, DEFAULT_API_URL)\n self.instance_id = integration_context.get(INSTANCE_ID_CONST)\n return\n demisto.debug(f'access token time: {valid_until} expired/none. 
Will call oproxy')\n access_token, api_url, instance_id, refresh_token, expires_in = self._oproxy_authorize()\n updated_integration_context = {\n ACCESS_TOKEN_CONST: access_token,\n EXPIRES_IN: int(time.time()) + expires_in - SECONDS_30,\n API_URL_CONST: api_url,\n INSTANCE_ID_CONST: instance_id\n }\n if refresh_token:\n updated_integration_context.update({'refresh_token': refresh_token})\n demisto.setIntegrationContext(updated_integration_context)\n self.access_token = access_token\n self.api_url = api_url\n self.instance_id = instance_id", "def __init__(self, auth_key, auth_secret):\n\n self._auth_key = auth_key\n self._auth_secret = auth_secret", "def __init__(self, session: ClientSession, token: str, *, api_base: str = API_BASE):\r\n self._session = session\r\n self._token = token\r\n self._api_base = api_base", "def __init__(self, token):\n self.token = token\n self.session = requests.Session()\n self.session.headers.update({\"Authorization\": \"Bearer {token}\".format(token=self.token)})", "def __init__(self, key, secret, token=None, token_secret=None, subdomain=None):\n\n self.key = key\n self.secret = secret\n self.token = token\n self.token_secret = token_secret\n self.subdomain = subdomain or self.DOMAIN", "def __init__(self, action, resource, instance_id, username, token):\n self.action = action\n self.resource = resource\n self.instance_id = instance_id\n self.username = username\n self.token = token", "def __init__(self, login, password, organization):\n self.login = login\n self.password = password\n self.organization = organization", "def __init__(self, context):\n self._context = context\n self._api_token = context.get_github_token()\n self._rate_limit_status = None", "def __init__(self):\n\n self._authorize()", "def __enter__(self):\n logging.debug(\"In the FMC __enter__() class method.\")\n self.mytoken = Token(host=self.host, username=self.username, password=self.password, verify_cert=self.VERIFY_CERT)\n self.uuid = self.mytoken.uuid\n self.base_url = \"https://{}/{}/domain/{}\".format(self.host, self.API_CONFIG_VERSION, self.uuid)\n return self", "def __init__(self, username=None, password=None, apitoken=None):\n self.__credentials = None\n self.__headers = {}\n if apitoken:\n self.authenticate_by_token(apitoken)\n if username and password:\n self.authenticate(username, password)", "def __init__(self, clientId, clientSecret, scopeList, applicationCallback):\n\n self.authServer = \"https://accounts.google.com/o/oauth2/device/code\"\n self.pollServer = \"https://www.googleapis.com/oauth2/v4/token\"\n self.refreshServer = self.pollServer\n self.grantType = \"http://oauth.net/grant_type/device/1.0\"\n self.refreshGrantType = \"refresh_token\"\n self.clientId = clientId\n self.clientSecret = clientSecret\n self.scopeList = scopeList\n self.applicationCallback = applicationCallback\n self.deviceCode = \"\"\n self.pollInterval = 10", "def __init__(self, callback_url):\n # Credientials\n self.URI_SCHEME = \"https\"\n self.API_ENDPOINT = \"rightsignature.com\"\n self.REQUEST_TOKEN_URL = \"/oauth/request_token\"\n self.ACCESS_TOKEN_URL = \"/oauth/access_token\"\n self.REDIRECT_URL = \"/oauth/authorize\"\n self.version = \"1.0\"\n self.signature_method = \"HMAC-SHA1\" # as I said\n self.BASE_URL = \"%s://%s\" % (self.URI_SCHEME, self.API_ENDPOINT)\n\n self.API_KEY = \"\"\n self.API_SECRET = \"\"\n self.CALLBACK_URL = callback_url\n self.request_token = None # that comes later\n self.access_token = None # that comes later and later\n\n self.request_token_secret = None\n 
self.access_token_secret = None\n\n self.verifier = None\n self.error = None\n\n self.request_oauth_nonce = None\n self.request_oauth_timestamp = None\n self.access_oauth_nonce = None\n self.access_oauth_timestamp = None\n self.request_oauth_error = None\n self.access_oauth_error = None", "def __init__(self, **kwargs):\n\t\tself._name = None\n\t\tself._username = None\n\t\tself._password = None\n\t\tself._context = self", "def __init__(self, **kwargs):\n\t\tself._name = None\n\t\tself._username = None\n\t\tself._password = None\n\t\tself._context = self", "def __init__(self, **kwargs):\n\t\tself._name = None\n\t\tself._username = None\n\t\tself._password = None\n\t\tself._context = self" ]
[ "0.7636313", "0.7517129", "0.7437664", "0.72003037", "0.7027544", "0.6943499", "0.6889966", "0.68749976", "0.6788117", "0.6651019", "0.6641739", "0.6631", "0.66123813", "0.6594493", "0.65795815", "0.6572527", "0.6505966", "0.65004337", "0.64906555", "0.64870405", "0.64369303", "0.6432948", "0.6413908", "0.64088815", "0.64004654", "0.634751", "0.6338384", "0.63266766", "0.63040555", "0.63017535", "0.629539", "0.62850654", "0.6283553", "0.6280793", "0.6277945", "0.6276907", "0.62717706", "0.62717706", "0.62676454", "0.6234435", "0.62334865", "0.62327725", "0.6232642", "0.6220798", "0.6187977", "0.6175181", "0.6175181", "0.6175181", "0.6172689", "0.6171752", "0.6169935", "0.6152863", "0.61507773", "0.61482584", "0.61482584", "0.61367494", "0.612561", "0.6120999", "0.6113882", "0.61030424", "0.6083758", "0.6083758", "0.6083758", "0.60833216", "0.6076565", "0.60668993", "0.60596126", "0.60448515", "0.60410464", "0.60340077", "0.6028402", "0.601837", "0.60060114", "0.6004717", "0.6003386", "0.59959424", "0.5971996", "0.5968252", "0.59663606", "0.59639883", "0.59618455", "0.59307104", "0.58982456", "0.589758", "0.58396184", "0.5829063", "0.58275473", "0.58255297", "0.5819738", "0.5807569", "0.58060026", "0.5802389", "0.5799827", "0.57855767", "0.5776457", "0.57705736", "0.5768637", "0.57622814", "0.57622814", "0.57622814" ]
0.7398528
3
Creates an instance of this model from a dictionary
def from_dictionary(cls, dictionary):
    if dictionary is None:
        return None

    # Extract variables from the dictionary
    access_token = dictionary.get('accessToken')
    token_type = dictionary.get('tokenType')
    error = cohesity_app_sdk.models.error.Error.from_dictionary(dictionary.get('error')) if dictionary.get('error') else None

    # Return an object of this model
    return cls(access_token, token_type, error)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def from_dictionary(cls,\n dictionary):\n if dictionary is None:\n return None\n\n # Extract variables from the dictionary\n id = dictionary.get('id')\n name = dictionary.get('name')\n mtype = dictionary.get('type')\n usage_bytes = dictionary.get('usageBytes')\n\n # Return an object of this model\n return cls(\n id,\n name,\n mtype,\n usage_bytes\n)", "def create(cls, dictionary):\n return cls(**dictionary)", "def create(cls, dictionary):\n return cls(**dictionary)", "def from_dictionary(cls,\r\n dictionary):\r\n if dictionary is None:\r\n return None\r\n\r\n # Extract variables from the dictionary\r\n name = dictionary.get('name')\r\n scheduling = meraki.models.scheduling_model.SchedulingModel.from_dictionary(dictionary.get('scheduling')) if dictionary.get('scheduling') else None\r\n bandwidth = meraki.models.bandwidth_model.BandwidthModel.from_dictionary(dictionary.get('bandwidth')) if dictionary.get('bandwidth') else None\r\n\r\n # Clean out expected properties from dictionary\r\n for key in cls._names.values():\r\n if key in dictionary:\r\n del dictionary[key]\r\n\r\n # Return an object of this model\r\n return cls(name,\r\n scheduling,\r\n bandwidth,\r\n dictionary)", "def from_dictionary(cls,\r\n dictionary):\r\n if dictionary is None:\r\n return None\r\n\r\n # Extract variables from the dictionary\r\n external_ids = dictionary.get('externalIds')\r\n harsh_accel_setting = dictionary.get('harsh_accel_setting')\r\n name = dictionary.get('name')\r\n\r\n # Return an object of this model\r\n return cls(external_ids,\r\n harsh_accel_setting,\r\n name)", "def from_dictionary(cls,\n dictionary):\n if dictionary is None:\n return None\n\n # Extract variables from the dictionary\n formatted_price = dictionary.get('FormattedPrice')\n amount = dictionary.get('Amount')\n currency_code = dictionary.get('CurrencyCode')\n\n # Return an object of this model\n return cls(formatted_price,\n amount,\n currency_code)", "def from_dictionary(cls,\r\n dictionary):\r\n if dictionary is None:\r\n return None\r\n\r\n # Extract variables from the dictionary\r\n id = dictionary.get('Id')\r\n name = dictionary.get('Name')\r\n last_edited = APIHelper.RFC3339DateTime.from_value(dictionary.get(\"LastEdited\")).datetime if dictionary.get(\"LastEdited\") else None\r\n\r\n # Clean out expected properties from dictionary\r\n for key in cls._names.values():\r\n if key in dictionary:\r\n del dictionary[key]\r\n\r\n # Return an object of this model\r\n return cls(id,\r\n name,\r\n last_edited,\r\n dictionary)", "def from_dictionary(cls,\n dictionary):\n if dictionary is None:\n return None\n\n # Extract variables from the dictionary\n mtype = dictionary.get(\"type\")\n filename = dictionary.get(\"filename\")\n payload = dictionary.get(\"payload\")\n\n # Return an object of this model\n return cls(mtype,\n filename,\n payload)", "def from_dictionary(cls,\n dictionary):\n if dictionary is None:\n return None\n\n # Extract variables from the dictionary\n protected_count = dictionary.get('protectedCount')\n protected_size = dictionary.get('protectedSize')\n unprotected_count = dictionary.get('unprotectedCount')\n unprotected_size = dictionary.get('unprotectedSize')\n\n # Return an object of this model\n return cls(\n protected_count,\n protected_size,\n unprotected_count,\n unprotected_size\n)", "def from_dict(cls, dictionary: Dict[str, Any]):\n return cls(**dictionary)", "def from_dictionary(cls,\r\n dictionary):\r\n if dictionary is None:\r\n return None\r\n\r\n # Extract variables from the dictionary\r\n primary_language 
= dictionary.get('PrimaryLanguage')\r\n secondary_language = dictionary.get('SecondaryLanguage')\r\n xml_signature = dictionary.get('XmlSignature')\r\n\r\n # Clean out expected properties from dictionary\r\n for key in cls._names.values():\r\n if key in dictionary:\r\n del dictionary[key]\r\n\r\n # Return an object of this model\r\n return cls(primary_language,\r\n secondary_language,\r\n xml_signature,\r\n dictionary)", "def from_dictionary(cls,\r\n dictionary):\r\n if dictionary is None:\r\n return None\r\n\r\n # Extract variables from the dictionary\r\n links = dictionary.get('links')\r\n email_config = dictionary.get('emailConfig')\r\n\r\n # Clean out expected properties from dictionary\r\n for key in cls._names.values():\r\n if key in dictionary:\r\n del dictionary[key]\r\n\r\n # Return an object of this model\r\n return cls(links,\r\n email_config,\r\n dictionary)", "def from_dictionary(cls,\r\n dictionary):\r\n if dictionary is None:\r\n return None\r\n\r\n # Extract variables from the dictionary\r\n id = dictionary.get('id')\r\n number = dictionary.get('number')\r\n name = dictionary.get('name')\r\n balance = dictionary.get('balance')\r\n mtype = dictionary.get('type')\r\n status = dictionary.get('status')\r\n customer_id = dictionary.get('customerId')\r\n institution_id = dictionary.get('institutionId')\r\n balance_date = dictionary.get('balanceDate')\r\n created_date = dictionary.get('createdDate')\r\n currency = dictionary.get('currency')\r\n institution_login_id = dictionary.get('institutionLoginId')\r\n display_position = dictionary.get('displayPosition')\r\n\r\n # Clean out expected properties from dictionary\r\n for key in cls._names.values():\r\n if key in dictionary:\r\n del dictionary[key]\r\n\r\n # Return an object of this model\r\n return cls(id,\r\n number,\r\n name,\r\n balance,\r\n mtype,\r\n status,\r\n customer_id,\r\n institution_id,\r\n balance_date,\r\n created_date,\r\n currency,\r\n institution_login_id,\r\n display_position,\r\n dictionary)", "def from_dictionary(cls,\r\n dictionary):\r\n if dictionary is None:\r\n return None\r\n\r\n # Extract variables from the dictionary\r\n alias = dictionary.get(\"alias\")\r\n cnam_lookups_enabled = dictionary.get(\"cnam_lookups_enabled\")\r\n number_type = dictionary.get(\"number_type\")\r\n rate_center = dictionary.get(\"rate_center\")\r\n state = dictionary.get(\"state\")\r\n value = dictionary.get(\"value\")\r\n\r\n # Return an object of this model\r\n return cls(alias,\r\n cnam_lookups_enabled,\r\n number_type,\r\n rate_center,\r\n state,\r\n value)", "def from_dictionary(cls,\r\n dictionary):\r\n if dictionary is None:\r\n return None\r\n\r\n # Extract variables from the dictionary\r\n username = dictionary.get('username')\r\n first_name = dictionary.get('firstName')\r\n last_name = dictionary.get('lastName')\r\n application_id = dictionary.get('applicationId')\r\n\r\n # Clean out expected properties from dictionary\r\n for key in cls._names.values():\r\n if key in dictionary:\r\n del dictionary[key]\r\n\r\n # Return an object of this model\r\n return cls(username,\r\n first_name,\r\n last_name,\r\n application_id,\r\n dictionary)", "def from_dict(cls, data):\n return cls(**data)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n 
return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def from_dictionary(cls,\n dictionary):\n if dictionary is None:\n return None\n\n # Extract variables from the dictionary\n height = awsecommerceservice.models.decimal_with_units.DecimalWithUnits.from_dictionary(dictionary.get('Height')) if dictionary.get('Height') else None\n length = awsecommerceservice.models.decimal_with_units.DecimalWithUnits.from_dictionary(dictionary.get('Length')) if dictionary.get('Length') else None\n weight = awsecommerceservice.models.decimal_with_units.DecimalWithUnits.from_dictionary(dictionary.get('Weight')) if dictionary.get('Weight') else None\n width = awsecommerceservice.models.decimal_with_units.DecimalWithUnits.from_dictionary(dictionary.get('Width')) if dictionary.get('Width') else None\n\n # Return an object of this model\n return cls(height,\n length,\n weight,\n width)", "def from_dictionary(cls,\r\n dictionary):\r\n if dictionary is None:\r\n return None\r\n\r\n # Extract variables from the dictionary\r\n setup = idfy_rest_client.models.setup.Setup.from_dictionary(dictionary.get('setup')) if dictionary.get('setup') else None\r\n merge_fields = dictionary.get('mergeFields')\r\n\r\n # 
Clean out expected properties from dictionary\r\n for key in cls._names.values():\r\n if key in dictionary:\r\n del dictionary[key]\r\n\r\n # Return an object of this model\r\n return cls(setup,\r\n merge_fields,\r\n dictionary)", "def from_dictionary(cls,\r\n dictionary):\r\n if dictionary is None:\r\n return None\r\n\r\n # Extract variables from the dictionary\r\n to = dictionary.get('to')\r\n application_id = dictionary.get('applicationId')\r\n expiration_time_in_minutes = dictionary.get('expirationTimeInMinutes')\r\n code = dictionary.get('code')\r\n scope = dictionary.get('scope')\r\n\r\n # Return an object of this model\r\n return cls(to,\r\n application_id,\r\n expiration_time_in_minutes,\r\n code,\r\n scope)", "def from_dictionary(cls,\n dictionary):\n if dictionary is None:\n return None\n\n # Extract variables from the dictionary\n fqdn = dictionary.get('fqdn')\n guid = dictionary.get('guid')\n id = dictionary.get('id')\n name = dictionary.get('name')\n owner_id = dictionary.get('ownerId')\n status = dictionary.get('status')\n total_size_bytes = dictionary.get('totalSizeBytes')\n\n # Return an object of this model\n return cls(\n fqdn,\n guid,\n id,\n name,\n owner_id,\n status,\n total_size_bytes\n)", "def from_dictionary(cls,\n dictionary):\n if dictionary is None:\n return None\n\n # Extract variables from the dictionary\n force_delete = dictionary.get('forceDelete')\n id = dictionary.get('id')\n include_marked_for_removal = dictionary.get('includeMarkedForRemoval')\n retry = dictionary.get('retry')\n\n # Return an object of this model\n return cls(\n force_delete,\n id,\n include_marked_for_removal,\n retry\n)", "def from_dictionary(cls,\r\n dictionary):\r\n if dictionary is None:\r\n return None\r\n\r\n # Extract variables from the dictionary\r\n account_holder_name = dictionary.get('accountHolderName')\r\n routing_number = dictionary.get('routingNumber')\r\n\r\n # Return an object of this model\r\n return cls(account_holder_name,\r\n routing_number)", "def from_dict(cls, d):\n return cls(**d)", "def from_dictionary(cls,\r\n dictionary):\r\n if dictionary is None:\r\n return None\r\n\r\n # Extract variables from the dictionary\r\n ephemeral_public_key = dictionary.get('ephemeral_public_key')\r\n public_key_hash = dictionary.get('public_key_hash')\r\n transaction_id = dictionary.get('transaction_id')\r\n\r\n # Return an object of this model\r\n return cls(ephemeral_public_key,\r\n public_key_hash,\r\n transaction_id)", "def from_dictionary(cls,\r\n dictionary):\r\n if dictionary is None:\r\n return None\r\n\r\n # Extract variables from the dictionary\r\n name = dictionary.get('name')\r\n given_name = dictionary.get('givenName')\r\n middle_name = dictionary.get('middleName')\r\n family_name = dictionary.get('familyName')\r\n address = None\r\n if dictionary.get('address') != None:\r\n address = list()\r\n for structure in dictionary.get('address'):\r\n address.append(finicityapi.models.payroll_address.PayrollAddress.from_dictionary(structure))\r\n\r\n # Clean out expected properties from dictionary\r\n for key in cls._names.values():\r\n if key in dictionary:\r\n del dictionary[key]\r\n\r\n # Return an object of this model\r\n return cls(name,\r\n given_name,\r\n middle_name,\r\n family_name,\r\n address,\r\n dictionary)", "def from_dict(cls, dct):\n return cls(**dct)", "def from_dictionary(cls,\n dictionary):\n if dictionary is None:\n return None\n\n # Extract variables from the dictionary\n mor_item = dictionary.get('morItem')\n mor_type = dictionary.get('morType')\n uuid = 
dictionary.get('uuid')\n\n # Return an object of this model\n return cls(\n mor_item,\n mor_type,\n uuid\n)", "def from_dict(cls, data: Dict[str, any]):\n return cls(**data)", "def from_dict(cls, session: \"FigoSession\", data_dict: dict) -> \"ModelBase\":\n return cls(session, **data_dict)", "def from_dictionary(cls,\n dictionary):\n if dictionary is None:\n return None\n\n # Extract variables from the dictionary\n wind_speed = dictionary.get('windSpeed')\n power = dictionary.get('power')\n\n # Return an object of this model\n return cls(wind_speed,\n power)", "def from_dictionary(cls,\r\n dictionary):\r\n if dictionary is None:\r\n return None\r\n\r\n # Extract variables from the dictionary\r\n consumer_id = dictionary.get('consumerId')\r\n consumer_ssn = dictionary.get('consumerSsn')\r\n event_name = dictionary.get('eventName')\r\n id = dictionary.get('id')\r\n status = dictionary.get('status')\r\n mtype = dictionary.get('type')\r\n\r\n # Clean out expected properties from dictionary\r\n for key in cls._names.values():\r\n if key in dictionary:\r\n del dictionary[key]\r\n\r\n # Return an object of this model\r\n return cls(consumer_id,\r\n consumer_ssn,\r\n event_name,\r\n id,\r\n status,\r\n mtype,\r\n dictionary)", "def from_dictionary(cls,\r\n dictionary):\r\n if dictionary is None:\r\n return None\r\n\r\n # Extract variables from the dictionary\r\n account_id = dictionary.get('AccountId')\r\n name = dictionary.get('Name')\r\n org_no = dictionary.get('OrgNo')\r\n uni_customer_no = dictionary.get('UniCustomerNo')\r\n created = APIHelper.RFC3339DateTime.from_value(dictionary.get(\"Created\")).datetime if dictionary.get(\"Created\") else None\r\n last_modified = APIHelper.RFC3339DateTime.from_value(dictionary.get(\"LastModified\")).datetime if dictionary.get(\"LastModified\") else None\r\n dealer_id = dictionary.get('DealerId')\r\n dealer_name = dictionary.get('DealerName')\r\n dealer_reference = dictionary.get('DealerReference')\r\n enabled = dictionary.get('Enabled')\r\n\r\n # Clean out expected properties from dictionary\r\n for key in cls._names.values():\r\n if key in dictionary:\r\n del dictionary[key]\r\n\r\n # Return an object of this model\r\n return cls(account_id,\r\n name,\r\n org_no,\r\n uni_customer_no,\r\n created,\r\n last_modified,\r\n dealer_id,\r\n dealer_name,\r\n dealer_reference,\r\n enabled,\r\n dictionary)", "def from_dictionary(cls,\r\n dictionary):\r\n if dictionary is None:\r\n return None\r\n\r\n # Extract variables from the dictionary\r\n id = dictionary.get('id')\r\n title = dictionary.get('title')\r\n device_group = dictionary.get('deviceGroup')\r\n create_on = dictionary.get('createOn')\r\n update_on = dictionary.get('updateOn')\r\n created_by = dictionary.get('createdBy')\r\n create_on_persian_date = dictionary.get('createOnPersianDate')\r\n update_on_persian_date = dictionary.get('updateOnPersianDate')\r\n device_type_brand_model_title = dictionary.get('deviceTypeBrandModelTitle')\r\n device_brand_ids = dictionary.get('deviceBrandIds')\r\n device_type_id = dictionary.get('deviceTypeId')\r\n device_brand_id = dictionary.get('deviceBrandId')\r\n updated_by = dictionary.get('updatedBy')\r\n device_type_title = dictionary.get('deviceTypeTitle')\r\n device_brand_title = dictionary.get('deviceBrandTitle')\r\n device_brands_title = dictionary.get('deviceBrandsTitle')\r\n\r\n # Return an object of this model\r\n return cls(id,\r\n title,\r\n device_group,\r\n create_on,\r\n update_on,\r\n created_by,\r\n create_on_persian_date,\r\n update_on_persian_date,\r\n 
device_type_brand_model_title,\r\n device_brand_ids,\r\n device_type_id,\r\n device_brand_id,\r\n updated_by,\r\n device_type_title,\r\n device_brand_title,\r\n device_brands_title)", "def from_dictionary(cls,\r\n dictionary):\r\n if dictionary is None:\r\n return None\r\n\r\n # Extract variables from the dictionary\r\n password = dictionary.get('password')\r\n name = dictionary.get('name')\r\n tag_ids = dictionary.get('tagIds')\r\n eld_adverse_weather_exemption_enabled = dictionary.get('eldAdverseWeatherExemptionEnabled')\r\n eld_big_day_exemption_enabled = dictionary.get('eldBigDayExemptionEnabled')\r\n eld_day_start_hour = dictionary.get('eldDayStartHour')\r\n eld_exempt = dictionary.get('eldExempt')\r\n eld_exempt_reason = dictionary.get('eldExemptReason')\r\n eld_pc_enabled = dictionary.get(\"eldPcEnabled\") if dictionary.get(\"eldPcEnabled\") else False\r\n eld_ym_enabled = dictionary.get(\"eldYmEnabled\") if dictionary.get(\"eldYmEnabled\") else False\r\n external_ids = dictionary.get('externalIds')\r\n group_id = dictionary.get('groupId')\r\n license_number = dictionary.get('licenseNumber')\r\n license_state = dictionary.get('licenseState')\r\n notes = dictionary.get('notes')\r\n phone = dictionary.get('phone')\r\n username = dictionary.get('username')\r\n vehicle_id = dictionary.get('vehicleId')\r\n\r\n # Return an object of this model\r\n return cls(password,\r\n name,\r\n tag_ids,\r\n eld_adverse_weather_exemption_enabled,\r\n eld_big_day_exemption_enabled,\r\n eld_day_start_hour,\r\n eld_exempt,\r\n eld_exempt_reason,\r\n eld_pc_enabled,\r\n eld_ym_enabled,\r\n external_ids,\r\n group_id,\r\n license_number,\r\n license_state,\r\n notes,\r\n phone,\r\n username,\r\n vehicle_id)", "def from_dictionary(cls,\n dictionary):\n if dictionary is None:\n return None\n\n # Extract variables from the dictionary\n env_type = dictionary.get('envType')\n protected_count = dictionary.get('protectedCount')\n protected_size_bytes = dictionary.get('protectedSizeBytes')\n unprotected_count = dictionary.get('unprotectedCount')\n unprotected_size_bytes = dictionary.get('unprotectedSizeBytes')\n\n # Return an object of this model\n return cls(\n env_type,\n protected_count,\n protected_size_bytes,\n unprotected_count,\n unprotected_size_bytes\n)", "def from_dictionary(cls,\n dictionary):\n if dictionary is None:\n return None\n\n # Extract variables from the dictionary\n is_mail_enabled = dictionary.get('isMailEnabled')\n is_security_enabled = dictionary.get('isSecurityEnabled')\n member_count = dictionary.get('memberCount')\n visibility = dictionary.get('visibility')\n\n # Return an object of this model\n return cls(\n is_mail_enabled,\n is_security_enabled,\n member_count,\n visibility\n)", "def from_dict(cls, dictionary):\n normalised = cls.normalise_dict(dictionary)\n return cls.from_normalised_dict(normalised)", "def from_dictionary(cls,\r\n dictionary):\r\n if dictionary is None:\r\n return None\r\n\r\n # Extract variables from the dictionary\r\n name = dictionary.get('name')\r\n time_zone = dictionary.get('timeZone')\r\n tags = dictionary.get('tags')\r\n disable_my_meraki_com = dictionary.get('disableMyMerakiCom')\r\n disable_remote_status_page = dictionary.get('disableRemoteStatusPage')\r\n enrollment_string = dictionary.get('enrollmentString')\r\n\r\n # Return an object of this model\r\n return cls(name,\r\n time_zone,\r\n tags,\r\n disable_my_meraki_com,\r\n disable_remote_status_page,\r\n enrollment_string)", "def from_dictionary(cls,\r\n dictionary):\r\n if dictionary is None:\r\n 
return None\r\n\r\n # Extract variables from the dictionary\r\n name = dictionary.get('name')\r\n lan_ip = dictionary.get('lanIp')\r\n uplink = dictionary.get('uplink')\r\n public_port = dictionary.get('publicPort')\r\n local_port = dictionary.get('localPort')\r\n allowed_ips = dictionary.get('allowedIps')\r\n protocol = dictionary.get('protocol')\r\n\r\n # Return an object of this model\r\n return cls(name,\r\n lan_ip,\r\n uplink,\r\n public_port,\r\n local_port,\r\n allowed_ips,\r\n protocol)", "def from_dictionary(cls,\r\n dictionary):\r\n if dictionary is None:\r\n return None\r\n\r\n # Extract variables from the dictionary\r\n found = dictionary.get('found')\r\n displaying = dictionary.get('displaying')\r\n more_available = dictionary.get('moreAvailable')\r\n created_date = dictionary.get('createdDate')\r\n institutions = None\r\n if dictionary.get('institutions') != None:\r\n institutions = list()\r\n for structure in dictionary.get('institutions'):\r\n institutions.append(finicityapi.models.institution.Institution.from_dictionary(structure))\r\n\r\n # Clean out expected properties from dictionary\r\n for key in cls._names.values():\r\n if key in dictionary:\r\n del dictionary[key]\r\n\r\n # Return an object of this model\r\n return cls(found,\r\n displaying,\r\n more_available,\r\n created_date,\r\n institutions,\r\n dictionary)", "def from_dictionary(cls,\n dictionary):\n if dictionary is None:\n return None\n\n # Extract variables from the dictionary\n backup_file_path = dictionary.get('backupFilePath')\n excluded_file_paths = dictionary.get(\"excludedFilePaths\")\n skip_nested_volumes = dictionary.get('skipNestedVolumes')\n\n # Return an object of this model\n return cls(\n backup_file_path,\n excluded_file_paths,\n skip_nested_volumes\n)", "def from_dict(cls, data):\r\n instance = cls()\r\n for key, value in data.items():\r\n instance.__dict__[key] = value\r\n return instance", "def from_dictionary(cls,\n dictionary):\n if dictionary is None:\n return None\n\n # Extract variables from the dictionary\n lowest_new_price = awsecommerceservice.models.price.Price.from_dictionary(dictionary.get('LowestNewPrice')) if dictionary.get('LowestNewPrice') else None\n lowest_used_price = awsecommerceservice.models.price.Price.from_dictionary(dictionary.get('LowestUsedPrice')) if dictionary.get('LowestUsedPrice') else None\n lowest_collectible_price = awsecommerceservice.models.price.Price.from_dictionary(dictionary.get('LowestCollectiblePrice')) if dictionary.get('LowestCollectiblePrice') else None\n lowest_refurbished_price = awsecommerceservice.models.price.Price.from_dictionary(dictionary.get('LowestRefurbishedPrice')) if dictionary.get('LowestRefurbishedPrice') else None\n total_new = dictionary.get('TotalNew')\n total_used = dictionary.get('TotalUsed')\n total_collectible = dictionary.get('TotalCollectible')\n total_refurbished = dictionary.get('TotalRefurbished')\n\n # Return an object of this model\n return cls(lowest_new_price,\n lowest_used_price,\n lowest_collectible_price,\n lowest_refurbished_price,\n total_new,\n total_used,\n total_collectible,\n total_refurbished)", "def from_dict(cls, dictionary):\n instance = cls()\n for key, value in dictionary.items():\n instance.__dict__[key] = value\n\n return instance", "def from_dictionary(cls,\r\n dictionary):\r\n if dictionary is None:\r\n return None\r\n\r\n # Extract variables from the dictionary\r\n door_closed = dictionary.get('doorClosed')\r\n id = dictionary.get('id')\r\n name = dictionary.get('name')\r\n\r\n # Return an object 
of this model\r\n return cls(door_closed,\r\n id,\r\n name)", "def from_dictionary(cls,\n dictionary):\n if dictionary is None:\n return None\n\n # Extract variables from the dictionary\n id = dictionary.get('id')\n provider_id = dictionary.get('providerId')\n server_time = dictionary.get('serverTime')\n username = dictionary.get('username')\n driver_license_number = dictionary.get('driverLicenseNumber')\n country = dictionary.get('country')\n region = dictionary.get('region')\n driver_home_terminal = dictionary.get('driverHomeTerminal')\n\n # Return an object of this model\n return cls(id,\n provider_id,\n server_time,\n username,\n driver_license_number,\n country,\n region,\n driver_home_terminal)", "def from_dict(cls, data):\n instance = cls()\n instance._set_data(data)\n return instance", "def from_dictionary(cls,\n dictionary):\n if dictionary is None:\n return None\n\n # Extract variables from the dictionary\n is_group_site = dictionary.get('isGroupSite')\n is_private_channel_site = dictionary.get('isPrivateChannelSite')\n is_team_site = dictionary.get('isTeamSite')\n\n # Return an object of this model\n return cls(\n is_group_site,\n is_private_channel_site,\n is_team_site\n)", "def from_dictionary(cls,\n dictionary):\n if dictionary is None:\n return None\n\n # Extract variables from the dictionary\n id = dictionary.get('id')\n provider_id = dictionary.get('providerId')\n server_time = dictionary.get('serverTime')\n active_from = dictionary.get('activeFrom')\n active_to = dictionary.get('activeTo')\n rpm_over_value = dictionary.get('rpmOverValue')\n over_speed_value = dictionary.get('overSpeedValue')\n excess_speed_value = dictionary.get('excessSpeedValue')\n long_idle_value = dictionary.get('longIdleValue')\n hi_throttle_value = dictionary.get('hiThrottleValue')\n\n # Return an object of this model\n return cls(id,\n provider_id,\n server_time,\n active_from,\n active_to,\n rpm_over_value,\n over_speed_value,\n excess_speed_value,\n long_idle_value,\n hi_throttle_value)", "def from_dict(cls, dikt) -> 'ModelClass':\n return util.deserialize_model(dikt, cls)", "def from_dictionary(cls,\n dictionary):\n if dictionary is None:\n return None\n\n # Extract variables from the dictionary\n action = dictionary.get('action')\n cluster_info = dictionary.get('clusterInfo')\n details = dictionary.get('details')\n domain = dictionary.get('domain')\n entity_id = dictionary.get('entityId')\n entity_name = dictionary.get('entityName')\n entity_type = dictionary.get('entityType')\n human_timestamp = dictionary.get('humanTimestamp')\n impersonation = dictionary.get('impersonation')\n ip = dictionary.get('ip')\n new_record = dictionary.get('newRecord')\n original_tenant = cohesity_management_sdk.models.tenant.Tenant.from_dictionary(dictionary.get('originalTenant')) if dictionary.get('originalTenant') else None\n previous_record = dictionary.get('previousRecord')\n tenant = cohesity_management_sdk.models.tenant.Tenant.from_dictionary(dictionary.get('tenant')) if dictionary.get('tenant') else None\n timestamp_usecs = dictionary.get('timestampUsecs')\n user_name = dictionary.get('userName')\n\n # Return an object of this model\n return cls(\n action,\n cluster_info,\n details,\n domain,\n entity_id,\n entity_name,\n entity_type,\n human_timestamp,\n impersonation,\n ip,\n new_record,\n original_tenant,\n previous_record,\n tenant,\n timestamp_usecs,\n user_name\n)", "def from_dictionary(cls,\n dictionary):\n if dictionary is None:\n return None\n\n # Extract variables from the dictionary\n api_group = 
dictionary.get('apiGroup')\n api_version = dictionary.get('apiVersion')\n kind = dictionary.get('kind')\n name = dictionary.get('name')\n namespace = dictionary.get('namespace')\n resource_version = dictionary.get('resourceVersion')\n uid = dictionary.get('uid')\n\n # Return an object of this model\n return cls(\n api_group,\n api_version,\n kind,\n name,\n namespace,\n resource_version,\n uid\n)", "def from_dictionary(cls,\r\n dictionary):\r\n if dictionary is None:\r\n return None\r\n\r\n # Extract variables from the dictionary\r\n id = dictionary.get('id')\r\n consumer_id = dictionary.get('consumerId')\r\n consumer_ssn = dictionary.get('consumerSsn')\r\n requester_name = dictionary.get('requesterName')\r\n request_id = dictionary.get('requestId')\r\n constraints = finicityapi.models.report_constraints.ReportConstraints.from_dictionary(dictionary.get('constraints')) if dictionary.get('constraints') else None\r\n mtype = dictionary.get('type')\r\n status = dictionary.get('status')\r\n created_date = dictionary.get('createdDate')\r\n\r\n # Clean out expected properties from dictionary\r\n for key in cls._names.values():\r\n if key in dictionary:\r\n del dictionary[key]\r\n\r\n # Return an object of this model\r\n return cls(id,\r\n consumer_id,\r\n consumer_ssn,\r\n requester_name,\r\n request_id,\r\n constraints,\r\n mtype,\r\n status,\r\n created_date,\r\n dictionary)", "def from_dictionary(cls,\n dictionary):\n if dictionary is None:\n return None\n\n # Extract variables from the dictionary\n address = dictionary.get('address')\n port = dictionary.get('port')\n protocol = dictionary.get('protocol')\n is_cluster_auditing_enabled = dictionary.get('isClusterAuditingEnabled')\n is_data_protection_enabled = dictionary.get('isDataProtectionEnabled')\n is_filer_auditing_enabled = dictionary.get('isFilerAuditingEnabled')\n is_ssh_log_enabled = dictionary.get('isSshLogEnabled')\n name = dictionary.get('name')\n\n # Return an object of this model\n return cls(address,\n port,\n protocol,\n is_cluster_auditing_enabled,\n is_data_protection_enabled,\n is_filer_auditing_enabled,\n is_ssh_log_enabled,\n name)", "def from_dictionary(cls,\r\n dictionary):\r\n if dictionary is None:\r\n return None\r\n\r\n # Extract variables from the dictionary\r\n enabled = dictionary.get('enabled')\r\n spare_serial = dictionary.get('spareSerial')\r\n uplink_mode = dictionary.get('uplinkMode')\r\n virtual_ip_1 = dictionary.get('virtualIp1')\r\n virtual_ip_2 = dictionary.get('virtualIp2')\r\n\r\n # Return an object of this model\r\n return cls(enabled,\r\n spare_serial,\r\n uplink_mode,\r\n virtual_ip_1,\r\n virtual_ip_2)" ]
[ "0.8317533", "0.81689686", "0.81689686", "0.81188685", "0.808892", "0.7977596", "0.7949056", "0.79225504", "0.78983366", "0.7893884", "0.7887517", "0.7882494", "0.7881197", "0.7876339", "0.78581846", "0.78387725", "0.78037786", "0.78037786", "0.78037786", "0.78037786", "0.78037786", "0.78037786", "0.78037786", "0.78037786", "0.78037786", "0.78037786", "0.78037786", "0.78037786", "0.78037786", "0.78037786", "0.78037786", "0.78037786", "0.78037786", "0.78037786", "0.78037786", "0.78037786", "0.78037786", "0.78037786", "0.78037786", "0.78037786", "0.78037786", "0.78037786", "0.78037786", "0.78037786", "0.78037786", "0.78037786", "0.78037786", "0.78037786", "0.78037786", "0.78037786", "0.78037786", "0.78037786", "0.78037786", "0.78037786", "0.78037786", "0.78037786", "0.78037786", "0.78037786", "0.78037786", "0.78037786", "0.78037786", "0.7781262", "0.77776235", "0.7759004", "0.7742808", "0.7733484", "0.7704501", "0.76979876", "0.76804006", "0.7669518", "0.7665353", "0.76598775", "0.7659742", "0.7650863", "0.7629648", "0.7602984", "0.75930196", "0.7592132", "0.75897974", "0.75896966", "0.75797164", "0.75670844", "0.75665504", "0.7563158", "0.7495614", "0.7491285", "0.74897933", "0.7489466", "0.74845934", "0.74661463", "0.7464368", "0.7464251", "0.7455935", "0.7454465", "0.74472356", "0.74087965", "0.74003285", "0.7390348", "0.73875004", "0.7384281" ]
0.7370376
100
Operator for single operand
def _arithmetize1(self, operand: Any, op: str) -> Any:
    op_func = getattr(operator, op)
    # Data length might be changed after evaluation
    # operand = recycle_value(operand, self.data.shape[0])
    return op_func(operand)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def operator(self):\n return self.__operator", "def operator(self):\n col = self.pos\n operators = [\"||\", \"&&\", \">>\", \"<<\", \"!=\", \">=\", \"<=\", \"==\", \"##\"] + \\\n [\"-\", \"+\", \"!\", \"*\", \"/\", \"|\", \"&\", \"^\", \"<\", \">\", \"?\", \":\", \"~\", \"#\", \"=\", \"%\"]\n try:\n index = self.match_any(operators)\n\n op = Operator(self.line, col, self.prev_white, operators[index])\n return op\n except TokenError:\n self.pos = col\n raise TokenError(\"Invalid operator.\")", "def applyOperator(self, operand1, operand2, operator):\n\n if operator == \"*\":\n return operand1 * operand2\n elif operator == \"/\":\n return operand1 / operand2\n elif operator == \"+\":\n return operand1 + operand2\n else:\n return operand1 - operand2", "def my_operator(self):\n return self._my_operator", "def op(self) -> Literal[\"==\"] | Literal[\"<=\"] | Literal[\">=\"]:\n ...", "def binary_operator(cls, quad):\n\t\tleft_op = cls.get_address_value(quad.left_operand)\n\t\tright_op = cls.get_address_value(quad.right_operand)\n\t\tresult = cls.execute_binary_operator(quad.operator, left_op, right_op)\n\t\tcls.set_address_value(quad.result, result)", "def perform_operation(operator, num_1, num_2):\n\n if operator == \"*\":\n return num_1 * num_2\n if operator == \"+\":\n return num_1 + num_2\n if operator == \"-\":\n return num_1 - num_2\n if operator == \"/\":\n return num_1 / num_2", "def op(self):\n return self.__op", "def op(self):\n return self.__op", "def reduce(self, binary_operator):\n return functools.reduce(binary_operator, self)", "def get_operator(self):\n if len(self) == 1:\n return self[0].get_operator()\n op = np.array(self._get_array_of_operators())\n return np.sum(op, axis=0)", "def gen_binop(self, expr: expressions.BinaryOperator):\n if expr.op in [\"*\", \"/\", \"%\", \"^\", \"|\", \"&\", \">>\", \"<<\"]:\n lhs = self.gen_expr(expr.a, rvalue=True)\n rhs = self.gen_expr(expr.b, rvalue=True)\n op = expr.op\n\n ir_typ = self.get_ir_type(expr.typ)\n value = self.builder.emit_binop(lhs, op, rhs, ir_typ)\n elif expr.op == \",\":\n # Handle the comma operator by returning the second result\n self.gen_expr(expr.a, rvalue=True)\n rhs = self.gen_expr(expr.b, rvalue=True)\n value = rhs\n elif expr.op == \"+\":\n # Pay attention to pointer arithmetics!\n lhs = self.gen_expr(expr.a, rvalue=True)\n rhs = self.gen_expr(expr.b, rvalue=True)\n\n # left and right are swapped in semantics if right is pointer.\n if expr.a.typ.is_pointer:\n assert expr.b.typ.is_integer\n esize = self.sizeof(expr.a.typ.element_type)\n assert esize > 0\n if esize != 1:\n esize = self.emit(ir.Const(esize, \"esize\", rhs.ty))\n rhs = self.builder.emit_mul(rhs, esize, rhs.ty)\n rhs = self.builder.emit_cast(rhs, ir.ptr)\n\n ir_typ = self.get_ir_type(expr.typ)\n value = self.builder.emit_binop(lhs, \"+\", rhs, ir_typ)\n elif expr.op == \"-\":\n # Pay attention to pointer arithmetics!\n lhs = self.gen_expr(expr.a, rvalue=True)\n rhs = self.gen_expr(expr.b, rvalue=True)\n ir_typ = self.get_ir_type(expr.typ)\n if expr.a.typ.is_pointer:\n esize = self.sizeof(expr.a.typ.element_type)\n assert esize > 0\n if expr.b.typ.is_pointer:\n # pointer - pointer\n value = self.builder.emit_binop(lhs, \"-\", rhs, ir.ptr)\n value = self.emit(ir.Cast(value, \"typecast\", ir_typ))\n if esize != 1:\n esize = self.emit(ir.Const(esize, \"esize\", ir_typ))\n value = self.emit(\n ir.Binop(value, \"/\", esize, \"rhs\", ir_typ)\n )\n else:\n # pointer - numeric\n if esize != 1:\n esize = self.emit(ir.Const(esize, \"esize\", rhs.ty))\n rhs = 
self.builder.emit_mul(rhs, esize, rhs.ty)\n rhs = self.builder.emit_cast(rhs, ir.ptr)\n value = self.builder.emit_binop(lhs, \"-\", rhs, ir_typ)\n else:\n # numeric - numeric\n value = self.builder.emit_binop(lhs, \"-\", rhs, ir_typ)\n\n elif expr.op in [\"<\", \">\", \"==\", \"!=\", \"<=\", \">=\", \"||\", \"&&\"]:\n value = self.gen_condition_to_integer(expr)\n elif expr.op in [\n \"=\",\n \"+=\",\n \"-=\",\n \"*=\",\n \"%=\",\n \"/=\",\n \">>=\",\n \"<<=\",\n \"&=\",\n \"|=\",\n \"~=\",\n \"^=\",\n ]:\n # Handle struct assignment special case:\n if expr.op == \"=\" and expr.a.typ.is_struct:\n lhs = self.gen_expr(expr.a, rvalue=False)\n rhs = self.gen_expr(expr.b, rvalue=False)\n amount = self.sizeof(expr.a.typ)\n self.gen_copy_struct(lhs, rhs, amount)\n value = None\n else:\n lhs = self.gen_expr(expr.a, rvalue=False)\n rhs = self.gen_expr(expr.b, rvalue=True)\n\n if expr.op == \"=\":\n value = rhs\n else:\n # Handle '+=' and friends:\n op = expr.op[:-1]\n ir_typ = self.get_ir_type(expr.typ)\n loaded = self._load_value(lhs, expr.typ)\n\n # pointer arithmatic:\n if op in [\"+\", \"-\"] and expr.a.typ.is_pointer:\n esize = self.sizeof(expr.a.typ.element_type)\n assert esize > 0\n if esize != 1:\n esize = self.emit(ir.Const(esize, \"esize\", rhs.ty))\n rhs = self.builder.emit_mul(rhs, esize, rhs.ty)\n\n value = self.builder.emit_binop(loaded, op, rhs, ir_typ)\n self._store_value(value, lhs)\n else: # pragma: no cover\n raise NotImplementedError(str(expr.op))\n return value", "def operator(self):\n return self.data.get('operator', 'and')", "def operator(self):\n return self.data.get('operator', 'and')", "def and_or_operator(cls, quad):\n\t\tleft_op = cls.get_address_value(quad.left_operand)\n\t\tright_op = cls.get_address_value(quad.right_operand)\n\t\t# TODO: The next set of lines will fail at a specific case\n\t\tif quad.operator == 10 :\n\t\t\tcls.set_address_value(quad.result, (left_op and right_op))\n\t\telif quad.operator == 11 :\n\t\t\tcls.set_address_value(quad.result, (left_op or right_op))", "def do_math(operator, op1, op2):\n if operator == \"*\":\n return op1 * op2\n if operator == \"/\":\n return op1 / op2\n if operator == \"+\":\n return op1 + op2\n if operator == \"-\":\n return op1 - op2\n if operator == \"^\":\n return op1**(op2)", "def op(self):\n\n return self._op", "def visit_BinaryOp(self, node):\n token = node.token\n if token.type == PLUS:\n return self.visit(node.left) + self.visit(node.right)\n if token.type == MINUS:\n return self.visit(node.left) - self.visit(node.right)\n if token.type == MUL:\n return self.visit(node.left) * self.visit(node.right)\n if token.type == DIV:\n result = self.visit(node.left) / self.visit(node.right)\n if result.is_integer():\n return int(result)\n return result\n self.raise_error()", "def operator_lhs(self, inp):\n assert self.operator is not None, \\\n \"Please set an operator with the set_operation method\"\n\n return self.operator_rhs(self.operator.forward(inp))", "def __or__(self, other):\r\n return self + other - self * other", "def execute_binary_operator(cls, val, x, y):\n\n\t\tif val == 0:\n\t\t\treturn operator.add(x,y)\n\t\telif val == 1:\n\t\t\treturn operator.sub(x,y)\n\t\telif val == 2:\n\t\t\treturn operator.mul(x,y)\n\t\telif val == 3:\n\t\t\treturn operator.div(x,y)\n\t\telif val == 4:\n\t\t\treturn operator.lt(x,y)\n\t\telif val == 5:\n\t\t\treturn operator.gt(x,y)\n\t\telif val == 6:\n\t\t\treturn operator.le(x,y)\n\t\telif val == 7:\n\t\t\treturn operator.ge(x,y)\n\t\telif val == 8:\n\t\t\treturn 
operator.eq(x,y)\n\t\telif val == 9:\n\t\t\treturn operator.ne(x,y)\n\t\telif val == 12:\n\t\t\treturn operator.mod(x,y)", "def is_arithmetic_op(self):\r\n return self.value in [\"+\", \"-\"]", "def _binaryop(self, other, op: str):\n raise NotImplementedError", "def operator(self) -> str:\n return self._operator", "def binary_operator(op):\n # When combining a Factor with a NumericalExpression, we use this\n # attrgetter instance to defer to the commuted implementation of the\n # NumericalExpression operator.\n commuted_method_getter = attrgetter(method_name_for_op(op, commute=True))\n\n def binary_operator(self, other):\n # This can't be hoisted up a scope because the types returned by\n # binop_return_type aren't defined when the top-level function is\n # invoked in the class body of Factor.\n return_type = binop_return_type(op)\n if isinstance(self, NumExprFactor):\n self_expr, other_expr, new_inputs = self.build_binary_op(\n op, other,\n )\n return return_type(\n \"({left}) {op} ({right})\".format(\n left=self_expr,\n op=op,\n right=other_expr,\n ),\n new_inputs,\n )\n elif isinstance(other, NumExprFactor):\n # NumericalExpression overrides ops to correctly handle merging of\n # inputs. Look up and call the appropriate reflected operator with\n # ourself as the input.\n return commuted_method_getter(other)(self)\n elif isinstance(other, Factor):\n if self is other:\n return return_type(\n \"x_0 {op} x_0\".format(op=op),\n (self,),\n )\n return return_type(\n \"x_0 {op} x_1\".format(op=op),\n (self, other),\n )\n elif isinstance(other, Number):\n return return_type(\n \"x_0 {op} ({constant})\".format(op=op, constant=other),\n binds=(self,),\n )\n raise BadBinaryOperator(op, self, other)\n\n binary_operator.__doc__ = \"Binary Operator: '%s'\" % op\n return binary_operator", "def calculate_expression(number1, number2, operator):\n\n if operator == '+':\n return number1 + number2\n elif operator == '-':\n return number1 - number2\n elif operator == '*':\n return number1 * number2", "def evaluate(self, operand: object) -> bool:\n pass", "def operator(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"operator\")", "def operator(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"operator\")", "def operator(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"operator\")", "def _arithmetize2(self, left: Any, right: Any, op: str) -> Any:\n op_func = getattr(operator, op)\n left, right = _recycle_left_right(left, right)\n return op_func(left, right)", "def operator(self) -> Optional[LogicalOperator]:\n return self.__operator", "def binary_operator_string(self, binary):\n return binary.operator == '%' and 'mod' or binary.operator", "def binop(x, y, op):\n\n if (x is None) and (y is None):\n return None\n\n x = x if (x is not None) else 0\n y = y if (y is not None) else 0\n return op(x,y)", "def __mul__(self, other):\n return And(self, other)", "def binary(op, l, r):\n if op == \"+\": return l + r\n if op == \"*\": return l * r\n if op == \"-\": return l - r\n if op == \"=\": return l == r\n if op == \"<>\": return l != r\n if op == \"!=\": return l != r\n if op == \"or\": return l or r\n if op == \"<\": return l < r\n if op == \">\": return l > r\n if op == \"/\": return l / r\n if op == \"and\": return bool(l and r)\n if op == \"in\": return l in r\n if op == \"==\": return l == r\n if op == \"<=\": return l <= r\n if op == \">=\": return l >= r\n raise Exception(\"binary op not implemented\")", "def evaluate_op(op, first, second):\n\n if op in ops_prec.keys():\n if op == '+':\n 
output = first + second\n elif op == '-':\n output = first - second\n elif op == '*':\n output = first * second\n elif op == '/' and second != 0:\n output = first / second\n else:\n print \"there's some error, maybe div/0?\"\n output = None\n return output", "def __and__(self, other: Any) -> Operators:\n return self.operate(and_, other)", "def unary(op, v):\n if op == \"+\":\n return v\n if op == \"-\":\n return -v\n if op.lower() == \"not\":\n return not(v)\n raise Exception(\"unary op not implemented\")", "def all_math(operator):\n a = int(request.args.get(\"a\"))\n b = int(request.args.get(\"b\"))\n return str(functions[operator](a,b))", "def _op(\n x: Union[bool, dts.Boolean, tps.BooleanValue],\n y: Union[bool, dts.Boolean, tps.BooleanValue],\n ) -> T:", "def parse_single_op_string(opstring) :\n ops = {'+' : \"plus\",\n '?' : \"opt\" , \n '*' : \"star\"}\n return '('.join(ops[c] for c in reversed(opstring)) + '('", "def calculate(operandOne, operandTwo, operation):\r\n if operation == '+':\r\n return operandOne + operandTwo\r\n elif operation == '-':\r\n return operandOne - operandTwo\r\n elif operation == '*':\r\n return operandOne * operandTwo\r\n elif operation == '/':\r\n return operandOne // operandTwo", "def _op1(self: 'SampledFieldType', operator: Callable) -> 'SampledFieldType':\n values = operator(self.values)\n extrapolation_ = operator(self._extrapolation)\n return self.with_values(values).with_extrapolation(extrapolation_)", "def _remove_operator(self, operator):", "def conditional_value(self) -> global___Expression.ConditionalOperator:", "def __or__(self, other: Any) -> Operators:\n return self.operate(or_, other)", "def make_op1(op, expr):\n\n if (op == None) or (expr == None):\n return None\n\n if op == 'NOT':\n op = '!'\n if is_assembler('beebasm') and (op == '!'):\n if isinstance(expr, utils.LazyString):\n return utils.LazyString(\"NOT(%s)\", expr)\n return 'NOT(' + expr + ')'\n if isinstance(expr, utils.LazyString):\n return utils.LazyString(\"%s%s\", op, bracket(expr))\n return op + bracket(expr)", "def visit_UnaryOp(self, node):\n token = node.token\n if token.type == PLUS:\n return self.visit(node.right)\n if token.type == MINUS:\n return -1 * self.visit(node.right)\n self.raise_error()", "def fun(op, v1, v2):\n if op == '+':\n return v1+v2\n elif op == '-':\n return v1-v2\n elif op == '*':\n return v1*v2\n elif op == '/':\n return v1", "def _apply_binary_op_elementwise(\n self: ConcreteStructuredMetricValue, other: ConcreteStructuredMetricValue,\n op: Callable[[float, float], float]) -> ConcreteStructuredMetricValue:\n ...", "def _op(\n x: Union[int, float, dts.Number, tps.NumericValue],\n y: Union[int, float, dts.Number, tps.NumericValue],\n ) -> T:", "def do_oprn(self, *args, operator=None, **kwargs):\n\t\tself.operator = operator\n\n\t\tif not self.operator:\n\t\t\treturn f'No operator provided'\n\n\t\tif self.operator == '+':\n\t\t\treturn self.sum(*args, **kwargs)\n\t\telif self.operator == '-':\n\t\t\treturn self.subtract(*args, **kwargs)\n\t\telif self.operator == '*':\n\t\t\treturn self.multiple(*args, **kwargs)\n\t\telif self.operator == '/':\n\t\t\treturn self.division(*args, **kwargs)\n\t\telse:\n\t\t\treturn f'Currently Operator ({operator}) is not Applicable'", "def getop(op):\n # y is search argument, x is the record's value\n ops = {\n \"==\": lambda y,x: x == y,\n \"!=\": lambda y,x: x != y,\n \">\": lambda y,x: x > y,\n \"<\": lambda y,x: x < y,\n \">=\": lambda y,x: x >= y,\n \"<=\": lambda y,x: x <= y,\n 'any': lambda y,x: x != None,\n 'noop': 
lambda y,x: True,\n 'starts': lambda y,x: unicode(y).lower() in unicode(x).lower(),\n }\n return ops[SYNONYMS.get(op, op)]", "def to_operator(self) -> Operator:\n return Operator(self.to_instruction())", "def less_than_or_equal(self) -> global___Expression:", "def test_operator(self):\n\t\tfor op in self.ops:\n\t\t\tself.filter.set_operator(op)\n\t\t\tself.assertEqual(self.filter.operator.value, op)", "def test_operator_adapt(self):\n\n # test string concatenation\n expr = test_table.c.data + \"somedata\"\n assert testing.db.execute(select([expr])).scalar() == \"somedatasomedata\"\n\n expr = test_table.c.id + 15\n assert testing.db.execute(select([expr])).scalar() == 16\n\n # test custom operator conversion\n expr = test_table.c.avalue + 40\n assert expr.type.__class__ is test_table.c.avalue.type.__class__\n\n # value here is calculated as (250 - 40) / 10 = 21\n # because \"40\" is an integer, not an \"avalue\"\n assert testing.db.execute(select([expr.label('foo')])).scalar() == 21\n\n expr = test_table.c.avalue + literal(40, type_=MyCustomType)\n \n # + operator converted to -\n # value is calculated as: (250 - (40 * 10)) / 10 == -15\n assert testing.db.execute(select([expr.label('foo')])).scalar() == -15\n\n # this one relies upon anonymous labeling to assemble result\n # processing rules on the column.\n assert testing.db.execute(select([expr])).scalar() == -15", "def reflected_binary_operator(op):\n assert not is_comparison(op)\n\n def reflected_binary_operator(self, other):\n\n if isinstance(self, NumericalExpression):\n self_expr, other_expr, new_inputs = self.build_binary_op(\n op, other\n )\n return NumExprFactor(\n \"({left}) {op} ({right})\".format(\n left=other_expr,\n right=self_expr,\n op=op,\n ),\n new_inputs,\n )\n\n # Only have to handle the numeric case because in all other valid cases\n # the corresponding left-binding method will be called.\n elif isinstance(other, Number):\n return NumExprFactor(\n \"{constant} {op} x_0\".format(op=op, constant=other),\n binds=(self,),\n )\n raise BadBinaryOperator(op, other, self)\n return reflected_binary_operator", "def __or__(self, other):\n return MyCustomNumber(self.value | other.value)", "def calc(operand_1, operand_2):\n return operand_1 + operand_2", "def calc(operand_1, operand_2):\n return operand_1 + operand_2", "def visit_UnaryOperator(self, node: UnaryOperator) -> Constant:\n\n operator = node.operator.type\n if operator == TokenType.PLUS:\n expression = self.visit(node.expression)\n return Constant(DoubleType(), float(+expression.constant))\n elif operator == TokenType.MINUS:\n expression = self.visit(node.expression)\n return Constant(DoubleType(), float(-expression.constant))", "def is_operator(obj):\n return isinstance(obj, Token) and obj[0] not in '/01234567890+-.<[('", "def test_unary_op_support():\n check_peval_expression(\"+(2)\", {}, \"2\", fully_evaluated=True, expected_value=2)\n check_peval_expression(\"-(-3)\", {}, \"3\", fully_evaluated=True, expected_value=3)\n check_peval_expression_bool(\"not 0\", {}, True)\n check_peval_expression(\"~(-4)\", {}, \"3\", fully_evaluated=True, expected_value=3)", "def isOperator(self):\n return _libsbml.ASTNode_isOperator(self)", "def operator(self) -> str:\n return pulumi.get(self, \"operator\")", "def operator(self) -> str:\n return pulumi.get(self, \"operator\")", "def operator(self) -> Optional[str]:\n return pulumi.get(self, \"operator\")", "def operator(self) -> Optional[str]:\n return pulumi.get(self, \"operator\")", "def operator(self) -> Optional[str]:\n return 
pulumi.get(self, \"operator\")", "def operator(self) -> Optional[str]:\n return pulumi.get(self, \"operator\")", "def operator(self) -> Optional[str]:\n return pulumi.get(self, \"operator\")", "def operator(self) -> Optional[str]:\n return pulumi.get(self, \"operator\")", "def operator(self) -> Optional[str]:\n return pulumi.get(self, \"operator\")", "def operator(self) -> Optional[str]:\n return pulumi.get(self, \"operator\")", "def operator(self) -> Optional[str]:\n return pulumi.get(self, \"operator\")", "def operator(self) -> Optional[str]:\n return pulumi.get(self, \"operator\")", "def operator(self) -> Optional[str]:\n return pulumi.get(self, \"operator\")", "def operator(self) -> Optional[str]:\n return pulumi.get(self, \"operator\")", "def operator(self) -> Optional[str]:\n return pulumi.get(self, \"operator\")", "def operator(self) -> Optional[str]:\n return pulumi.get(self, \"operator\")", "def operator(self) -> Optional[str]:\n return pulumi.get(self, \"operator\")", "def operator(self) -> Optional[str]:\n return pulumi.get(self, \"operator\")", "def operator(self) -> Optional[str]:\n return pulumi.get(self, \"operator\")", "def operator(self) -> Optional[str]:\n return pulumi.get(self, \"operator\")", "def operator(self) -> Optional[str]:\n return pulumi.get(self, \"operator\")", "def operator(self) -> Optional[str]:\n return pulumi.get(self, \"operator\")", "def operator(self) -> Optional[str]:\n return pulumi.get(self, \"operator\")", "def _process_operator(self, expr, operator, func, *args, **kwargs):\n for elt in self.model.xml_element_children(expr):\n self._process_operator(elt, operator, func, *args, **kwargs)\n if isinstance(expr, mathml_apply) and expr.operator().localName == operator:\n func(expr, *args, **kwargs)", "def operation(self, other=None, operator=None):\n terms = [self]\n if other is not None and operator is not EmptyQuery:\n terms.append(other)\n return Operation(terms, operator=operator)", "def op(self):\n return self.getop(self.pc)", "def evaluate_operation(\n statement: ast.BinOp,\n) -> Optional[Union[int, float, str, bytes]]:\n if isinstance(statement.left, ast.BinOp):\n left = evaluate_operation(statement.left)\n else:\n left = evaluate_node(statement.left)\n\n if isinstance(statement.right, ast.BinOp):\n right = evaluate_operation(statement.right)\n else:\n right = evaluate_node(statement.right)\n\n op = _AST_OPS_TO_OPERATORS.get(type(statement.op))\n\n evaluation = None\n if op is not None:\n with suppress(Exception):\n evaluation = op(left, right)\n\n return evaluation", "def __and__(self, other):\n return self.__mul__(other)", "def equalsOp(self, operand):\n if self.previousOperator:\n if self.previousOperand == None:\n self.previousOperand = operand\n self.computeTotal(self.previousOperator, self.previousOperand)", "def commutator(left_operator, right_operator):\n if not isinstance(left_operator, type(right_operator)):\n raise TypeError('operator_a and operator_b are not of the same type.')\n valueable_type = (QubitOperator, FermionOperator, QubitExcitationOperator)\n if not isinstance(left_operator, valueable_type):\n raise TypeError(\n \"Operator should be QubitOperator, FermionOperator or QubitExcitationOperator.\"\n )\n\n result = left_operator * right_operator\n result -= right_operator * left_operator\n return result", "def value(self):\n return self.computeValue(self.operator,\n self.leftOperand.value(),\n self.rightOperand.value())", "def is_operator(t_char):\r\n eax = 1\r\n if ord(t_char) == 42:\r\n # prodotto *\r\n eax = 0\r\n \r\n if 
ord(t_char) == 43:\r\n # somma +\r\n eax = 0\r\n \r\n if ord(t_char) == 45:\r\n # sottrazione -\r\n eax = 0\r\n \r\n if ord(t_char) == 47:\r\n # divisione /\r\n eax = 0\r\n \r\n return eax", "def is_operator(node):\n return node.startswith('$')", "def calculate(first, second, operator):\n result = \"\"\n if operator == \"+\":\n result = int(first) + int(second)\n elif operator == \"-\":\n result = int(first) - int(second)\n elif operator == \"/\":\n result = int(first) / int(second)\n elif operator == \"*\":\n result = int(first) * int(second)\n else:\n print \"Did not recognize: \" + operator\n\n return result" ]
[ "0.7114677", "0.694391", "0.6853135", "0.6847471", "0.6837405", "0.6791744", "0.6742106", "0.6664682", "0.6664682", "0.657418", "0.6553492", "0.6541233", "0.6475368", "0.6475368", "0.64606273", "0.64198357", "0.64169806", "0.64141864", "0.64120305", "0.6401596", "0.6390579", "0.6389005", "0.63698405", "0.6364054", "0.6335707", "0.6284129", "0.6282931", "0.6274894", "0.6274894", "0.6274894", "0.62520075", "0.6234674", "0.6216574", "0.6204451", "0.62014973", "0.6190167", "0.61797243", "0.61723334", "0.6171986", "0.615839", "0.61562693", "0.6134108", "0.61304367", "0.61261576", "0.6121281", "0.61176", "0.6112385", "0.6104391", "0.60931796", "0.6090923", "0.6087089", "0.60794455", "0.6072107", "0.60681", "0.6067739", "0.606322", "0.60621685", "0.6059502", "0.6055212", "0.604638", "0.6043298", "0.6043298", "0.6038862", "0.6026219", "0.6020266", "0.60067475", "0.6006739", "0.6006739", "0.60053796", "0.60053796", "0.60053796", "0.60053796", "0.60053796", "0.60053796", "0.60053796", "0.60053796", "0.60053796", "0.60053796", "0.60053796", "0.60053796", "0.60053796", "0.60053796", "0.60053796", "0.60053796", "0.60053796", "0.60053796", "0.60053796", "0.60053796", "0.60053796", "0.5997172", "0.59924865", "0.59890175", "0.59880173", "0.59876007", "0.5977097", "0.5976294", "0.59757864", "0.597197", "0.5970427", "0.5962867" ]
0.6933232
2
Operator for paired operands
def _arithmetize2(self, left: Any, right: Any, op: str) -> Any: op_func = getattr(operator, op) left, right = _recycle_left_right(left, right) return op_func(left, right)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def and_or_operator(cls, quad):\n\t\tleft_op = cls.get_address_value(quad.left_operand)\n\t\tright_op = cls.get_address_value(quad.right_operand)\n\t\t# TODO: The next set of lines will fail at a specific case\n\t\tif quad.operator == 10 :\n\t\t\tcls.set_address_value(quad.result, (left_op and right_op))\n\t\telif quad.operator == 11 :\n\t\t\tcls.set_address_value(quad.result, (left_op or right_op))", "def gen_binop(self, expr: expressions.BinaryOperator):\n if expr.op in [\"*\", \"/\", \"%\", \"^\", \"|\", \"&\", \">>\", \"<<\"]:\n lhs = self.gen_expr(expr.a, rvalue=True)\n rhs = self.gen_expr(expr.b, rvalue=True)\n op = expr.op\n\n ir_typ = self.get_ir_type(expr.typ)\n value = self.builder.emit_binop(lhs, op, rhs, ir_typ)\n elif expr.op == \",\":\n # Handle the comma operator by returning the second result\n self.gen_expr(expr.a, rvalue=True)\n rhs = self.gen_expr(expr.b, rvalue=True)\n value = rhs\n elif expr.op == \"+\":\n # Pay attention to pointer arithmetics!\n lhs = self.gen_expr(expr.a, rvalue=True)\n rhs = self.gen_expr(expr.b, rvalue=True)\n\n # left and right are swapped in semantics if right is pointer.\n if expr.a.typ.is_pointer:\n assert expr.b.typ.is_integer\n esize = self.sizeof(expr.a.typ.element_type)\n assert esize > 0\n if esize != 1:\n esize = self.emit(ir.Const(esize, \"esize\", rhs.ty))\n rhs = self.builder.emit_mul(rhs, esize, rhs.ty)\n rhs = self.builder.emit_cast(rhs, ir.ptr)\n\n ir_typ = self.get_ir_type(expr.typ)\n value = self.builder.emit_binop(lhs, \"+\", rhs, ir_typ)\n elif expr.op == \"-\":\n # Pay attention to pointer arithmetics!\n lhs = self.gen_expr(expr.a, rvalue=True)\n rhs = self.gen_expr(expr.b, rvalue=True)\n ir_typ = self.get_ir_type(expr.typ)\n if expr.a.typ.is_pointer:\n esize = self.sizeof(expr.a.typ.element_type)\n assert esize > 0\n if expr.b.typ.is_pointer:\n # pointer - pointer\n value = self.builder.emit_binop(lhs, \"-\", rhs, ir.ptr)\n value = self.emit(ir.Cast(value, \"typecast\", ir_typ))\n if esize != 1:\n esize = self.emit(ir.Const(esize, \"esize\", ir_typ))\n value = self.emit(\n ir.Binop(value, \"/\", esize, \"rhs\", ir_typ)\n )\n else:\n # pointer - numeric\n if esize != 1:\n esize = self.emit(ir.Const(esize, \"esize\", rhs.ty))\n rhs = self.builder.emit_mul(rhs, esize, rhs.ty)\n rhs = self.builder.emit_cast(rhs, ir.ptr)\n value = self.builder.emit_binop(lhs, \"-\", rhs, ir_typ)\n else:\n # numeric - numeric\n value = self.builder.emit_binop(lhs, \"-\", rhs, ir_typ)\n\n elif expr.op in [\"<\", \">\", \"==\", \"!=\", \"<=\", \">=\", \"||\", \"&&\"]:\n value = self.gen_condition_to_integer(expr)\n elif expr.op in [\n \"=\",\n \"+=\",\n \"-=\",\n \"*=\",\n \"%=\",\n \"/=\",\n \">>=\",\n \"<<=\",\n \"&=\",\n \"|=\",\n \"~=\",\n \"^=\",\n ]:\n # Handle struct assignment special case:\n if expr.op == \"=\" and expr.a.typ.is_struct:\n lhs = self.gen_expr(expr.a, rvalue=False)\n rhs = self.gen_expr(expr.b, rvalue=False)\n amount = self.sizeof(expr.a.typ)\n self.gen_copy_struct(lhs, rhs, amount)\n value = None\n else:\n lhs = self.gen_expr(expr.a, rvalue=False)\n rhs = self.gen_expr(expr.b, rvalue=True)\n\n if expr.op == \"=\":\n value = rhs\n else:\n # Handle '+=' and friends:\n op = expr.op[:-1]\n ir_typ = self.get_ir_type(expr.typ)\n loaded = self._load_value(lhs, expr.typ)\n\n # pointer arithmatic:\n if op in [\"+\", \"-\"] and expr.a.typ.is_pointer:\n esize = self.sizeof(expr.a.typ.element_type)\n assert esize > 0\n if esize != 1:\n esize = self.emit(ir.Const(esize, \"esize\", rhs.ty))\n rhs = self.builder.emit_mul(rhs, esize, 
rhs.ty)\n\n value = self.builder.emit_binop(loaded, op, rhs, ir_typ)\n self._store_value(value, lhs)\n else: # pragma: no cover\n raise NotImplementedError(str(expr.op))\n return value", "def __or__(self, other: Any) -> Operators:\n return self.operate(or_, other)", "def __and__(self, other: Any) -> Operators:\n return self.operate(and_, other)", "def __or__(self, other):\r\n return self + other - self * other", "def commutator(left_operator, right_operator):\n if not isinstance(left_operator, type(right_operator)):\n raise TypeError('operator_a and operator_b are not of the same type.')\n valueable_type = (QubitOperator, FermionOperator, QubitExcitationOperator)\n if not isinstance(left_operator, valueable_type):\n raise TypeError(\n \"Operator should be QubitOperator, FermionOperator or QubitExcitationOperator.\"\n )\n\n result = left_operator * right_operator\n result -= right_operator * left_operator\n return result", "def distribute_and_over_or(s):\n if s.op == '|':\n s = associate('|', s.args)\n if s.op != '|':\n return distribute_and_over_or(s)\n if len(s.args) == 0:\n return FALSE\n if len(s.args) == 1:\n return distribute_and_over_or(s.args[0])\n conj = find_if((lambda d: d.op == '&'), s.args)\n if not conj:\n return s\n others = [a for a in s.args if a is not conj]\n rest = associate('|', others)\n return associate('&', [distribute_and_over_or(c|rest)\n for c in conj.args])\n elif s.op == '&':\n return associate('&', map(distribute_and_over_or, s.args))\n else:\n return s", "def applyOperator(self, operand1, operand2, operator):\n\n if operator == \"*\":\n return operand1 * operand2\n elif operator == \"/\":\n return operand1 / operand2\n elif operator == \"+\":\n return operand1 + operand2\n else:\n return operand1 - operand2", "def op(self) -> Literal[\"==\"] | Literal[\"<=\"] | Literal[\">=\"]:\n ...", "def tuple_operation(a: list, b: list, op: str) -> list:\n o = []\n for i in range(0, 3):\n if op == \"xor\":\n o.append(a[i] ^ b[i])\n elif op == \"and\":\n o.append(a[i] & b[i])\n elif op == \"or\":\n o.append(a[i] | b[i])\n else:\n raise RuntimeError('Unknown operation')\n return o[0], o[1], o[2]", "def binary_operator(cls, quad):\n\t\tleft_op = cls.get_address_value(quad.left_operand)\n\t\tright_op = cls.get_address_value(quad.right_operand)\n\t\tresult = cls.execute_binary_operator(quad.operator, left_op, right_op)\n\t\tcls.set_address_value(quad.result, result)", "def _binaryop(self, other, op: str):\n raise NotImplementedError", "def __mul__(self, other):\n return And(self, other)", "def logical_op(self, other):\n if isinstance(other, plist):\n if len(self) == len(other):\n try:\n return plist([op(x, o) for x, o in zip(self, other)])\n except Exception:\n pass\n self_flat = self.ungroup(-1)\n other_flat = other.ungroup(-1)\n ids = op(set([id(x) for x in self_flat]),\n set([id(x) for x in other_flat]))\n if op is operator.__and__ or op is operator.__iand__:\n return plist([x for x in self_flat if id(x) in ids]) # Don't pass root -- we are uprooting\n else:\n return plist(\n [ids.remove(id(x)) or x for x in self_flat if id(x) in ids] +\n [ids.remove(id(x)) or x for x in other_flat if id(x) in ids]\n ) # Don't pass root -- we are uprooting\n else:\n return plist([op(x, other) for x in self], root=self.__root__)", "def reduce(self, binary_operator):\n return functools.reduce(binary_operator, self)", "def __or__(self, other):\n return self.__add__(other)", "def _conjunction_op(spec, *expressions):", "def _conjunction_op(spec, *expressions):", "def _apply_pairwise_op(op, 
tensor):\n _check_tensor_shapes([tensor])\n return op(tf.expand_dims(tensor, 2), tf.expand_dims(tensor, 1))", "def operator(self):\n col = self.pos\n operators = [\"||\", \"&&\", \">>\", \"<<\", \"!=\", \">=\", \"<=\", \"==\", \"##\"] + \\\n [\"-\", \"+\", \"!\", \"*\", \"/\", \"|\", \"&\", \"^\", \"<\", \">\", \"?\", \":\", \"~\", \"#\", \"=\", \"%\"]\n try:\n index = self.match_any(operators)\n\n op = Operator(self.line, col, self.prev_white, operators[index])\n return op\n except TokenError:\n self.pos = col\n raise TokenError(\"Invalid operator.\")", "def test_sqpp_oddly_capped_operators(self):\n self.assertEqual(self.parser.parse_query('foo oR bar'),\n ['+', 'foo', '|', 'bar'])", "def _append_operator(self, operator):", "def binary_op(self, other):\n if (other is pstar\n or other is defaultpdict\n or other is frozenpset\n or other is pdict\n or other is plist\n or other is pset\n or other is ptuple\n ):\n if sys.version_info[0] < 3:\n name = op.__name__.replace('__', '__r', 1)\n else:\n name = '__r%s__' % op.__name__\n return getattr(other.__class__, name)(other, self)\n if isinstance(other, plist):\n if len(self) == len(other):\n return plist([op(x, o) for x, o in zip(self, other)], root=self.__root__)\n return plist([op(x, other) for x in self], root=self.__root__)", "def visit_and(self, left_result: T, right_result: T) -> T:", "def plus(self, other):\n return self | other", "def operands(app):\n return cdr(app)", "def __or__(self, other):\n return MyCustomNumber(self.value | other.value)", "def __or__(self, other):\n return self.fam.c_binop('or', self, other)", "def __mul__(self,other):\n return compositeORGenerator(left = self, right = other)", "def binary_operator(op):\n # When combining a Factor with a NumericalExpression, we use this\n # attrgetter instance to defer to the commuted implementation of the\n # NumericalExpression operator.\n commuted_method_getter = attrgetter(method_name_for_op(op, commute=True))\n\n def binary_operator(self, other):\n # This can't be hoisted up a scope because the types returned by\n # binop_return_type aren't defined when the top-level function is\n # invoked in the class body of Factor.\n return_type = binop_return_type(op)\n if isinstance(self, NumExprFactor):\n self_expr, other_expr, new_inputs = self.build_binary_op(\n op, other,\n )\n return return_type(\n \"({left}) {op} ({right})\".format(\n left=self_expr,\n op=op,\n right=other_expr,\n ),\n new_inputs,\n )\n elif isinstance(other, NumExprFactor):\n # NumericalExpression overrides ops to correctly handle merging of\n # inputs. 
Look up and call the appropriate reflected operator with\n # ourself as the input.\n return commuted_method_getter(other)(self)\n elif isinstance(other, Factor):\n if self is other:\n return return_type(\n \"x_0 {op} x_0\".format(op=op),\n (self,),\n )\n return return_type(\n \"x_0 {op} x_1\".format(op=op),\n (self, other),\n )\n elif isinstance(other, Number):\n return return_type(\n \"x_0 {op} ({constant})\".format(op=op, constant=other),\n binds=(self,),\n )\n raise BadBinaryOperator(op, self, other)\n\n binary_operator.__doc__ = \"Binary Operator: '%s'\" % op\n return binary_operator", "def perform_operation(operator, num_1, num_2):\n\n if operator == \"*\":\n return num_1 * num_2\n if operator == \"+\":\n return num_1 + num_2\n if operator == \"-\":\n return num_1 - num_2\n if operator == \"/\":\n return num_1 / num_2", "def operator(self):\n return self.__operator", "def __ror__(self, other):\n return self._operation_or(other)", "def _apply_binary_op_elementwise(\n self: ConcreteStructuredMetricValue, other: ConcreteStructuredMetricValue,\n op: Callable[[float, float], float]) -> ConcreteStructuredMetricValue:\n ...", "def OR(self, operand2, *operands):\n\t\treturn OR((self, operand2) + operands)", "def operate(\n self, op: OperatorType, *other: Any, **kwargs: Any\n ) -> Operators:\n raise NotImplementedError(str(op))", "def __and__(self, other):\n return self.__mul__(other)", "def __and__(self, other):\n return self >> (lambda _: other)", "def operation(self, other=None, operator=None):\n terms = [self]\n if other is not None and operator is not EmptyQuery:\n terms.append(other)\n return Operation(terms, operator=operator)", "def operator_lhs(self, inp):\n assert self.operator is not None, \\\n \"Please set an operator with the set_operation method\"\n\n return self.operator_rhs(self.operator.forward(inp))", "def __radd__(self, other: Any) -> ColumnOperators:\n return self.reverse_operate(add, other)", "def __and__(self, other):\r\n return self * other", "def __rtruediv__(self, other: Any) -> ColumnOperators:\n return self.reverse_operate(truediv, other)", "def AND(self, operand2, *operands):\n\t\treturn AND((self, operand2) + operands)", "def _op(\n x: Union[bool, dts.Boolean, tps.BooleanValue],\n y: Union[bool, dts.Boolean, tps.BooleanValue],\n ) -> T:", "def visit_or(self, left_result: T, right_result: T) -> T:", "def __le__(self, other: Any) -> ColumnOperators:\n return self.operate(le, other)", "def binary_ops(self, ctx: Context) -> Iterator[AnnotatedExpression]:\n for type, expr_group in ctx.groupby_type():\n if type in (bool, Callable):\n continue\n # TODO: Allow tuple comparisons?\n if TypeAnnotation(type).iterable:\n continue\n\n for commutative_operator in self.commutative_operators:\n for left, right in combinations(expr_group, 2):\n yield AnnotatedExpression(\n ast.BinOp(\n left=left.expr, op=commutative_operator(), right=right.expr\n ),\n TypeAnnotation(type),\n )\n for dependent_operator in self.non_commutative_operators:\n for left, right in permutations(expr_group, 2):\n yield AnnotatedExpression(\n ast.BinOp(\n left=left.expr, op=dependent_operator(), right=right.expr\n ),\n TypeAnnotation(type),\n )", "def __or__(self, other):\n return self._operation_or(other)", "def operator(self):\n return self.data.get('operator', 'and')", "def operator(self):\n return self.data.get('operator', 'and')", "def my_operator(self):\n return self._my_operator", "def _process_operator(self, expr, operator, func, *args, **kwargs):\n for elt in self.model.xml_element_children(expr):\n 
self._process_operator(elt, operator, func, *args, **kwargs)\n if isinstance(expr, mathml_apply) and expr.operator().localName == operator:\n func(expr, *args, **kwargs)", "def __rmul__(self, other: Any) -> ColumnOperators:\n return self.reverse_operate(mul, other)", "def _build_logical_op(op):\n def logical_op(self, other):\n \"\"\"`plist` logical operation. **Logical operations perform set operations on `plist`s.**\n\n **IMPORTANT:** `plist` logical operations between two `plist`s perform `set` operations\n on the two `plist`s. Logical operations between a `plist` and any other type attempts\n to perform that operation on the values in the `plist` and `other` itself.\n\n `logical_op` is not callable directly from `plist`. It implements the various\n python logical operations: `&`, `|`, `^`, etc. The logical operators\n can be called directly with their corresponding 'magic' functions,\n `plist.__and__`, `plist.__or__`, `plist.__xor__`, etc., but are generally just\n called implicitly.\n\n Examples:\n ```python\n foos = plist([pdict(foo=0, bar=0), pdict(foo=1, bar=1), pdict(foo=2, bar=0)])\n (foos.bar == 0).baz = 3 + (foos.bar == 0).foo\n (foos.bar == 1).baz = 6\n\n assert (((foos.bar == 0) & (foos.baz == 3)).aslist() ==\n [{'baz': 3, 'foo': 0, 'bar': 0}])\n\n assert (((foos.bar == 0) | (foos.baz == 3)).aslist() ==\n [{'bar': 0, 'baz': 3, 'foo': 0}, {'bar': 0, 'baz': 5, 'foo': 2}])\n\n assert (((foos.bar == 0) ^ (foos.baz == 3)).aslist() ==\n [{'bar': 0, 'baz': 5, 'foo': 2}])\n\n by_bar = foos.bar.groupby()\n\n assert (((by_bar.bar == 0) & (by_bar.bar == 1)).aslist() ==\n [[], []])\n assert (((by_bar.bar == 0) & (by_bar.bar <= 1)).aslist() ==\n [[{'bar': 0, 'baz': 3, 'foo': 0}, {'bar': 0, 'baz': 5, 'foo': 2}], []])\n\n assert (((by_bar.baz == 3) | (by_bar.baz == 6)).aslist() ==\n [[{'bar': 0, 'baz': 3, 'foo': 0}], [{'bar': 1, 'baz': 6, 'foo': 1}]])\n assert (((by_bar.baz == 6) | (by_bar.baz <= 4)).aslist() ==\n [[{'bar': 0, 'baz': 3, 'foo': 0}], [{'bar': 1, 'baz': 6, 'foo': 1}]])\n\n assert (((by_bar.baz == 3) ^ (by_bar.baz == 6)).aslist() ==\n [[{'bar': 0, 'baz': 3, 'foo': 0}], [{'bar': 1, 'baz': 6, 'foo': 1}]])\n assert (((by_bar.baz == 6) ^ (by_bar.bar <= 4)).aslist() ==\n [[{'bar': 0, 'baz': 3, 'foo': 0}, {'bar': 0, 'baz': 5, 'foo': 2}], []])\n ```\n\n Logical operations can be applied element-wise if `other` is not a `plist`:\n ```python\n assert ((foos.baz & 1).aslist() ==\n [1, 0, 1])\n assert ((by_bar.baz | 1).aslist() ==\n [[3, 5], [7]])\n assert ((1 ^ by_bar.baz).aslist() ==\n [[2, 4], [7]])\n ```\n\n Args:\n other: Object to perform the logical operation with.\n\n Returns:\n New `plist`, merging `self` and `other` according to the operation provided\n to `_build_logical_op`.\n \"\"\"\n if isinstance(other, plist):\n if len(self) == len(other):\n try:\n return plist([op(x, o) for x, o in zip(self, other)])\n except Exception:\n pass\n self_flat = self.ungroup(-1)\n other_flat = other.ungroup(-1)\n ids = op(set([id(x) for x in self_flat]),\n set([id(x) for x in other_flat]))\n if op is operator.__and__ or op is operator.__iand__:\n return plist([x for x in self_flat if id(x) in ids]) # Don't pass root -- we are uprooting\n else:\n return plist(\n [ids.remove(id(x)) or x for x in self_flat if id(x) in ids] +\n [ids.remove(id(x)) or x for x in other_flat if id(x) in ids]\n ) # Don't pass root -- we are uprooting\n else:\n return plist([op(x, other) for x in self], root=self.__root__)\n\n return logical_op", "def calc(operand_1, operand_2):\n return operand_1 + operand_2", "def 
calc(operand_1, operand_2):\n return operand_1 + operand_2", "def or_(a, b):", "def eliminate_implications(s):\n if not s.args or is_symbol(s.op): return s ## (Atoms are unchanged.)\n args = map(eliminate_implications, s.args)\n a, b = args[0], args[-1]\n if s.op == '>>':\n return (b | ~a)\n elif s.op == '<<':\n return (a | ~b)\n elif s.op == '<=>':\n return (a | ~b) & (b | ~a)\n elif s.op == '^':\n assert len(args) == 2 ## TODO: relax this restriction\n return (a & ~b) | (~a & b)\n else:\n assert s.op in ('&', '|', '~')\n return Expr(s.op, *args)", "def commute_operands(self, node):\n def is_assumption(n):\n \"\"\"Return whether a node is an assumption.\"\"\"\n if not isinstance(n, types.Symbol):\n return False\n symbol = self.symbol_table.lookup(n.name)\n if symbol and symbol.type_ == SymbolType.StackItem:\n return True\n return False\n\n def has_assumption(n):\n \"\"\"Return whether a BinOpCode contains an assumption.\"\"\"\n if not isinstance(n, types.BinOpCode):\n return False\n return any(is_assumption(i) for i in [n.left, n.right])\n\n def should_commute(n):\n return is_assumption(n) or has_assumption(n)\n\n # Commute operands of different operations.\n # e.g. 2 + assumption + 3 --> 2 + 3 + assumption\n if self.is_commutative(node) and has_assumption(node.left) and node.left.name == node.name:\n # Move the assumption so we can be sure it's in the attribute 'right'.\n if is_assumption(node.left.left):\n node.left.left, node.left.right = node.left.right, node.left.left\n\n self.debug('Commuting operations for %s and %s' % (format_structural_op(node.left), format_structural_op(node.right)), node.lineno)\n right = node.right\n node.right = node.left.right\n node.left.right = right\n\n if should_commute(node.left) or not should_commute(node.right):\n return\n\n if self.is_commutative(node):\n self.debug('Commuting operands for %s' % format_structural_op(node), node.lineno)\n node.left, node.right = node.right, node.left\n elif self.has_logical_equivalent(node):\n logmsg = 'Replacing %s with logical equivalent ' % format_structural_op(node)\n node.name = logical_equivalents[node.name]\n node.left, node.right = node.right, node.left\n logmsg += format_structural_op(node)\n self.debug(logmsg, node.lineno)", "def __or__(self,other):\n #TODO: Operators should raise an exception if the combination attempted does not make sense.\n return compositeConditionalGenerator(left=self, right=other)", "def _disjunction_op(spec, *expressions):", "def _operators_conductor(operator_name, _bool=None):\n func = getattr(Series, operator_name)\n if _bool is None:\n # return bool series.\n _pre, _post = bool, bool\n else:\n # return ints.\n _pre, _post = int, int\n\n @wraps(func)\n def operator_method(self, other=None):\n if other is None:\n # for unary such as pos, neg, invert\n def not_(df: dF):\n return func(df.pipe(self.copy().pop())).apply(_post)\n\n return not_\n\n # if not isinstance(other, Condition):\n # raise TypeError(\"only conditions can add, got %r\" % type(other))\n\n def comb(df: dF) -> Series:\n return func(df.pipe(self).apply(_pre), df.pipe(other).apply(_pre)).apply(_post)\n\n return comb\n\n return operator_method", "def operator_rhs(self, inp):\n assert self.operator is not None, \\\n \"Please set an operator with the set_operation method\"\n\n return self.operator.adjoint(inp)", "def __mul__(self, other: Any) -> ColumnOperators:\n return self.operate(mul, other)", "def __and__(self, other):\n\n if isinstance(other, (int, type(Zero()))):\n if other == 0:\n return 0\n ol = 0\n if 
isinstance(other, Dyadic):\n for i, v in enumerate(self.args):\n for i2, v2 in enumerate(other.args):\n ol += v[0] * v2[0] * (v[2] & v2[1]) * (v[1] | v2[2])\n elif isinstance(other, Vector):\n for i, v in enumerate(self.args):\n ol += v[0] * v[1] * (v[2] & other)\n else:\n raise TypeError('Need to supply a Vector or Dyadic')\n return ol", "def op(self):\n return self.__op", "def op(self):\n return self.__op", "def test_or(\n self,\n left: Result[int, str],\n right: Result[int, str],\n exp: Result[int, str],\n ) -> None:\n assert left.or_(right) == exp", "def __and__(self, other):\n return self.fam.c_binop('and', self, other)", "def special_math_func(state, other, operator):\n if not hasattr(other, '__iter__'):\n # other is just a number\n results = [getattr(state[each], operator)(other)\n for each in state.keys()]\n else:\n try:\n # Both are dictionaries\n results = [getattr(state[each], operator)(other[each])\n for each in state]\n except IndexError:\n # Both are iterables, but other is not a dictionary\n results = [getattr(state[i], operator)(j)\n for i, j in zip(state, other)]\n out = State(zip(state.keys(), results))\n return out", "def get_operands(self, context, cls=None):\n arg1 = self.get_argument(context, cls=cls)\n if arg1 is None:\n return None, None\n\n arg2 = self.get_argument(context, index=1, cls=cls)\n if arg2 is None:\n return None, None\n\n if isinstance(arg1, Decimal) and isinstance(arg2, float):\n return arg1, Decimal(arg2)\n elif isinstance(arg2, Decimal) and isinstance(arg1, float):\n return Decimal(arg1), arg2\n\n return arg1, arg2", "def get_operator(self):\n if len(self) == 1:\n return self[0].get_operator()\n op = np.array(self._get_array_of_operators())\n return np.sum(op, axis=0)", "def __mod__(self, other: Any) -> ColumnOperators:\n return self.operate(mod, other)", "def AND(r, s):\n return lambda l, i: r(l, i) and s(l, i)", "def convert_elemwise(self, op):\n try:\n from tflite.Operator import Operator\n from tflite.AddOptions import AddOptions\n from tflite.SubOptions import SubOptions\n from tflite.MulOptions import MulOptions\n from tflite.DivOptions import DivOptions\n from tflite.BuiltinOptions import BuiltinOptions\n from tflite.ActivationFunctionType import ActivationFunctionType\n except ImportError:\n raise ImportError(\"The tflite package must be installed\")\n\n assert isinstance(op, Operator)\n input_tensors = self.get_input_tensors(op)\n assert len(input_tensors) == 2, \"input tensors length should be 2\"\n\n def get_input_nodes(tensor):\n if tensor.tensor_idx in self.tensor_tab:\n # In most cases, we can assume that TOCO fuses elemwise operators\n # with constants - it means both will be tensors.\n return self.tensor_tab[tensor.tensor_idx]\n else:\n # However, in some corner cases, the elemwise operator is not fused,\n # we can receive as constant.\n t_value = self.get_tensor_value(tensor)\n return self.nn_new_const(tensor, t_value)\n\n lhs_nodes = get_input_nodes(input_tensors[0])\n rhs_nodes = get_input_nodes(input_tensors[1])\n\n assert len(lhs_nodes) in [1, 3], \"Nodes list size should be 1 or 3\"\n assert len(lhs_nodes) == len(rhs_nodes), \"Left and right nodes list size should be equal\"\n\n output_tensors = self.get_output_tensors(op)\n assert len(output_tensors) == 1, \"output tensors length should be 1\"\n output_tensor = output_tensors[0]\n output_tensor_idx = output_tensor.tensor_idx\n output_tensor_shape = output_tensor.tensor.ShapeAsNumpy()\n\n # Options (fused_activation_function)\n options = None\n if op.BuiltinOptionsType() == 
BuiltinOptions.AddOptions:\n op_type = \"Add\"\n options = AddOptions()\n elif op.BuiltinOptionsType() == BuiltinOptions.SubOptions:\n op_type = \"Sub\"\n options = SubOptions()\n elif op.BuiltinOptionsType() == BuiltinOptions.MulOptions:\n op_type = \"Mul\"\n options = MulOptions()\n elif op.BuiltinOptionsType() == BuiltinOptions.DivOptions:\n op_type = \"Div\"\n options = DivOptions()\n\n if options is not None:\n op_options = op.BuiltinOptions()\n options.Init(op_options.Bytes, op_options.Pos)\n fused_activation_fn = options.FusedActivationFunction()\n # if we have activation fn\n assert fused_activation_fn == ActivationFunctionType.NONE, \\\n 'Elemwise operators with fused activation are not supported yet.'\n\n out_nodes = self.nn_elemwise(lhs_nodes, rhs_nodes, op_type, output_tensor_shape)\n\n self.tensor_tab[output_tensor_idx] = out_nodes\n return out_nodes", "def do_oprn(self, *args, operator=None, **kwargs):\n\t\tself.operator = operator\n\n\t\tif not self.operator:\n\t\t\treturn f'No operator provided'\n\n\t\tif self.operator == '+':\n\t\t\treturn self.sum(*args, **kwargs)\n\t\telif self.operator == '-':\n\t\t\treturn self.subtract(*args, **kwargs)\n\t\telif self.operator == '*':\n\t\t\treturn self.multiple(*args, **kwargs)\n\t\telif self.operator == '/':\n\t\t\treturn self.division(*args, **kwargs)\n\t\telse:\n\t\t\treturn f'Currently Operator ({operator}) is not Applicable'", "def __add__(self, other):\n return Or(self, other)", "def _op(\n x: Union[int, float, dts.Number, tps.NumericValue],\n y: Union[int, float, dts.Number, tps.NumericValue],\n ) -> T:", "def get_new_quantifier_and_axioms(binary_op, old_quantifier, is_left):\n new_quantifier = old_quantifier\n if binary_op == '&':\n axioms = [2, 3] if is_left else [4, 5]\n\n elif binary_op == '|':\n axioms = [6, 7] if is_left else [8, 9]\n\n # ->\n else:\n if is_left:\n new_quantifier = 'A' if old_quantifier == 'E' else 'E'\n axioms = [10, 11]\n else:\n axioms = [12, 13]\n\n axioms = tuple(map(lambda axiom_num: ADDITIONAL_QUANTIFICATION_AXIOMS[axiom_num], axioms))\n return new_quantifier, axioms[0] if old_quantifier == 'A' else axioms[1]", "def test_etuple_term():\n # Make sure that we don't lose underlying `eval_obj`s\n # when taking apart and re-creating expression tuples\n # using `kanren`'s `operator`, `arguments` and `term`\n # functions.\n e1 = etuple(add, (object(),), (object(),))\n e1_obj = e1.eval_obj\n\n e1_dup = (operator(e1),) + arguments(e1)\n\n assert isinstance(e1_dup, ExpressionTuple)\n assert e1_dup.eval_obj == e1_obj\n\n e1_dup_2 = term(operator(e1), arguments(e1))\n assert e1_dup_2 == e1_obj", "def dot(self, other):\n if isinstance(other, Operator): # A * B\n return _prodOperator(self, other)\n elif type(other) in [int, float]: # A * c or c * A\n return _scaledOperator(self, other)\n elif isinstance(other, list) and isinstance(self, Vstack):\n assert len(other) == self.n, \"Other lenght and self lenght mismatch\"\n return Vstack([_scaledOperator(self.ops[i], other[i]) for i in range(self.n)])\n elif isinstance(other, list) and isinstance(self, Hstack):\n assert len(other) == self.n, \"Other lenght and self lenght mismatch\"\n return Hstack([_scaledOperator(self.ops[i], other[i]) for i in range(self.n)])\n elif isinstance(other, Vector) or isinstance(other, superVector): # A * x\n temp = self.range.clone()\n self.forward(False, other, temp)\n return temp\n else:\n raise TypeError('Expected Operator, (super)Vector or scalar, got %r' % other)", "def bitwise_and(self, other: Any) -> ColumnOperators:\n\n return 
self.operate(bitwise_and_op, other)", "def bitwise_or(self, other: Any) -> ColumnOperators:\n\n return self.operate(bitwise_or_op, other)", "def binop(x, y, op):\n\n if (x is None) and (y is None):\n return None\n\n x = x if (x is not None) else 0\n y = y if (y is not None) else 0\n return op(x,y)", "def __or__(self, other):\n\t\tif isinstance(other, int):\n\t\t\treturn self.value | other\n\t\telif type(self) is type(other):\n\t\t\treturn self.value | other.value", "def calc(operand_1, operand_2):\n\n return operand_1 + operand_2", "def _op_or_(self, left: Any, right: Any) -> Any:\n if isinstance(left, list):\n return Collection(left, right)\n\n left, right = _recycle_left_right(left, right)\n left = Series(left).fillna(False)\n right = Series(right).fillna(False)\n return left | right", "def test_operator_adapt(self):\n\n # test string concatenation\n expr = test_table.c.data + \"somedata\"\n assert testing.db.execute(select([expr])).scalar() == \"somedatasomedata\"\n\n expr = test_table.c.id + 15\n assert testing.db.execute(select([expr])).scalar() == 16\n\n # test custom operator conversion\n expr = test_table.c.avalue + 40\n assert expr.type.__class__ is test_table.c.avalue.type.__class__\n\n # value here is calculated as (250 - 40) / 10 = 21\n # because \"40\" is an integer, not an \"avalue\"\n assert testing.db.execute(select([expr.label('foo')])).scalar() == 21\n\n expr = test_table.c.avalue + literal(40, type_=MyCustomType)\n \n # + operator converted to -\n # value is calculated as: (250 - (40 * 10)) / 10 == -15\n assert testing.db.execute(select([expr.label('foo')])).scalar() == -15\n\n # this one relies upon anonymous labeling to assemble result\n # processing rules on the column.\n assert testing.db.execute(select([expr])).scalar() == -15", "def visit_BinaryOperator(self, node: BinaryOperator) -> Instruction:\n\n left = self.visit(node.left)\n right = self.visit(node.right)\n\n if isinstance(left, VarSymbol):\n left_symbol = self.GLOBAL_MEMORY[left.name]\n else:\n left_symbol = left\n\n if isinstance(right, VarSymbol):\n right_symbol = self.GLOBAL_MEMORY[right.name]\n else:\n right_symbol = right\n\n if node.operator.type == TokenType.PLUS:\n return self.builder.fadd(left_symbol, right_symbol, \"addtmp\")\n elif node.operator.type == TokenType.MINUS:\n return self.builder.fsub(left_symbol, right_symbol, \"subtmp\")\n elif node.operator.type == TokenType.MUL:\n return self.builder.fmul(left_symbol, right_symbol, \"multmp\")\n elif node.operator.type == TokenType.INTEGER_DIV:\n return self.builder.fdiv(left_symbol, right_symbol, \"udivtmp\")\n elif node.operator.type == TokenType.FLOAT_DIV:\n return self.builder.fdiv(left_symbol, right_symbol, \"fdivtmp\")", "def test_and(\n self,\n left: Result[int, str],\n right: Result[int, str],\n exp: Result[int, str],\n ) -> None:\n assert left.and_(right) == exp", "def execute_binary_operator(cls, val, x, y):\n\n\t\tif val == 0:\n\t\t\treturn operator.add(x,y)\n\t\telif val == 1:\n\t\t\treturn operator.sub(x,y)\n\t\telif val == 2:\n\t\t\treturn operator.mul(x,y)\n\t\telif val == 3:\n\t\t\treturn operator.div(x,y)\n\t\telif val == 4:\n\t\t\treturn operator.lt(x,y)\n\t\telif val == 5:\n\t\t\treturn operator.gt(x,y)\n\t\telif val == 6:\n\t\t\treturn operator.le(x,y)\n\t\telif val == 7:\n\t\t\treturn operator.ge(x,y)\n\t\telif val == 8:\n\t\t\treturn operator.eq(x,y)\n\t\telif val == 9:\n\t\t\treturn operator.ne(x,y)\n\t\telif val == 12:\n\t\t\treturn operator.mod(x,y)", "def fun(op, v1, v2):\n if op == '+':\n return v1+v2\n elif op == '-':\n 
return v1-v2\n elif op == '*':\n return v1*v2\n elif op == '/':\n return v1", "def __add__(self, other: Any) -> ColumnOperators:\n return self.operate(add, other)", "def calculate(A, B, operator): # HELPER\n base_dict = {\n '+' : float(A) + float(B),\n '-' : float(A) - float(B),\n '*' : float(A) * float(B),\n '/' : float(A) / float(B), \n '**': float(A) **float(B), \n '<<': float(A) * (2**float(B)), # left shift\n '>>': float(A) / (2**float(B)) # right shift\n }\n return base_dict[operator]", "def __ror__(self, other):\n return whitespaces.CURRENT.normalize(other) | self", "def _lex_operators(self):\n try:\n val = self._current\n type = Lexer._OPERATORS[self._current]\n self._advance()\n return Token(val, type)\n except KeyError:\n raise ParserError(self._expr,\n \"Encountered invalid token '{t}' at {i}\".format(\n t=self._current, i=self._index))", "def __or__(self, y):\n return self.__and__(y) ^ self ^ y", "def _to_ops(from_op):\n\n for to_op in OPERATORS:\n if to_op and isinstance(from_op, ast.Not):\n # 'not' can only be removed but not replaced with\n # '+', '-' or '~' b/c that may lead to strange results\n pass\n elif isinstance(from_op, ast.UAdd) and (to_op is None):\n # '+1' => '1' yields equivalent mutations\n pass\n else:\n yield to_op", "def _less_than_or_equal_to_op(spec):" ]
[ "0.70254517", "0.6778513", "0.6722453", "0.67095107", "0.6646131", "0.6616339", "0.6596597", "0.6542849", "0.6527473", "0.6483588", "0.64554465", "0.63938665", "0.6341946", "0.62985724", "0.62963104", "0.6284674", "0.6228084", "0.6228084", "0.6227012", "0.6224514", "0.6223301", "0.6221573", "0.6185613", "0.6155851", "0.615082", "0.6133309", "0.61299735", "0.61252403", "0.6115203", "0.6114843", "0.6110937", "0.6105918", "0.6079817", "0.6079", "0.60789037", "0.6069797", "0.6062713", "0.60597926", "0.6055471", "0.60482705", "0.60481167", "0.60296386", "0.60252255", "0.60063815", "0.60027176", "0.599779", "0.5993548", "0.5975402", "0.59744686", "0.59411365", "0.59411365", "0.59379727", "0.59321713", "0.5920946", "0.5912383", "0.58780026", "0.58780026", "0.5871796", "0.58597845", "0.5848456", "0.5848003", "0.5845567", "0.5835404", "0.58327246", "0.5832113", "0.5832067", "0.5831043", "0.5831043", "0.58275884", "0.58268034", "0.5817154", "0.5816633", "0.5812413", "0.581101", "0.57975864", "0.57937884", "0.5792986", "0.5790013", "0.5785293", "0.57845515", "0.5783769", "0.57773477", "0.57640636", "0.5757729", "0.575625", "0.57531375", "0.57468987", "0.57433677", "0.574051", "0.5737315", "0.57363737", "0.57298195", "0.57252264", "0.5722501", "0.5719172", "0.5711918", "0.5710096", "0.57008576", "0.5699562", "0.5697335" ]
0.6676819
4
Mimic the & operator in R. This has to have Expression objects to be involved to work
def _op_and_(self, left: Any, right: Any) -> Any: if isinstance(left, list): # induce an intersect with Collection return Intersect(left, right) left, right = _recycle_left_right(left, right) left = Series(left).fillna(False) right = Series(right).fillna(False) return left & right
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def AND(f, g):\n def _and(x):\n return f(x) & g(x)\n return _and", "def and_(a, b):", "def __and__(self, other):\n return self.fam.c_binop('and', self, other)", "def __and__(self, obj):\n return self._boolean_operation(obj, operator.__and__)", "def _and(cls, arg1, arg2):\n return arg1 and arg2", "def AND(r, s):\n return lambda l, i: r(l, i) and s(l, i)", "def __and__(self, other):\n return self.__class__(self.value + '&' + str(other))", "def __and__(self, other):\n return self._operation_and(other)", "def AND(self, value):\n self.reg.A = self.reg.A & value\n self.reg.Z = self.reg.A == 0\n self.reg.N = self.reg.A >> 7", "def __and__(self, other):\n return MyCustomNumber(self.value & other.value)", "def visit_and(self, left_result: T, right_result: T) -> T:", "def __and__(self, other: Any) -> Operators:\n return self.operate(and_, other)", "def bitwise_and(self):\n register = self.return_middle_registers(self.opcode)\n self.registers[register[0]] = (\n self.registers[register[0]] & self.registers[register[1]])\n logger.info(\"Bitwise AND on V{} and V{} for {}\".format(\n register[0],\n register[1],\n self.registers[register[0]]))", "def and_(*args, **kwargs):\n ...", "def logical_and(x1, x2, f=None):\n return _cur_framework(x1, f=f).logical_and(x1, x2)", "def test_and(\n self,\n left: Result[int, str],\n right: Result[int, str],\n exp: Result[int, str],\n ) -> None:\n assert left.and_(right) == exp", "def and_bexp(env, node):\n left_value = node.left.interpret(env)\n right_value = node.right.interpret(env)\n return 1 if left_value and right_value else 0", "def __and__(self, other):\n return self.and_(other)", "def bitwise_and(lhs, rhs):\n return _make.bitwise_and(lhs, rhs)", "def logical_and(lhs, rhs):\n return _make.logical_and(lhs, rhs)", "def f_and(*args):\n f = And(*args).factor()\n return f if f in B else f.factor()", "def __and__(self, other):\n a, b = Trits.match_length(self, other)\n return Trits([x & y for x, y in zip(a, b)])", "def __and__(self, other):\n\t\tif isinstance(other, int):\n\t\t\treturn self.value & other\n\t\telif type(self) is type(other):\n\t\t\treturn self.value & other.value", "def instruction_and(self, register, a, b):\n if Vm.is_register(a):\n a = self.get_register(a)\n\n if Vm.is_register(b):\n b = self.get_register(b)\n\n self.set_register(register, (a & b) % MAX_INT)", "def __and__(self, other):\n return self >> (lambda _: other)", "def _operation_and(self, other):\n self._check_items(other)\n return ReadingSet(self._set & self._get_other_set(other))", "def _andReg(address, mask):\n _setReg(address, _getReg(address)&mask)", "def __iand__(self, other: t.Any) -> te.Self:\n return self._op_inplace('__iand__', other)", "def convert_broadcast_logical_and(node, **kwargs):\n return create_basic_op_node('And', node, kwargs)", "def and_or_operator(cls, quad):\n\t\tleft_op = cls.get_address_value(quad.left_operand)\n\t\tright_op = cls.get_address_value(quad.right_operand)\n\t\t# TODO: The next set of lines will fail at a specific case\n\t\tif quad.operator == 10 :\n\t\t\tcls.set_address_value(quad.result, (left_op and right_op))\n\t\telif quad.operator == 11 :\n\t\t\tcls.set_address_value(quad.result, (left_op or right_op))", "def Nand(*args):\n return Not(And(*args))", "def _and(it):\n return 1 if it[0]==1 and it[1]==1 else 0", "def AND(*expressions):\n return {'$and': list(expressions)}", "def _daat_and(self):\n raise NotImplementedError", "def AND(self, operand2, *operands):\n\t\treturn AND((self, operand2) + operands)", "def and_filter(self):\n return 
self.__and", "def __and__(self, other):\n if other is None:\n return self.copy()\n elif isinstance(other, (Query, QueryCompound)):\n return self.and_(other)\n else:\n out = self.copy()\n out.addMath(Query.Math.And, other)\n return out", "def __and__(self, other):\n for k, v in other.items():\n if k in self._values:\n self._values[k] = str(SpecifierSet(self._values[k]) & v)\n else:\n self._values[k] = v\n return self", "def __mul__(self, obj):\n return self & obj", "def get_bprop_logical_and(self):\n\n def bprop(x, y, out, dout):\n return zeros_like(x), zeros_like(y)\n return bprop", "def __and__(self, other: t.Any) -> InspectableSet[_C]:\n return self._op_copy('__and__', other)", "def _(obj: And, visitor: BooleanExpressionVisitor[T]) -> T:\n left_result: T = visit(obj.left, visitor=visitor)\n right_result: T = visit(obj.right, visitor=visitor)\n return visitor.visit_and(left_result=left_result, right_result=right_result)", "def __iand__(self, y):\n if is_tensor(y) or isinstance(y, int):\n self.share &= y\n elif isinstance(y, BinarySharedTensor):\n self.share.set_(beaver.AND(self, y).share.data)\n else:\n raise TypeError(\"Cannot AND %s with %s.\" % (type(y), type(self)))\n return self", "def andExpr( ): #DOUBLE CHECK THIS\n\n\ttok = tokens.peek( )\n\tif debug: print(\"andExpr: \", tok)\n\tleft = relationalExpr( ) #does the left side of the grammar\n\ttok = tokens.peek( )\n\twhile tok == \"and\": #checks to see if there is the token \"and\" and will preform what is inside the curly bracket since it is a series \n\t\ttokens.next()\n\t\tright = relationalExpr( )\n\t\tleft = BinaryExpr(tok, left, right)#MIGHT HAVE TO CHANGE TO STRING \n\t\ttok = tokens.peek( )\n\treturn left", "def _and(self, _and):\n\n self.__and = _and", "def _and(self, _and):\n\n self.__and = _and", "def _and(self, _and):\n\n self.__and = _and", "def _and(self, _and):\n\n self.__and = _and", "def _and(self, _and):\n\n self.__and = _and", "def __and__(self, other):\n if not (isNumeric(other) or isinstance(other, Expression)):\n raise excep.biogemeError(\n f'This is not a valid expression: {other}'\n )\n return And(self, other)", "def bitwise_and(self, other: Any) -> ColumnOperators:\n\n return self.operate(bitwise_and_op, other)", "def __and__(self, other):\n return np.logical_and(self.array, other.array)", "def test_predicate11(self):\n xpb = XPathBuilder()\n xp = xpb.a.b.c[(xpb.attr('d') == 'e') & xpb.foo[xpb.attr('z') == 'ab']]\n exp = '/a/b/c[@d = \"e\" and /foo[@z = \"ab\"]]'\n self.assertEqual(xp.tostring(), exp)", "def __and__(self, other):\n return self.__mul__(other)", "def to_not_and(formula: Formula) -> Formula:\r\n # Task 3.6a\r\n map_operators = {'->': Formula.parse('~(~~p&~q)'),\r\n '+': Formula.parse('~(~(p&~q)&~(~p&q))'),\r\n '<->': Formula.parse('~~(~(p&~q)&~(~p&q))'),\r\n '-&': Formula.parse('~(p&q)'),\r\n '-|': Formula.parse('~~(~p&~q)'),\r\n 'F': Formula.parse('(p&~p)'),\r\n 'T': Formula.parse('~(p&~p)'),\r\n '|': Formula.parse('~(~p&~q)')}\r\n return formula.substitute_operators(map_operators)", "def __iand__(self, other):\n self.truths = self.truths | other.truths\n return self", "def __and__(self, other):\n assert isinstance(other, Filter)\n new_query = \"({}) & ({})\".format(self.query, other.query)\n return Filter(query=new_query)", "def __and__(self, other):\r\n if self.field.characteristic == 2:\r\n return runtime.and_(self, other)\r\n\r\n return super().__and__(other)", "def _logical_and(*args):\n args_ = [_static_value(x) for x in args]\n if any(x is not None and not bool(x) for x in args_):\n 
return constant_op.constant(False)\n if all(x is not None and bool(x) for x in args_):\n return constant_op.constant(True)\n if len(args) == 2:\n return math_ops.logical_and(*args)\n return math_ops.reduce_all(args)", "def __and__(self, other):\n return BitBoard(self.num & other.num)", "def test_predicate7(self):\n xpb = XPathBuilder()\n xp = xpb.foo.bar[(xpb.attr('name') == 'foo') & (xpb.attr('x') == 'x')]\n exp = '/foo/bar[@name = \"foo\" and @x = \"x\"]'\n self.assertEqual(xp.tostring(), exp)", "def and__inplace(a,b):", "def on_true(self) -> global___Expression:", "def le(cls, __and=True, __key=None, **kwargs):\r\n return _queries(\"<=\", __key, __and, kwargs.items())", "def __invert__(self) -> BooleanExpression:", "def __or__(self, y):\n return self.__and__(y) ^ self ^ y", "def __and__(self, other):\n return self.intersection(other)", "def ge(cls, __and=True, __key=None, **kwargs):\r\n return _queries(\">=\", __key, __and, kwargs.items())", "def __and__(self, query):\r\n return And([self, query]).normalize()", "def g3(a, b): \n return not (a and b)", "def __mul__(self, other):\n return And(self, other)", "def __le__(self, *args):\n return _ida_hexrays.operand_locator_t___le__(self, *args)", "def __ge__(self, *args):\n return _ida_hexrays.operand_locator_t___ge__(self, *args)", "def __and__(self, other):\r\n return self * other", "def And(*conditions):\n def andPred(db):\n from functools import reduce\n return reduce(lambda result, c: c(result),\n conditions, db)\n\n return andPred", "def and_list(conditionList):\n return functools.reduce(numpy.logical_and, conditionList)", "def t_and(self, other):\n if self is TRUE and other is TRUE:\n return TRUE\n if self is FALSE or other is FALSE:\n return FALSE\n return UNKNOWN", "def test_andOperator(self):\n xp = XPathQuery(\"//bar[@attrib4='value4' and @attrib5='value5']\")\n self.assertEqual(xp.matches(self.e), True)\n self.assertEqual(xp.queryForNodes(self.e), [self.bar5])", "def equivalence_of(formula1: Formula, formula2: Formula) -> Formula:\r\n return Formula('&', Formula('->', formula1, formula2),\r\n Formula('->', formula2, formula1))", "def _conjunction_op(spec, *expressions):", "def _conjunction_op(spec, *expressions):", "def _disjunction_op(spec, *expressions):", "def equivalence_of(formula1, formula2):\n return Formula('&', Formula('->', formula1, formula2),\n Formula('->', formula2, formula1))", "def pl_true(exp, model={}):\n op, args = exp.op, exp.args\n if exp == TRUE:\n return True\n elif exp == FALSE:\n return False\n elif is_prop_symbol(op):\n return model.get(exp)\n elif op == '~':\n p = pl_true(args[0], model)\n if p is None: return None\n else: return not p\n elif op == '|':\n result = False\n for arg in args:\n p = pl_true(arg, model)\n if p is True: return True\n if p is None: result = None\n return result\n elif op == '&':\n result = True\n for arg in args:\n p = pl_true(arg, model)\n if p is False: return False\n if p is None: result = None\n return result\n p, q = args\n if op == '>>':\n return pl_true(~p | q, model)\n elif op == '<<':\n return pl_true(p | ~q, model)\n pt = pl_true(p, model)\n if pt is None: return None\n qt = pl_true(q, model)\n if qt is None: return None\n if op == '<=>':\n return pt == qt\n elif op == '^':\n return pt != qt\n else:\n raise ValueError, \"illegal operator in logic expression\" + str(exp)", "def test_evaluate_and_expression(self):\n value = self.evaluate_common(\"false and false\")\n self.assertTrue(\n value.type_code == edm.SimpleType.Boolean, \"Expected Boolean\")\n 
self.assertTrue(value.value is False, \"Expected False\")\n try:\n value = self.evaluate_common(\"false and 0\")\n self.fail(\"Integer promotion to Boolean\")\n except odata.EvaluationError:\n pass\n value = self.evaluate_common(\"false and true\")\n self.assertTrue(value.value is False, \"Expected False\")\n value = self.evaluate_common(\"true and false\")\n self.assertTrue(value.value is False, \"Expected False\")\n value = self.evaluate_common(\"true and true\")\n self.assertTrue(value.value is True, \"Expected True\")\n value = self.evaluate_common(\"true and null\")\n self.assertTrue(\n value.type_code == edm.SimpleType.Boolean, \"Expected Boolean\")\n self.assertTrue(value.value is False, \"Expected False\")\n value = self.evaluate_common(\"false and null\")\n self.assertTrue(value.value is False, \"Expected False\")\n value = self.evaluate_common(\"false and false\")\n self.assertTrue(value.value is False, \"Expected False\")", "def __rand__(self, other):\n return self._operation_and(other)", "def test_bin_op_support():\n check_peval_expression(\"1 + 2\", {}, \"3\", fully_evaluated=True, expected_value=3)\n check_peval_expression(\"2 - 1\", {}, \"1\", fully_evaluated=True, expected_value=1)\n check_peval_expression(\"2 * 3\", {}, \"6\", fully_evaluated=True, expected_value=6)\n check_peval_expression(\"9 / 2\", {}, \"4.5\", fully_evaluated=True, expected_value=4.5)\n check_peval_expression(\"9 // 2\", {}, \"4\", fully_evaluated=True, expected_value=4)\n check_peval_expression(\"9 % 2\", {}, \"1\", fully_evaluated=True, expected_value=1)\n check_peval_expression(\"2 ** 4\", {}, \"16\", fully_evaluated=True, expected_value=16)\n check_peval_expression(\"3 << 2\", {}, \"12\", fully_evaluated=True, expected_value=12)\n check_peval_expression(\"64 >> 3\", {}, \"8\", fully_evaluated=True, expected_value=8)\n check_peval_expression(\"17 | 3\", {}, \"19\", fully_evaluated=True, expected_value=19)\n check_peval_expression(\"17 ^ 3\", {}, \"18\", fully_evaluated=True, expected_value=18)\n check_peval_expression(\"17 & 3\", {}, \"1\", fully_evaluated=True, expected_value=1)", "def test_multi_chains_flatten():\n age = User.age >= 3\n name = User.name == \"foo\"\n email = User.email != \"bar\"\n\n and_condition = bloop.condition.Condition()\n or_condition = bloop.condition.Condition()\n for c in [age, name, email]:\n and_condition &= c\n or_condition |= c\n assert and_condition == bloop.condition.And(age, name, email)\n assert or_condition == bloop.condition.Or(age, name, email)", "def ff_add(a, b):\n return a ^ b", "def to_not_and_or(formula: Formula) -> Formula:\r\n # Task 3.5\r\n\r\n map_operators = {'->': Formula.parse('(~p|q)'),\r\n '+': Formula.parse('((p&~q)|(~p&q))'),\r\n '<->': Formula.parse('~((p&~q)|(~p&q))'),\r\n '-&': Formula.parse('~(p&q)'),\r\n '-|': Formula.parse('~(p|q)'),\r\n 'F': Formula.parse('(p&~p)'),\r\n 'T': Formula.parse('~(p&~p)')}\r\n return formula.substitute_operators(map_operators)", "def __and__(self, other):\n return intersect(self, other)", "def and_(self, other):\n if not isinstance(other, (Query, QueryCompound)) or other.isNull():\n return self.copy()\n elif self.isNull():\n return other.copy()\n else:\n # grow this if the operators are the same\n if self.__op == QueryCompound.Op.And:\n queries = list(self.__queries) + [other]\n return QueryCompound(*queries, op=QueryCompound.Op.And)\n else:\n return QueryCompound(self, other, op=QueryCompound.Op.And)", "def test_pathop1(self):\n xpb = XPathBuilder()\n xp = xpb.foo.bar & xpb.bar.foo\n exp = '/foo/bar and 
/bar/foo'\n self.assertEqual(xp.tostring(), exp)", "def op(self) -> Literal[\"==\"] | Literal[\"<=\"] | Literal[\">=\"]:\n ...", "def test_pathop10(self):\n xpb = XPathBuilder()\n xp = (xpb.foo & xpb.bar | xpb.baz).parenthesize() & xpb.foobar\n exp = '(/foo and /bar or /baz) and /foobar'\n self.assertEqual(xp.tostring(), exp)", "def __le__(self, other):\n return _generate_relational_expression(_le, self, other)", "def less_than_or_equal(self) -> global___Expression:", "def __and__(self, other):\n union = proto.FilterExpression()\n domains = [self.filter, other.filter]\n union.filter_union.filter_expressions.extend(domains)\n self.filter = union\n return self", "def filter_and(filters):\n def filt(item):\n for f in filters:\n if not f(item):\n return False\n return True\n return filt", "def test_and_then(\n self,\n start: Result[int, int],\n first: t.Callable[[int], Result[int, int]],\n second: t.Callable[[int], Result[int, int]],\n exp: Result[int, int],\n ) -> None:\n assert start.and_then(first).and_then(second) == exp" ]
[ "0.6913351", "0.6844835", "0.6834847", "0.68041515", "0.6614185", "0.6585983", "0.65602845", "0.65299505", "0.6528684", "0.6510841", "0.6501651", "0.64942497", "0.6466398", "0.6432646", "0.639087", "0.6376711", "0.6334177", "0.6331774", "0.63316846", "0.6288898", "0.62308896", "0.62308514", "0.62090707", "0.62063646", "0.61766183", "0.61751795", "0.617076", "0.61665165", "0.6148593", "0.60970813", "0.6046134", "0.6040575", "0.6003985", "0.59954596", "0.599482", "0.5944566", "0.5912189", "0.5905758", "0.5879148", "0.58318156", "0.58232903", "0.5822931", "0.5821608", "0.579306", "0.57708895", "0.57708895", "0.57708895", "0.57708895", "0.57708895", "0.5745658", "0.57407916", "0.57351184", "0.5720683", "0.5714338", "0.5713976", "0.5676874", "0.5669976", "0.56577057", "0.5650796", "0.56462824", "0.5627038", "0.5626583", "0.5602917", "0.55926865", "0.5585748", "0.5575975", "0.55558485", "0.5552535", "0.553487", "0.5531191", "0.55120945", "0.5501878", "0.5493118", "0.54901034", "0.5482169", "0.54508764", "0.5446951", "0.54214096", "0.5411467", "0.53934443", "0.53934443", "0.53542286", "0.53526646", "0.5337076", "0.5336293", "0.5284144", "0.52558523", "0.5242038", "0.5241389", "0.523373", "0.5216926", "0.52088875", "0.5208302", "0.52061176", "0.5197937", "0.5195612", "0.51852405", "0.518513", "0.5184801", "0.51795256" ]
0.62697506
20
Mimic the & operator in R. This has to have Expression objects to be involved to work
def _op_or_(self, left: Any, right: Any) -> Any: if isinstance(left, list): return Collection(left, right) left, right = _recycle_left_right(left, right) left = Series(left).fillna(False) right = Series(right).fillna(False) return left | right
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def AND(f, g):\n def _and(x):\n return f(x) & g(x)\n return _and", "def and_(a, b):", "def __and__(self, other):\n return self.fam.c_binop('and', self, other)", "def __and__(self, obj):\n return self._boolean_operation(obj, operator.__and__)", "def _and(cls, arg1, arg2):\n return arg1 and arg2", "def AND(r, s):\n return lambda l, i: r(l, i) and s(l, i)", "def __and__(self, other):\n return self.__class__(self.value + '&' + str(other))", "def __and__(self, other):\n return self._operation_and(other)", "def AND(self, value):\n self.reg.A = self.reg.A & value\n self.reg.Z = self.reg.A == 0\n self.reg.N = self.reg.A >> 7", "def __and__(self, other):\n return MyCustomNumber(self.value & other.value)", "def visit_and(self, left_result: T, right_result: T) -> T:", "def __and__(self, other: Any) -> Operators:\n return self.operate(and_, other)", "def bitwise_and(self):\n register = self.return_middle_registers(self.opcode)\n self.registers[register[0]] = (\n self.registers[register[0]] & self.registers[register[1]])\n logger.info(\"Bitwise AND on V{} and V{} for {}\".format(\n register[0],\n register[1],\n self.registers[register[0]]))", "def and_(*args, **kwargs):\n ...", "def logical_and(x1, x2, f=None):\n return _cur_framework(x1, f=f).logical_and(x1, x2)", "def test_and(\n self,\n left: Result[int, str],\n right: Result[int, str],\n exp: Result[int, str],\n ) -> None:\n assert left.and_(right) == exp", "def and_bexp(env, node):\n left_value = node.left.interpret(env)\n right_value = node.right.interpret(env)\n return 1 if left_value and right_value else 0", "def __and__(self, other):\n return self.and_(other)", "def bitwise_and(lhs, rhs):\n return _make.bitwise_and(lhs, rhs)", "def logical_and(lhs, rhs):\n return _make.logical_and(lhs, rhs)", "def _op_and_(self, left: Any, right: Any) -> Any:\n if isinstance(left, list):\n # induce an intersect with Collection\n return Intersect(left, right)\n\n left, right = _recycle_left_right(left, right)\n left = Series(left).fillna(False)\n right = Series(right).fillna(False)\n return left & right", "def f_and(*args):\n f = And(*args).factor()\n return f if f in B else f.factor()", "def __and__(self, other):\n a, b = Trits.match_length(self, other)\n return Trits([x & y for x, y in zip(a, b)])", "def __and__(self, other):\n\t\tif isinstance(other, int):\n\t\t\treturn self.value & other\n\t\telif type(self) is type(other):\n\t\t\treturn self.value & other.value", "def instruction_and(self, register, a, b):\n if Vm.is_register(a):\n a = self.get_register(a)\n\n if Vm.is_register(b):\n b = self.get_register(b)\n\n self.set_register(register, (a & b) % MAX_INT)", "def __and__(self, other):\n return self >> (lambda _: other)", "def _operation_and(self, other):\n self._check_items(other)\n return ReadingSet(self._set & self._get_other_set(other))", "def _andReg(address, mask):\n _setReg(address, _getReg(address)&mask)", "def __iand__(self, other: t.Any) -> te.Self:\n return self._op_inplace('__iand__', other)", "def convert_broadcast_logical_and(node, **kwargs):\n return create_basic_op_node('And', node, kwargs)", "def and_or_operator(cls, quad):\n\t\tleft_op = cls.get_address_value(quad.left_operand)\n\t\tright_op = cls.get_address_value(quad.right_operand)\n\t\t# TODO: The next set of lines will fail at a specific case\n\t\tif quad.operator == 10 :\n\t\t\tcls.set_address_value(quad.result, (left_op and right_op))\n\t\telif quad.operator == 11 :\n\t\t\tcls.set_address_value(quad.result, (left_op or right_op))", "def Nand(*args):\n return Not(And(*args))", 
"def _and(it):\n return 1 if it[0]==1 and it[1]==1 else 0", "def AND(*expressions):\n return {'$and': list(expressions)}", "def _daat_and(self):\n raise NotImplementedError", "def AND(self, operand2, *operands):\n\t\treturn AND((self, operand2) + operands)", "def and_filter(self):\n return self.__and", "def __and__(self, other):\n if other is None:\n return self.copy()\n elif isinstance(other, (Query, QueryCompound)):\n return self.and_(other)\n else:\n out = self.copy()\n out.addMath(Query.Math.And, other)\n return out", "def __and__(self, other):\n for k, v in other.items():\n if k in self._values:\n self._values[k] = str(SpecifierSet(self._values[k]) & v)\n else:\n self._values[k] = v\n return self", "def __mul__(self, obj):\n return self & obj", "def get_bprop_logical_and(self):\n\n def bprop(x, y, out, dout):\n return zeros_like(x), zeros_like(y)\n return bprop", "def __and__(self, other: t.Any) -> InspectableSet[_C]:\n return self._op_copy('__and__', other)", "def _(obj: And, visitor: BooleanExpressionVisitor[T]) -> T:\n left_result: T = visit(obj.left, visitor=visitor)\n right_result: T = visit(obj.right, visitor=visitor)\n return visitor.visit_and(left_result=left_result, right_result=right_result)", "def __iand__(self, y):\n if is_tensor(y) or isinstance(y, int):\n self.share &= y\n elif isinstance(y, BinarySharedTensor):\n self.share.set_(beaver.AND(self, y).share.data)\n else:\n raise TypeError(\"Cannot AND %s with %s.\" % (type(y), type(self)))\n return self", "def andExpr( ): #DOUBLE CHECK THIS\n\n\ttok = tokens.peek( )\n\tif debug: print(\"andExpr: \", tok)\n\tleft = relationalExpr( ) #does the left side of the grammar\n\ttok = tokens.peek( )\n\twhile tok == \"and\": #checks to see if there is the token \"and\" and will preform what is inside the curly bracket since it is a series \n\t\ttokens.next()\n\t\tright = relationalExpr( )\n\t\tleft = BinaryExpr(tok, left, right)#MIGHT HAVE TO CHANGE TO STRING \n\t\ttok = tokens.peek( )\n\treturn left", "def _and(self, _and):\n\n self.__and = _and", "def _and(self, _and):\n\n self.__and = _and", "def _and(self, _and):\n\n self.__and = _and", "def _and(self, _and):\n\n self.__and = _and", "def _and(self, _and):\n\n self.__and = _and", "def __and__(self, other):\n if not (isNumeric(other) or isinstance(other, Expression)):\n raise excep.biogemeError(\n f'This is not a valid expression: {other}'\n )\n return And(self, other)", "def bitwise_and(self, other: Any) -> ColumnOperators:\n\n return self.operate(bitwise_and_op, other)", "def __and__(self, other):\n return np.logical_and(self.array, other.array)", "def test_predicate11(self):\n xpb = XPathBuilder()\n xp = xpb.a.b.c[(xpb.attr('d') == 'e') & xpb.foo[xpb.attr('z') == 'ab']]\n exp = '/a/b/c[@d = \"e\" and /foo[@z = \"ab\"]]'\n self.assertEqual(xp.tostring(), exp)", "def __and__(self, other):\n return self.__mul__(other)", "def to_not_and(formula: Formula) -> Formula:\r\n # Task 3.6a\r\n map_operators = {'->': Formula.parse('~(~~p&~q)'),\r\n '+': Formula.parse('~(~(p&~q)&~(~p&q))'),\r\n '<->': Formula.parse('~~(~(p&~q)&~(~p&q))'),\r\n '-&': Formula.parse('~(p&q)'),\r\n '-|': Formula.parse('~~(~p&~q)'),\r\n 'F': Formula.parse('(p&~p)'),\r\n 'T': Formula.parse('~(p&~p)'),\r\n '|': Formula.parse('~(~p&~q)')}\r\n return formula.substitute_operators(map_operators)", "def __iand__(self, other):\n self.truths = self.truths | other.truths\n return self", "def __and__(self, other):\n assert isinstance(other, Filter)\n new_query = \"({}) & ({})\".format(self.query, other.query)\n return 
Filter(query=new_query)", "def __and__(self, other):\r\n if self.field.characteristic == 2:\r\n return runtime.and_(self, other)\r\n\r\n return super().__and__(other)", "def _logical_and(*args):\n args_ = [_static_value(x) for x in args]\n if any(x is not None and not bool(x) for x in args_):\n return constant_op.constant(False)\n if all(x is not None and bool(x) for x in args_):\n return constant_op.constant(True)\n if len(args) == 2:\n return math_ops.logical_and(*args)\n return math_ops.reduce_all(args)", "def __and__(self, other):\n return BitBoard(self.num & other.num)", "def test_predicate7(self):\n xpb = XPathBuilder()\n xp = xpb.foo.bar[(xpb.attr('name') == 'foo') & (xpb.attr('x') == 'x')]\n exp = '/foo/bar[@name = \"foo\" and @x = \"x\"]'\n self.assertEqual(xp.tostring(), exp)", "def and__inplace(a,b):", "def on_true(self) -> global___Expression:", "def le(cls, __and=True, __key=None, **kwargs):\r\n return _queries(\"<=\", __key, __and, kwargs.items())", "def __invert__(self) -> BooleanExpression:", "def __or__(self, y):\n return self.__and__(y) ^ self ^ y", "def __and__(self, other):\n return self.intersection(other)", "def ge(cls, __and=True, __key=None, **kwargs):\r\n return _queries(\">=\", __key, __and, kwargs.items())", "def __and__(self, query):\r\n return And([self, query]).normalize()", "def g3(a, b): \n return not (a and b)", "def __mul__(self, other):\n return And(self, other)", "def __le__(self, *args):\n return _ida_hexrays.operand_locator_t___le__(self, *args)", "def __ge__(self, *args):\n return _ida_hexrays.operand_locator_t___ge__(self, *args)", "def __and__(self, other):\r\n return self * other", "def And(*conditions):\n def andPred(db):\n from functools import reduce\n return reduce(lambda result, c: c(result),\n conditions, db)\n\n return andPred", "def and_list(conditionList):\n return functools.reduce(numpy.logical_and, conditionList)", "def t_and(self, other):\n if self is TRUE and other is TRUE:\n return TRUE\n if self is FALSE or other is FALSE:\n return FALSE\n return UNKNOWN", "def test_andOperator(self):\n xp = XPathQuery(\"//bar[@attrib4='value4' and @attrib5='value5']\")\n self.assertEqual(xp.matches(self.e), True)\n self.assertEqual(xp.queryForNodes(self.e), [self.bar5])", "def equivalence_of(formula1: Formula, formula2: Formula) -> Formula:\r\n return Formula('&', Formula('->', formula1, formula2),\r\n Formula('->', formula2, formula1))", "def _conjunction_op(spec, *expressions):", "def _conjunction_op(spec, *expressions):", "def _disjunction_op(spec, *expressions):", "def equivalence_of(formula1, formula2):\n return Formula('&', Formula('->', formula1, formula2),\n Formula('->', formula2, formula1))", "def pl_true(exp, model={}):\n op, args = exp.op, exp.args\n if exp == TRUE:\n return True\n elif exp == FALSE:\n return False\n elif is_prop_symbol(op):\n return model.get(exp)\n elif op == '~':\n p = pl_true(args[0], model)\n if p is None: return None\n else: return not p\n elif op == '|':\n result = False\n for arg in args:\n p = pl_true(arg, model)\n if p is True: return True\n if p is None: result = None\n return result\n elif op == '&':\n result = True\n for arg in args:\n p = pl_true(arg, model)\n if p is False: return False\n if p is None: result = None\n return result\n p, q = args\n if op == '>>':\n return pl_true(~p | q, model)\n elif op == '<<':\n return pl_true(p | ~q, model)\n pt = pl_true(p, model)\n if pt is None: return None\n qt = pl_true(q, model)\n if qt is None: return None\n if op == '<=>':\n return pt == qt\n elif op == '^':\n 
return pt != qt\n else:\n raise ValueError, \"illegal operator in logic expression\" + str(exp)", "def test_evaluate_and_expression(self):\n value = self.evaluate_common(\"false and false\")\n self.assertTrue(\n value.type_code == edm.SimpleType.Boolean, \"Expected Boolean\")\n self.assertTrue(value.value is False, \"Expected False\")\n try:\n value = self.evaluate_common(\"false and 0\")\n self.fail(\"Integer promotion to Boolean\")\n except odata.EvaluationError:\n pass\n value = self.evaluate_common(\"false and true\")\n self.assertTrue(value.value is False, \"Expected False\")\n value = self.evaluate_common(\"true and false\")\n self.assertTrue(value.value is False, \"Expected False\")\n value = self.evaluate_common(\"true and true\")\n self.assertTrue(value.value is True, \"Expected True\")\n value = self.evaluate_common(\"true and null\")\n self.assertTrue(\n value.type_code == edm.SimpleType.Boolean, \"Expected Boolean\")\n self.assertTrue(value.value is False, \"Expected False\")\n value = self.evaluate_common(\"false and null\")\n self.assertTrue(value.value is False, \"Expected False\")\n value = self.evaluate_common(\"false and false\")\n self.assertTrue(value.value is False, \"Expected False\")", "def __rand__(self, other):\n return self._operation_and(other)", "def test_bin_op_support():\n check_peval_expression(\"1 + 2\", {}, \"3\", fully_evaluated=True, expected_value=3)\n check_peval_expression(\"2 - 1\", {}, \"1\", fully_evaluated=True, expected_value=1)\n check_peval_expression(\"2 * 3\", {}, \"6\", fully_evaluated=True, expected_value=6)\n check_peval_expression(\"9 / 2\", {}, \"4.5\", fully_evaluated=True, expected_value=4.5)\n check_peval_expression(\"9 // 2\", {}, \"4\", fully_evaluated=True, expected_value=4)\n check_peval_expression(\"9 % 2\", {}, \"1\", fully_evaluated=True, expected_value=1)\n check_peval_expression(\"2 ** 4\", {}, \"16\", fully_evaluated=True, expected_value=16)\n check_peval_expression(\"3 << 2\", {}, \"12\", fully_evaluated=True, expected_value=12)\n check_peval_expression(\"64 >> 3\", {}, \"8\", fully_evaluated=True, expected_value=8)\n check_peval_expression(\"17 | 3\", {}, \"19\", fully_evaluated=True, expected_value=19)\n check_peval_expression(\"17 ^ 3\", {}, \"18\", fully_evaluated=True, expected_value=18)\n check_peval_expression(\"17 & 3\", {}, \"1\", fully_evaluated=True, expected_value=1)", "def test_multi_chains_flatten():\n age = User.age >= 3\n name = User.name == \"foo\"\n email = User.email != \"bar\"\n\n and_condition = bloop.condition.Condition()\n or_condition = bloop.condition.Condition()\n for c in [age, name, email]:\n and_condition &= c\n or_condition |= c\n assert and_condition == bloop.condition.And(age, name, email)\n assert or_condition == bloop.condition.Or(age, name, email)", "def ff_add(a, b):\n return a ^ b", "def to_not_and_or(formula: Formula) -> Formula:\r\n # Task 3.5\r\n\r\n map_operators = {'->': Formula.parse('(~p|q)'),\r\n '+': Formula.parse('((p&~q)|(~p&q))'),\r\n '<->': Formula.parse('~((p&~q)|(~p&q))'),\r\n '-&': Formula.parse('~(p&q)'),\r\n '-|': Formula.parse('~(p|q)'),\r\n 'F': Formula.parse('(p&~p)'),\r\n 'T': Formula.parse('~(p&~p)')}\r\n return formula.substitute_operators(map_operators)", "def __and__(self, other):\n return intersect(self, other)", "def and_(self, other):\n if not isinstance(other, (Query, QueryCompound)) or other.isNull():\n return self.copy()\n elif self.isNull():\n return other.copy()\n else:\n # grow this if the operators are the same\n if self.__op == QueryCompound.Op.And:\n 
queries = list(self.__queries) + [other]\n return QueryCompound(*queries, op=QueryCompound.Op.And)\n else:\n return QueryCompound(self, other, op=QueryCompound.Op.And)", "def test_pathop1(self):\n xpb = XPathBuilder()\n xp = xpb.foo.bar & xpb.bar.foo\n exp = '/foo/bar and /bar/foo'\n self.assertEqual(xp.tostring(), exp)", "def op(self) -> Literal[\"==\"] | Literal[\"<=\"] | Literal[\">=\"]:\n ...", "def test_pathop10(self):\n xpb = XPathBuilder()\n xp = (xpb.foo & xpb.bar | xpb.baz).parenthesize() & xpb.foobar\n exp = '(/foo and /bar or /baz) and /foobar'\n self.assertEqual(xp.tostring(), exp)", "def __le__(self, other):\n return _generate_relational_expression(_le, self, other)", "def less_than_or_equal(self) -> global___Expression:", "def __and__(self, other):\n union = proto.FilterExpression()\n domains = [self.filter, other.filter]\n union.filter_union.filter_expressions.extend(domains)\n self.filter = union\n return self", "def filter_and(filters):\n def filt(item):\n for f in filters:\n if not f(item):\n return False\n return True\n return filt", "def test_and_then(\n self,\n start: Result[int, int],\n first: t.Callable[[int], Result[int, int]],\n second: t.Callable[[int], Result[int, int]],\n exp: Result[int, int],\n ) -> None:\n assert start.and_then(first).and_then(second) == exp" ]
[ "0.6913351", "0.6844835", "0.6834847", "0.68041515", "0.6614185", "0.6585983", "0.65602845", "0.65299505", "0.6528684", "0.6510841", "0.6501651", "0.64942497", "0.6466398", "0.6432646", "0.639087", "0.6376711", "0.6334177", "0.6331774", "0.63316846", "0.6288898", "0.62697506", "0.62308896", "0.62308514", "0.62090707", "0.62063646", "0.61766183", "0.61751795", "0.617076", "0.61665165", "0.6148593", "0.60970813", "0.6046134", "0.6040575", "0.6003985", "0.59954596", "0.599482", "0.5944566", "0.5912189", "0.5905758", "0.5879148", "0.58318156", "0.58232903", "0.5822931", "0.5821608", "0.579306", "0.57708895", "0.57708895", "0.57708895", "0.57708895", "0.57708895", "0.5745658", "0.57407916", "0.57351184", "0.5720683", "0.5714338", "0.5713976", "0.5676874", "0.5669976", "0.56577057", "0.5650796", "0.56462824", "0.5627038", "0.5626583", "0.5602917", "0.55926865", "0.5585748", "0.5575975", "0.55558485", "0.5552535", "0.553487", "0.5531191", "0.55120945", "0.5501878", "0.5493118", "0.54901034", "0.5482169", "0.54508764", "0.5446951", "0.54214096", "0.5411467", "0.53934443", "0.53934443", "0.53542286", "0.53526646", "0.5337076", "0.5336293", "0.5284144", "0.52558523", "0.5242038", "0.5241389", "0.523373", "0.5216926", "0.52088875", "0.5208302", "0.52061176", "0.5197937", "0.5195612", "0.51852405", "0.518513", "0.5184801", "0.51795256" ]
0.0
-1
Interpret for left != right
def _op_ne(self, left: Any, right: Any) -> BoolOrIter: out = self._op_eq(left, right) if isinstance(out, (numpy.ndarray, Series)): neout = ~out # neout[pandas.isna(out)] = numpy.nan return neout # out is always a numpy.ndarray return not out # pragma: no cover
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __ne__(left, right):\n return (not (left == right))", "def ne (x,y):\n\n return not (le(x,y) and le(y,x))", "def nexact(cls, lhs, rhs):\n return lhs != rhs", "def ne (self, other):\n return not (self == other) # opposite of eq", "def _isLeft(P0, P1, P2):\n return (P1.x - P0.x)*(P2.y - P0.y) - (P2.x - P0.x)*(P1.y - P0.y)", "def __ne__(self, other):\n return tf.math.not_equal(self._ordinals, other.ordinal())", "def __ne__(self, other):\n return np.all(self.grid != other.grid) or np.all(self.pos != other.pos)", "def __neq__(self, other): \n return not self == other", "def __ne__(self, other):\n return self.x != other.x or self.y != other.y", "def assert_equal(left, right):\n msg = \"{} != {}\".format(left, right)\n assert left == right, msg", "def equal_mirror(t, s):\n if t is None and s is None:\n return True\n if t is None or s is None:\n return False\n if t.value != s.value:\n return False\n return equal_mirror(t.left, s.right) and equal_mirror(t.right, s.left)", "def test_not_equal(self):\n utils.compare_tracing_methods(\n SimpleCompareOpsModule(\"notEqual\"),\n torch.randn(3, 4, 5),\n torch.randn(3, 4, 5),\n fusible_ops={\"aten::ne\"},\n )", "def __neq__(self, other):\n return self.index != other.index", "def __ne__(self, other):\r\n if isinstance(other, vec4):\r\n return self.x!=other.x or self.y!=other.y or self.z!=other.z\r\n else:\r\n return 1", "def __ne__(self, other):\n return not self == other", "def __ne__(self, rhs):\n return not self.__eq__(rhs)", "def __ne__(self, other):\n\t\treturn not self == other", "def __ne__(self, other):\r\n\t\treturn (self.type != other.type or self.value != other.value)", "def __ne__(self, other):\r\n return not self == other", "def __ne__(self, other):\r\n return not self == other", "def __invert__(self) -> BooleanExpression:", "def __ne__(self, other):\n return tuple(self) != tuple(other)", "def __ne__(self, other):\n return not self == other", "def __ne__(self, other):\n return not self == other", "def __ne__(self, other):\n self.numerator=self.numerator*other.denominator\n other.numerator=self.denominator*other.numerator\n if(self.numerator!=other.numerator):\n return True\n else:\n return False", "def __ne__(self, other):\n return not self == other", "def __ne__(self,other):\n return not self == other", "def __ne__(self, other):\r\n return not (self == other)", "def pairwise(a,b):\n return a != b", "def __ne__(self,other):\n return not (self == other)", "def __ne__(self, other: 'LTL'):\n return not (self == other)", "def __ne__(self, other):\n return not self == other", "def __ne__(self, other):\n return not self == other", "def __ne__(self, other):\n return not self == other", "def __ne__(self, other):\n return not self == other", "def __ne__(self, other):\n return not self == other", "def __ne__(self, other):\n return not self == other", "def __ne__(self, other):\n return not self == other", "def __ne__(self, other):\n return not self == other", "def __ne__(self, other):\n return not self == other", "def __ne__(self, other):\n return not self == other", "def __ne__(self, other):\n return not self == other", "def __ne__(self, other):\n return not self == other", "def __ne__(self, other):\n return not self == other", "def __ne__(self, other):\n return not self == other", "def __ne__(self, other):\n return not self == other", "def __ne__(self, other):\n return not self == other", "def __ne__(self, other):\n return not self == other", "def __ne__(self, other):\n return not self == other", "def __ne__(self, other):\n return not self == 
other", "def __ne__(self, other):\n return not self == other", "def __ne__(self, other):\n return not self == other", "def __ne__(self, other):\n return not self == other", "def __ne__(self, other):\n return not self == other", "def __ne__(self, other):\n return not self == other", "def __ne__(self, other):\n return not self == other", "def __ne__(self, other):\n return not self == other", "def __ne__(self, other):\n return not self == other", "def __ne__(self, other):\n return not self == other", "def __ne__(self, other):\n return not self == other", "def __ne__(self, other):\n return not self == other", "def __ne__(self, other):\n return not self == other", "def __ne__(self, other):\n return not self == other", "def __ne__(self, other):\n return not self == other", "def __ne__(self, other):\n return not self == other", "def __ne__(self, other):\n return not self == other", "def __ne__(self, other):\n return not self == other", "def __ne__(self, other):\n return not self == other", "def __ne__(self, other):\n return not self == other", "def __ne__(self, other):\n return not self == other", "def __ne__(self, other):\n return not self == other", "def __ne__(self, other):\n return not self == other", "def __ne__(self, other):\n return not self == other", "def __ne__(self, other):\n return not self == other", "def __ne__(self, other):\n return not self == other", "def __ne__(self, other):\n return not self == other", "def __ne__(self, other):\n return not self == other", "def __ne__(self, other):\n return not self == other", "def __ne__(self, other):\n return not self == other", "def __ne__(self, other):\n return not self == other", "def __ne__(self, other):\n return not self == other", "def __ne__(self, other):\n return not self == other", "def __ne__(self, other):\n return not self == other", "def __ne__(self, other):\n return not self == other", "def __ne__(self, other):\n return not self == other", "def __ne__(self, other):\n return not self == other", "def __ne__(self, other):\n return not self == other", "def __ne__(self, other):\n return not self == other", "def __ne__(self, other):\n return not self == other", "def __ne__(self, other):\n return not self == other", "def __ne__(self, other):\n return not self == other", "def __ne__(self, other):\n return not self == other", "def __ne__(self, other):\n return not self == other", "def __ne__(self, other):\n return not self == other", "def __ne__(self, other):\n return not self == other", "def __ne__(self, other):\n return not self == other", "def __ne__(self, other):\n return not self == other", "def __ne__(self, other):\n return not self == other", "def __ne__(self, other):\n return not self == other", "def __ne__(self, other):\n return not self == other" ]
[ "0.8098229", "0.70341194", "0.6655749", "0.64999557", "0.64726496", "0.6470631", "0.64697796", "0.6455494", "0.6385901", "0.63324577", "0.6331694", "0.6274154", "0.6211507", "0.6198755", "0.61944705", "0.6193615", "0.6182596", "0.6180644", "0.6173583", "0.6173583", "0.6162615", "0.6160705", "0.6140654", "0.6140654", "0.6138237", "0.6127185", "0.61099315", "0.60963964", "0.6086259", "0.6075714", "0.60756177", "0.6067457", "0.6067457", "0.6067457", "0.6067457", "0.6067457", "0.6067457", "0.6067457", "0.6067457", "0.6067457", "0.6067457", "0.6067457", "0.6067457", "0.6067457", "0.6067457", "0.6067457", "0.6067457", "0.6067457", "0.6067457", "0.6067457", "0.6067457", "0.6067457", "0.6067457", "0.6067457", "0.6067457", "0.6067457", "0.6067457", "0.6067457", "0.6067457", "0.6067457", "0.6067457", "0.6067457", "0.6067457", "0.6067457", "0.6067457", "0.6067457", "0.6067457", "0.6067457", "0.6067457", "0.6067457", "0.6067457", "0.6067457", "0.6067457", "0.6067457", "0.6067457", "0.6067457", "0.6067457", "0.6067457", "0.6067457", "0.6067457", "0.6067457", "0.6067457", "0.6067457", "0.6067457", "0.6067457", "0.6067457", "0.6067457", "0.6067457", "0.6067457", "0.6067457", "0.6067457", "0.6067457", "0.6067457", "0.6067457", "0.6067457", "0.6067457", "0.6067457", "0.6067457", "0.6067457", "0.6067457" ]
0.72011685
1
Recycle left right operands to each other
def _recycle_left_right(left: Any, right: Any) -> Tuple: try: left = recycle_value(left, length_of(right)) except DataUnrecyclable: right = recycle_value(right, length_of(left)) return left, right
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _arithmetize2(self, left: Any, right: Any, op: str) -> Any:\n op_func = getattr(operator, op)\n left, right = _recycle_left_right(left, right)\n return op_func(left, right)", "def RewriteOR(self, left, right):\n return None", "def __call__(self):\n return self._left() + self._right()", "def __radd__(self, left):\n return self.value() + left", "def __radd__(self, other: Any) -> ColumnOperators:\n return self.reverse_operate(add, other)", "def __rshift__(self, other: Any) -> ColumnOperators:\n return self.operate(rshift, other)", "def _op_or_(self, left: Any, right: Any) -> Any:\n if isinstance(left, list):\n return Collection(left, right)\n\n left, right = _recycle_left_right(left, right)\n left = Series(left).fillna(False)\n right = Series(right).fillna(False)\n return left | right", "def __rmul__(self, other: Any) -> ColumnOperators:\n return self.reverse_operate(mul, other)", "def apply(self) -> Operation:\n op = self.popleft()\n op()\n return op", "def __ror__(self, other):\n return whitespaces.CURRENT.normalize(other) | self", "def __rmod__(self, other: Any) -> ColumnOperators:\n return self.reverse_operate(mod, other)", "def commutator(left_operator, right_operator):\n if not isinstance(left_operator, type(right_operator)):\n raise TypeError('operator_a and operator_b are not of the same type.')\n valueable_type = (QubitOperator, FermionOperator, QubitExcitationOperator)\n if not isinstance(left_operator, valueable_type):\n raise TypeError(\n \"Operator should be QubitOperator, FermionOperator or QubitExcitationOperator.\"\n )\n\n result = left_operator * right_operator\n result -= right_operator * left_operator\n return result", "def gen_binop(self, expr: expressions.BinaryOperator):\n if expr.op in [\"*\", \"/\", \"%\", \"^\", \"|\", \"&\", \">>\", \"<<\"]:\n lhs = self.gen_expr(expr.a, rvalue=True)\n rhs = self.gen_expr(expr.b, rvalue=True)\n op = expr.op\n\n ir_typ = self.get_ir_type(expr.typ)\n value = self.builder.emit_binop(lhs, op, rhs, ir_typ)\n elif expr.op == \",\":\n # Handle the comma operator by returning the second result\n self.gen_expr(expr.a, rvalue=True)\n rhs = self.gen_expr(expr.b, rvalue=True)\n value = rhs\n elif expr.op == \"+\":\n # Pay attention to pointer arithmetics!\n lhs = self.gen_expr(expr.a, rvalue=True)\n rhs = self.gen_expr(expr.b, rvalue=True)\n\n # left and right are swapped in semantics if right is pointer.\n if expr.a.typ.is_pointer:\n assert expr.b.typ.is_integer\n esize = self.sizeof(expr.a.typ.element_type)\n assert esize > 0\n if esize != 1:\n esize = self.emit(ir.Const(esize, \"esize\", rhs.ty))\n rhs = self.builder.emit_mul(rhs, esize, rhs.ty)\n rhs = self.builder.emit_cast(rhs, ir.ptr)\n\n ir_typ = self.get_ir_type(expr.typ)\n value = self.builder.emit_binop(lhs, \"+\", rhs, ir_typ)\n elif expr.op == \"-\":\n # Pay attention to pointer arithmetics!\n lhs = self.gen_expr(expr.a, rvalue=True)\n rhs = self.gen_expr(expr.b, rvalue=True)\n ir_typ = self.get_ir_type(expr.typ)\n if expr.a.typ.is_pointer:\n esize = self.sizeof(expr.a.typ.element_type)\n assert esize > 0\n if expr.b.typ.is_pointer:\n # pointer - pointer\n value = self.builder.emit_binop(lhs, \"-\", rhs, ir.ptr)\n value = self.emit(ir.Cast(value, \"typecast\", ir_typ))\n if esize != 1:\n esize = self.emit(ir.Const(esize, \"esize\", ir_typ))\n value = self.emit(\n ir.Binop(value, \"/\", esize, \"rhs\", ir_typ)\n )\n else:\n # pointer - numeric\n if esize != 1:\n esize = self.emit(ir.Const(esize, \"esize\", rhs.ty))\n rhs = self.builder.emit_mul(rhs, esize, rhs.ty)\n rhs = 
self.builder.emit_cast(rhs, ir.ptr)\n value = self.builder.emit_binop(lhs, \"-\", rhs, ir_typ)\n else:\n # numeric - numeric\n value = self.builder.emit_binop(lhs, \"-\", rhs, ir_typ)\n\n elif expr.op in [\"<\", \">\", \"==\", \"!=\", \"<=\", \">=\", \"||\", \"&&\"]:\n value = self.gen_condition_to_integer(expr)\n elif expr.op in [\n \"=\",\n \"+=\",\n \"-=\",\n \"*=\",\n \"%=\",\n \"/=\",\n \">>=\",\n \"<<=\",\n \"&=\",\n \"|=\",\n \"~=\",\n \"^=\",\n ]:\n # Handle struct assignment special case:\n if expr.op == \"=\" and expr.a.typ.is_struct:\n lhs = self.gen_expr(expr.a, rvalue=False)\n rhs = self.gen_expr(expr.b, rvalue=False)\n amount = self.sizeof(expr.a.typ)\n self.gen_copy_struct(lhs, rhs, amount)\n value = None\n else:\n lhs = self.gen_expr(expr.a, rvalue=False)\n rhs = self.gen_expr(expr.b, rvalue=True)\n\n if expr.op == \"=\":\n value = rhs\n else:\n # Handle '+=' and friends:\n op = expr.op[:-1]\n ir_typ = self.get_ir_type(expr.typ)\n loaded = self._load_value(lhs, expr.typ)\n\n # pointer arithmatic:\n if op in [\"+\", \"-\"] and expr.a.typ.is_pointer:\n esize = self.sizeof(expr.a.typ.element_type)\n assert esize > 0\n if esize != 1:\n esize = self.emit(ir.Const(esize, \"esize\", rhs.ty))\n rhs = self.builder.emit_mul(rhs, esize, rhs.ty)\n\n value = self.builder.emit_binop(loaded, op, rhs, ir_typ)\n self._store_value(value, lhs)\n else: # pragma: no cover\n raise NotImplementedError(str(expr.op))\n return value", "def __rsub__(self, other: Any) -> ColumnOperators:\n return self.reverse_operate(sub, other)", "def _rconcat(self, other: Any) -> ColumnOperators:\n return self.reverse_operate(concat_op, other)", "def r_degenerate(self):\n self.tmp = self.left\n self.left = self.right", "def test_chained_right(self):\n n1, n2, n3 = Node('a'), Node('b'), Node('c')\n result = n1 | n2 * 'foo' | n3\n self.assertEqual(n1.eout, [Edge(n1, n2)])\n self.assertEqual(n2.ein, [Edge(n1, n2)])\n self.assertEqual(n2.eout, [Edge(n2, n3, 'foo')])\n self.assertEqual(n3.ein, [Edge(n2, n3, 'foo')])", "def commute_operands(self, node):\n def is_assumption(n):\n \"\"\"Return whether a node is an assumption.\"\"\"\n if not isinstance(n, types.Symbol):\n return False\n symbol = self.symbol_table.lookup(n.name)\n if symbol and symbol.type_ == SymbolType.StackItem:\n return True\n return False\n\n def has_assumption(n):\n \"\"\"Return whether a BinOpCode contains an assumption.\"\"\"\n if not isinstance(n, types.BinOpCode):\n return False\n return any(is_assumption(i) for i in [n.left, n.right])\n\n def should_commute(n):\n return is_assumption(n) or has_assumption(n)\n\n # Commute operands of different operations.\n # e.g. 
2 + assumption + 3 --> 2 + 3 + assumption\n if self.is_commutative(node) and has_assumption(node.left) and node.left.name == node.name:\n # Move the assumption so we can be sure it's in the attribute 'right'.\n if is_assumption(node.left.left):\n node.left.left, node.left.right = node.left.right, node.left.left\n\n self.debug('Commuting operations for %s and %s' % (format_structural_op(node.left), format_structural_op(node.right)), node.lineno)\n right = node.right\n node.right = node.left.right\n node.left.right = right\n\n if should_commute(node.left) or not should_commute(node.right):\n return\n\n if self.is_commutative(node):\n self.debug('Commuting operands for %s' % format_structural_op(node), node.lineno)\n node.left, node.right = node.right, node.left\n elif self.has_logical_equivalent(node):\n logmsg = 'Replacing %s with logical equivalent ' % format_structural_op(node)\n node.name = logical_equivalents[node.name]\n node.left, node.right = node.right, node.left\n logmsg += format_structural_op(node)\n self.debug(logmsg, node.lineno)", "def reverse_operate(\n self, op: OperatorType, other: Any, **kwargs: Any\n ) -> Operators:\n raise NotImplementedError(str(op))", "def l_un_degenerate(self):\n self.right = self.tmp", "def __rtruediv__(self, other: Any) -> ColumnOperators:\n return self.reverse_operate(truediv, other)", "def rotate_right(self):\n\t\ttemp = self.left\n\t\tself.left = temp.right\n\t\ttemp.right = self\n\t\tself = temp", "def __mul__(self,other):\n return compositeORGenerator(left = self, right = other)", "def reverse_distribute(node: NodeT) -> NodeT:\n\n def visitor(node: NodeT, left_distribute: bool) -> NodeT:\n \"\"\"Apply left- or right-distributive property in reverse, if possible\n\n Args:\n node: ir.Node to process.\n left_distribute: Whether to apply *left*-distributive property.\n\n Returns:\n Processed node.\n \"\"\"\n if isinstance(node, ir.AddSub):\n items = OrderedDict() # type: Dict[ir.Node, List[Tuple[str, ir.Node]]]\n new_operators = []\n new_operands = []\n for operator, operand in zip(('+',) + getattr(node, 'operator'),\n getattr(node, 'operand')):\n if (operator == '+' and isinstance(operand, ir.MulDiv) and\n getattr(operand, 'operator') == ('*',)):\n if left_distribute:\n coeff, item = getattr(operand, 'operand')\n else:\n item, coeff = getattr(operand, 'operand')\n items.setdefault(coeff, []).append((operator, item))\n else:\n new_operators.append(operator)\n new_operands.append(operand)\n for coeff, item in items.items():\n operator, operand = zip(*item)\n assert operator[0] == '+'\n new_operators.append(operator[0])\n if len(operand) > 1:\n new_item = ir.AddSub(operator=operator[1:], operand=operand)\n else:\n new_item = operand[0]\n if left_distribute:\n children = coeff, new_item\n else:\n children = new_item, coeff\n new_operands.append(ir.MulDiv(operator=('*',), operand=children))\n if len(new_operands) > 1:\n assert new_operators[0] == '+'\n new_node = ir.AddSub(operator=tuple(new_operators[1:]),\n operand=tuple(new_operands))\n if new_node != node:\n return new_node # type: ignore\n elif new_operands and new_operands[0] != node:\n return new_operands[0]\n return node\n\n return node.visit(visitor, True).visit(visitor, False)", "def __rrshift__(self, other):\n if isinstance(other, Callable):\n return self @ other\n else:\n return self(other) # Function application", "def right_shift(lhs, rhs):\n return _make.right_shift(lhs, rhs)", "def r_un_degenerate(self):\n self.left = self.tmp", "def __rshift__(self, other):\n other.set_upstream(self)\n 
# return other so a >> b >> c works\n return other", "def __rmul__(self, left):\n return Factorization([(left, 1)]) * self", "def l_degenerate(self):\n self.tmp = self.right\n self.right = self.left", "def __or__(self, other):\r\n return self + other - self * other", "def __init__(self, left, right):\n super(compositeORGenerator,self).__init__()\n self._left = left\n self._right = right", "def __rxor__(self, other):\n return whitespaces.CURRENT.normalize(other) ^ self", "def visit_or(self, left_result: T, right_result: T) -> T:", "def __call__(self, value):\n self.right.left = self.__class__(value)\n self.right.left.right = self.right\n self.right = self.right.left\n self.right.left = self", "def __ror__(self, other):\n return self._operation_or(other)", "def rotate_left(self):\n\t\ttemp = self.right\n\t\tself.right = temp.left\n\t\ttemp.left = self\n\t\tself = temp", "def product(self):\n return self.right[self.i:] + self.left[:self.i], self.left[self.i:] + self.right[:self.i]", "def __lshift__(self, other: Any) -> ColumnOperators:\n return self.operate(lshift, other)", "def visit_and(self, left_result: T, right_result: T) -> T:", "def __rshift__(self, fn):\n if self is Nothing:\n return Nothing\n else:\n v = self.right if self.is_right() else self.left\n fn = liftF(fn, self.__class__)\n return unlift(fn(v))", "def _reduce_expr(tree, tok):\n second = tree.pop()\n if len(tree) > 0 and not Parser._is_unary_op(tok):\n first = tree.pop()\n expr = BinaryExpression(first, tok, second)\n else:\n expr = UnaryExpression(second, tok)\n tree.append(expr)", "def right(self, right):\n self.ptr.right(right)", "def plus(self, other):\n return self | other", "def _right(node):\n return 2 * node + 2", "def bitwise_rshift(self, other: Any) -> ColumnOperators:\n\n return self.operate(bitwise_rshift_op, other)", "def __rshift__(self, other):\n if isinstance(other, Composable):\n return other @ self\n elif isinstance(other, Callable):\n return Function(lambda x: other(self(x)))\n else:\n return NotImplemented", "def _auto_right_assoc(self) -> DifferentialProgram:\n while isinstance(self.left, DiffPair):\n old_left = self.left\n self.left = self.left.left\n # Note: this is a sneaky recursive call!\n self.right = DiffPair(old_left.right, self.right)\n\n assert not isinstance(self.left, DiffPair), \"Left should now be a non-pair!\"", "def invert(self):\n if self.type == 'binary':\n self.leftentityid, self.rightentityid = self.rightentityid, self.leftentityid", "def reduce_right(self, func, init=None):\n return self.reverse().reduce(func, init)", "def applyOperators(self):\r\n return [self.moveUp(), self.moveDown(),\r\n self.moveLeft(), self.moveRight()]", "def visit_BinaryOperator(self, node: BinaryOperator) -> Instruction:\n\n left = self.visit(node.left)\n right = self.visit(node.right)\n\n if isinstance(left, VarSymbol):\n left_symbol = self.GLOBAL_MEMORY[left.name]\n else:\n left_symbol = left\n\n if isinstance(right, VarSymbol):\n right_symbol = self.GLOBAL_MEMORY[right.name]\n else:\n right_symbol = right\n\n if node.operator.type == TokenType.PLUS:\n return self.builder.fadd(left_symbol, right_symbol, \"addtmp\")\n elif node.operator.type == TokenType.MINUS:\n return self.builder.fsub(left_symbol, right_symbol, \"subtmp\")\n elif node.operator.type == TokenType.MUL:\n return self.builder.fmul(left_symbol, right_symbol, \"multmp\")\n elif node.operator.type == TokenType.INTEGER_DIV:\n return self.builder.fdiv(left_symbol, right_symbol, \"udivtmp\")\n elif node.operator.type == TokenType.FLOAT_DIV:\n return 
self.builder.fdiv(left_symbol, right_symbol, \"fdivtmp\")", "def product(self):\n return self.left[:self.i] + self.right[self.i:], self.right[:self.i] + self.left[self.i:]", "def reduce(self, binary_operator):\n return functools.reduce(binary_operator, self)", "def _rotate_right(self):\n pivot = self.left\n if pivot is None:\n return\n self.val, pivot.val = pivot.val, self.val\n self.left = pivot.left\n if self.left is not None:\n self.left.parent = self\n pivot.left = pivot.right\n pivot.right = self.right\n if pivot.right is not None:\n pivot.right.parent = pivot\n self.right = pivot", "def _flatten(self, op):\n if isinstance(self, op):\n for i, arg in enumerate(self._args):\n if isinstance(arg, self.DUAL):\n others = self._args[:i] + self._args[i+1:]\n expr = op.DUAL(*[op(a, *others) for a in arg.args])\n if isinstance(expr, OrAnd):\n return expr._flatten(op)\n else:\n return expr\n else:\n return self\n else:\n nested, others = list(), list()\n for arg in self._args:\n if arg.depth > 1:\n nested.append(arg)\n else:\n others.append(arg)\n args = [arg._flatten(op) for arg in nested] + others\n return op.DUAL(*args)", "def fn(node):\n if not node: return \n node.left, node.right = fn(node.right), fn(node.left)\n return node", "def __rshift__(self, other):\n return Implies(self, other)", "def pull_out_quantifications_from_left_across_binary_operator(formula:\r\n Formula) -> \\\r\n Tuple[Formula, Proof]:\r\n assert has_uniquely_named_variables(formula)\r\n assert is_binary(formula.root)\r\n # Task 11.7.1\r\n\r\n\r\n prover = Prover(Prover.AXIOMS.union(ADDITIONAL_QUANTIFICATION_AXIOMS))\r\n\r\n # Basic Case - No quantifier to change n = 0 and no n = 1\r\n if not is_quantifier(formula.first.root):\r\n ccl = equivalence_of(formula, formula)\r\n prover.add_tautology(ccl)\r\n return formula, prover.qed()\r\n\r\n\r\n # Without the predicate\r\n form = Formula(formula.root, formula.first.predicate, formula.second)\r\n pred, proof = pull_out_quantifications_from_left_across_binary_operator(form)\r\n\r\n my_quantifier = formula.first.root\r\n\r\n # Define (or change) the quantifier and define the axioms depending on the binary operator\r\n if formula.root == \"->\":\r\n if my_quantifier == \"A\":\r\n my_quantifier = \"E\"\r\n axiom_scd = 10\r\n else: # \"E\"\r\n my_quantifier = \"A\"\r\n axiom_scd = 11\r\n\r\n elif formula.root == \"&\":\r\n axiom_scd = 2 if my_quantifier == \"A\" else 3\r\n\r\n else: # \"|\" or\r\n axiom_scd = 6 if my_quantifier == \"A\" else 7\r\n\r\n\r\n\r\n # proof for changing quantifier\r\n # because add_proof() is my friend\r\n step1 = prover.add_proof(proof.conclusion, proof)\r\n\r\n form2 = Formula(\"->\", proof.conclusion, equivalence_of(Formula(my_quantifier, formula.first.variable, form),\r\n Formula(my_quantifier, formula.first.variable, pred)))\r\n my_map2 = {'R': str(form.substitute({formula.first.variable: Term(\"_\")})),\r\n 'Q': str(pred.substitute({formula.first.variable: Term(\"_\")})), \"x\": formula.first.variable, \"y\": formula.first.variable}\r\n\r\n step2 = prover.add_instantiated_assumption(form2,\r\n ADDITIONAL_QUANTIFICATION_AXIOMS[14 if my_quantifier==\"A\" else 15], my_map2)\r\n\r\n step3 = prover.add_mp(equivalence_of(Formula(my_quantifier, formula.first.variable, form),\r\n Formula(my_quantifier, formula.first.variable, pred)), step1, step2)\r\n\r\n\r\n my_map4 = {'R': str(formula.first.predicate.substitute({formula.first.variable: Term(\"_\")})), \"x\": formula.first.variable, \"Q\" : str(formula.second)}\r\n form4 = equivalence_of(formula, 
Formula(my_quantifier, formula.first.variable, form))\r\n step4 = prover.add_instantiated_assumption(form4,\r\n ADDITIONAL_QUANTIFICATION_AXIOMS[axiom_scd], my_map4)\r\n\r\n prover.add_tautological_implication(equivalence_of(formula, Formula(my_quantifier, formula.first.variable, pred)), [step3, step4])\r\n\r\n return Formula(my_quantifier, formula.first.variable, pred), prover.qed()", "def __radd__(self, left_arr):\n concat_arr = left_arr.copy() # Create new instance to return\n concat_arr.extend(self)\n return concat_arr", "def substitute_type_and_operands(self, old_inst, new_inst):\n if self.type_id == old_inst.result_id:\n self.type_id = new_inst.result_id\n for idx in range(len(self.operands)):\n if self.operands[idx] == old_inst.result_id:\n self.operands[idx] = new_inst.result_id", "def __or__(self, other):\n return self.__add__(other)", "def __rmul__(self, other):\n return self.runtime.mul(self, other)", "def logical_op(self, other):\n if isinstance(other, plist):\n if len(self) == len(other):\n try:\n return plist([op(x, o) for x, o in zip(self, other)])\n except Exception:\n pass\n self_flat = self.ungroup(-1)\n other_flat = other.ungroup(-1)\n ids = op(set([id(x) for x in self_flat]),\n set([id(x) for x in other_flat]))\n if op is operator.__and__ or op is operator.__iand__:\n return plist([x for x in self_flat if id(x) in ids]) # Don't pass root -- we are uprooting\n else:\n return plist(\n [ids.remove(id(x)) or x for x in self_flat if id(x) in ids] +\n [ids.remove(id(x)) or x for x in other_flat if id(x) in ids]\n ) # Don't pass root -- we are uprooting\n else:\n return plist([op(x, other) for x in self], root=self.__root__)", "def distribute_and_over_or(s):\n if s.op == '|':\n s = associate('|', s.args)\n if s.op != '|':\n return distribute_and_over_or(s)\n if len(s.args) == 0:\n return FALSE\n if len(s.args) == 1:\n return distribute_and_over_or(s.args[0])\n conj = find_if((lambda d: d.op == '&'), s.args)\n if not conj:\n return s\n others = [a for a in s.args if a is not conj]\n rest = associate('|', others)\n return associate('&', [distribute_and_over_or(c|rest)\n for c in conj.args])\n elif s.op == '&':\n return associate('&', map(distribute_and_over_or, s.args))\n else:\n return s", "def __radd__(self,that):\n return self.__opExpand2(that,np.add)", "def __invert__(self) -> Operators:\n return self.operate(inv)", "def __rmul__(self, other):\n return self * other", "def __rmul__(self, other):\n return self * other", "def __rsub__(self, left):\n return left - self.value()", "def flatten(self):\n to_remove = []\n for elem in self.operands:\n # if element belong to same class (nested And's, Or's)\n if isinstance(elem, self.__class__):\n # recursive flattening first\n elem.flatten()\n # remove from current list\n to_remove.append(elem)\n\n # add new elements\n for elem in to_remove:\n self.operands.remove(elem)\n self.operands.extend(elem.operands)", "def left_shift(lhs, rhs):\n return _make.left_shift(lhs, rhs)", "def __init__(self, operation, left, right):\n self.operation = operation\n self.left = left\n self.right = right", "def eliminate_immediate_recursion(self):\n assert self.is_canonical\n # Eliminate immediate left recursion\n # Replace rules\n # A -> A alpha1 | A alpha2 | beta1 | beta2\n # with\n # A -> beta1 A' | beta2 A'\n # A' -> alpha1 A' | alpha2 A' | epsilon\n #\n # When A can produce epsilon directly:\n # A -> A alpha1 | A alpha2 | beta1 | beta2 | epsilon\n # with\n # A -> beta1 A' | beta2 A' | A'\n # A' -> alpha1 A' | alpha2 A' | epsilon\n preorder_names = 
self.preorder()\n for rule_name in preorder_names:\n rule = self.rules[rule_name]\n changed = False\n has_immediate_left_recursion = False\n for rhs in rule.as_container():\n first = rhs.as_container()[0]\n if first.is_symbol_name() and first.content is rule_name:\n has_immediate_left_recursion = True\n break\n if has_immediate_left_recursion:\n self_parts = [] # Becomes new right-hand-side for A\n rest_name = \"{}.rest\".format(rule_name)\n assert rest_name not in self.rules\n rest_parts = [] # Becomes new right-hand-side for A'\n for rhs in rule.as_container():\n phrase = rhs.as_container()\n first = phrase[0]\n rest = phrase[1:]\n if first.is_symbol_name() and first.content is rule_name:\n rest_parts.append(self.MakeSeq(rest + [self.MakeSymbolName(rest_name)]))\n else:\n # TODO: use list_without_empty to shorten this\n if len(phrase) > 0 and phrase[0].is_empty():\n # beta is epsilon\n assert len(phrase) == 1\n self_parts.append( self.MakeSymbolName(rest_name) )\n else:\n self_parts.append( self.MakeSeq([x for x in phrase] + [self.MakeSymbolName(rest_name)]) )\n rest_parts.append(self.MakeEmpty())\n self.rules[rule_name] = self.MakeChoice(self_parts)\n self.rules[rest_name] = self.MakeChoice(rest_parts)", "def make_looped(self) -> None:\n self.most_right.right_node = self.most_left\n self.most_left.left_node = self.most_right", "def __rmul__(self, other):\n\n return self * other", "def __rshift__(self, other):\r\n return NotImplemented", "def __rmul__(self, other):\n return self.__mul__(other)", "def __rmul__(self, other):\n return self.__mul__(other)", "def __rmul__(self, other):\n return self.__mul__(other)", "def __rrshift__(self, other):\r\n return NotImplemented", "def __rrshift__(self, other):\r\n return NotImplemented", "def get_new_quantifier_and_axioms(binary_op, old_quantifier, is_left):\n new_quantifier = old_quantifier\n if binary_op == '&':\n axioms = [2, 3] if is_left else [4, 5]\n\n elif binary_op == '|':\n axioms = [6, 7] if is_left else [8, 9]\n\n # ->\n else:\n if is_left:\n new_quantifier = 'A' if old_quantifier == 'E' else 'E'\n axioms = [10, 11]\n else:\n axioms = [12, 13]\n\n axioms = tuple(map(lambda axiom_num: ADDITIONAL_QUANTIFICATION_AXIOMS[axiom_num], axioms))\n return new_quantifier, axioms[0] if old_quantifier == 'A' else axioms[1]", "def reversals(series, left=False, right=False):\n series = iter(series)\n\n x_last, x = next(series), next(series)\n d_last = (x - x_last)\n\n if left:\n yield x_last\n for x_next in series:\n if x_next == x:\n continue\n d_next = x_next - x\n if d_last * d_next < 0:\n yield x\n x_last, x = x, x_next\n d_last = d_next\n if right:\n yield x_next", "def __ror__(self, other: t.Any) -> InspectableSet[_C]:\n return self._op_copy('__ror__', other)", "def __left(self,i):\n return 1+(i<<1)", "def __rrshift__(self, other):\n return Implies(other, self)", "def controlRight(*args):", "def controlRight(*args):", "def controlRight(*args):", "def controlRight(*args):", "def controlRight(*args):", "def controlRight(*args):", "def controlRight(*args):", "def controlRight(*args):", "def controlRight(*args):", "def controlRight(*args):", "def controlRight(*args):", "def right (x):\n\n return Sinary(side(x.v,0))", "def and__inplace(a,b):" ]
[ "0.68653905", "0.6566106", "0.6530686", "0.6372209", "0.6346159", "0.62722206", "0.6268917", "0.6268676", "0.6260108", "0.6258728", "0.6246955", "0.6224608", "0.62071043", "0.61998665", "0.6179604", "0.6081393", "0.6077263", "0.6012032", "0.5989172", "0.5984579", "0.59795463", "0.592946", "0.5928578", "0.5902864", "0.59005296", "0.5877099", "0.5871691", "0.58697677", "0.5861441", "0.5858072", "0.5850527", "0.5814853", "0.58118224", "0.5810698", "0.5809762", "0.5803975", "0.58036476", "0.575253", "0.57380384", "0.5718986", "0.5711518", "0.57011837", "0.5674583", "0.56731987", "0.5666539", "0.5664246", "0.56546825", "0.56515676", "0.56301546", "0.5627709", "0.5625413", "0.5602889", "0.5600796", "0.5596821", "0.5592916", "0.55852354", "0.5583336", "0.5579253", "0.556273", "0.5546919", "0.5527028", "0.55169386", "0.5515338", "0.55144775", "0.551217", "0.5511333", "0.550636", "0.5505682", "0.5505682", "0.5496459", "0.54866475", "0.54847676", "0.5482464", "0.548091", "0.5478244", "0.5471705", "0.5457987", "0.5455487", "0.5455487", "0.5455487", "0.54502606", "0.54502606", "0.5448797", "0.54474413", "0.54350036", "0.5426618", "0.5426447", "0.54212576", "0.54212576", "0.54212576", "0.54212576", "0.54212576", "0.54212576", "0.54212576", "0.54212576", "0.54212576", "0.54212576", "0.54212576", "0.54183185", "0.54123425" ]
0.6620655
1
returns the l2 penalty on (trainable) network parameters combined as sum
def get_l2_penalty(nnet, include_bias=False, pow=2): l2_penalty = 0 # do not include OC-SVM layer in regularization if Cfg.ocsvm_loss: if include_bias: for layer in nnet.trainable_layers: if not layer.issvm: if layer.b is not None: l2_penalty = (l2_penalty + T.sum(abs(layer.W) ** pow) + T.sum(abs(layer.b) ** pow)) else: l2_penalty = l2_penalty + T.sum(abs(layer.W) ** pow) else: for layer in nnet.trainable_layers: if not layer.issvm: l2_penalty = l2_penalty + T.sum(abs(layer.W) ** pow) else: if include_bias: for layer in nnet.trainable_layers: if layer.b is not None: l2_penalty = (l2_penalty + T.sum(abs(layer.W) ** pow) + T.sum(abs(layer.b) ** pow)) else: l2_penalty = l2_penalty + T.sum(abs(layer.W) ** pow) else: for layer in nnet.trainable_layers: l2_penalty = l2_penalty + T.sum(abs(layer.W) ** pow) return T.cast(l2_penalty, dtype='floatX')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_l2_reg(self) -> torch.Tensor:\n loss = 0\n for param in self.model.parameters():\n loss += (param ** 2).sum()\n return loss", "def l2_regularization_penalty(self):\n return self.l2 * (np.linalg.norm(self.weights)**2)", "def l2_training_penalty(batched_out: base.Output):\n if isinstance(batched_out, base.OutputWithPrior):\n return 0.5 * jnp.mean(jnp.square(batched_out.train))\n else:\n logging.warning('L2 weight penalty only works for OutputWithPrior.')\n return 0.", "def l2_reg_cost(cost, lambtha, weights, L, m):\n sumWeights = 0\n for i in range(1, L + 1):\n sumWeights += np.linalg.norm(weights['W' + str(i)])\n return cost + sumWeights * lambtha / (2 * m)", "def l2_reg_cost(cost, lambtha, weights, L, m):\n enorm = 0\n for i in range(1, L + 1):\n layer = 'W{}'.format(i)\n enorm += np.linalg.norm(weights[layer])\n return cost + (lambtha / (2 * m)) * enorm", "def l2_reg_cost(cost, lambtha, weights, L, m):\n w_norm = 0\n for i in range(1, L + 1):\n w_norm += np.linalg.norm(weights['W' + str(i)])\n L2 = cost + (lambtha / (2 * m) * w_norm)\n return L2", "def get_cost(self, y_enc, output, w1, w2):\n cost = - np.sum(y_enc*np.log(output))\n # add the L2 regularization by taking the L2-norm of the weights and multiplying it with our constant.\n l2_term = (self.l2/2.0) * (np.sum(np.square(w1[:, 1:])) + np.sum(np.square(w2[:, 1:])))\n cost = cost + l2_term\n return cost/y_enc.shape[1]", "def l2_reg_cost(cost, lambtha, weights, L, m):\n f = 0\n while (L):\n index = \"W{}\".format(L)\n weight = weights[index]\n f += np.linalg.norm(weight)\n L -= 1\n return cost + lambtha / (2 * m) * f", "def l2_reg_cost(cost, lambtha, weights, L, m):\n Frobenius = 0\n for k, v in weights.items():\n if k[0] == \"W\":\n Frobenius += np.linalg.norm(v)\n return cost + (lambtha/(2*m)) * Frobenius", "def l2_regularization(W, reg_strength):\n # TODO: Copy from the previous assignment\n loss = reg_strength*np.sum(W*W)\n grad = 2*reg_strength*W\n return loss, grad", "def regularization_loss(params: hk.Params) -> jnp.ndarray:\r\n\r\n # L1 Loss\r\n sum_in_layer = lambda p: jnp.sum(jnp.abs(p))\r\n sum_p_layers = [sum_in_layer(p) for p in jax.tree_leaves(params)]\r\n l1_loss = sum(sum_p_layers)\r\n\r\n # L2 Loss\r\n l2_loss = 0.5 * sum(jnp.sum(jnp.square(p)) for p in jax.tree_leaves(params))\r\n\r\n return l2_coef * l2_loss + l1_coef * l1_loss", "def get_prod_penalty(nnet):\n\n assert Cfg.ocsvm_loss is True\n\n penalty = 0\n layers = nnet.trainable_layers\n num_layers = len(layers) - 1 # do not regularize parameters of oc-svm layer\n assert num_layers > 0\n\n W_norm_prod = 1.0\n\n if layers[num_layers-1].b is not None:\n penalty += T.sum(layers[num_layers-1].b ** 2)\n\n for i in range(num_layers-1):\n W_norm_prod *= T.sum(layers[num_layers-1-i].W ** 2)\n if layers[num_layers-2-i].b is not None:\n penalty += W_norm_prod * T.sum(layers[num_layers-2-i].b ** 2)\n\n W_norm_prod *= T.sum(layers[0].W ** 2)\n\n penalty += W_norm_prod\n penalty *= T.sum(nnet.ocsvm_layer.W ** 2)\n\n return penalty", "def l2(weights):\n\treturn np.sqrt(np.sum(weights * weights))", "def l2_reg_cost(cost):\n return cost + tf.losses.get_regularization_losses()", "def l2_reg_cost(cost):\n return cost + tf.losses.get_regularization_losses()", "def l2_reg_cost(cost):\n return cost + tf.losses.get_regularization_losses()", "def l2_reg_cost(cost):\n\n return cost + tf.losses.get_regularization_losses()", "def get_spectral_penalty(nnet, include_bias=False):\n\n penalty = 0\n\n for layer in nnet.trainable_layers:\n if not layer.issvm:\n eigenvalues, eigvec 
= T.nlinalg.eigh(T.dot(layer.W, layer.W.T))\n eig_max = T.max(eigenvalues)\n penalty = penalty + eig_max\n\n if include_bias:\n for layer in nnet.trainable_layers:\n if (not layer.issvm) and (layer.b is not None):\n penalty = penalty + T.sum(abs(layer.b) ** 2)\n\n return T.cast(penalty, dtype='floatX')", "def l2_regularization(cg, rate=0.01):\n W = VariableFilter(roles=[WEIGHT])(cg.variables)\n L2_cost = rate * l2_norm(W)\n\n return L2_cost", "def l2(parameter, bias=None, reg=0.01, lr=0.1):\n \n if bias is not None:\n w_and_b = torch.cat((parameter, bias.unfold(0,1,1)),1)\n else:\n w_and_b = parameter\n L2 = reg # lambda: regularization strength\n Norm = (lr*L2/w_and_b.norm(2))\n if Norm.is_cuda:\n ones_w = torch.ones(parameter.size(), device=torch.device(\"cuda\"))\n else:\n ones_w = torch.ones(parameter.size(), device=torch.device(\"cpu\"))\n l2T = 1.0 - torch.min(ones_w, Norm)\n update = (parameter*l2T) \n parameter.data = update\n # Update bias\n if bias is not None:\n if Norm.is_cuda:\n ones_b = torch.ones(bias.size(), device=torch.device(\"cuda\"))\n else:\n ones_b = torch.ones(bias.size(), device=torch.device(\"cpu\"))\n l2T = 1.0 - torch.min(ones_b, bias)\n update_b = (bias*l2T)\n bias.data = update_b", "def compute_cost(AL, Y, parameters ,lambd):\n L = len(parameters) // 2\n m = Y.shape[1]\n cost = -1 / m * np.sum(np.nan_to_num(Y * np.log(AL) + (1-Y) * np.log(1-AL)))\n cost+= 0.5*(lambd/m)*sum(np.linalg.norm(parameters['W' + str(i)])**2 for i in range(1,L))\n return cost", "def l2_loss(params):\n \"\"\" It is a vec for each branch\"\"\"\n loss_branches_vec = []\n # TODO This is hardcoded but all our cases rigth now uses four branches\n for i in range(len(params['branches']) -1):\n loss_branches_vec.append(((params['branches'][i] - params['targets']) **2\n * params['controls_mask'][i])\n * params['branch_weights'][i])\n \"\"\" The last branch is a speed branch\"\"\"\n # TODO: Activate or deactivate speed branch loss\n loss_branches_vec.append((params['branches'][-1] - params['inputs']) ** 2\n * params['branch_weights'][-1])\n return loss_branches_vec, {}", "def L2Learning(**kwargs):\n past_task_params = kwargs['past_task_params']\n dataloader = kwargs['dataloader']\n epochs = kwargs['epochs']\n optim = kwargs['optim']\n crit = kwargs['crit']\n net = kwargs['net']\n ld = kwargs['ld']\n\n for epoch in range(epochs):\n running_loss = 0.0\n for x, y in dataloader:\n if torch.cuda.is_available():\n x = x.cuda()\n y = y.cuda()\n\n optim.zero_grad()\n outputs = net(x)\n loss = crit(outputs, y)\n\n reg = 0.0\n for past_param in past_task_params:\n for i, param in enumerate(net.parameters()):\n penalty = (past_param[i] - param) ** 2\n reg += penalty.sum()\n loss += reg * (ld / 2)\n\n loss.backward()\n optim.step()\n running_loss += loss.item()\n\n if epoch % 10 == 9:\n print(\"[Epoch %d/%d] Loss: %.3f\"%(epoch+1, epochs, running_loss))\n\n ### Save parameters to use next task learning\n tensor_param = []\n for params in net.parameters():\n tensor_param.append(params.detach().clone())\n '''\n tensor_param = torch.stack(tensor_param)\n\n if past_task_params.nelement() > 0:\n past_task_params = torch.cat((past_task_params, tensor_param.unsqueeze(0)))\n else:\n past_task_params = tensor_param.unsqueeze(0)\n '''\n past_task_params.append(tensor_param)", "def _construct_reg_costs(self):\n param_reg_cost = sum([T.sum(p**2.0) for p in self.joint_params])\n return param_reg_cost", "def nn_cost_function(nn_params, input_layer_size, hidden_layer_size, num_labels, X, y, l):\n Theta_1 = 
np.reshape(nn_params[0:(hidden_layer_size * (input_layer_size + 1)), ],\n (hidden_layer_size, input_layer_size + 1))\n Theta_2 = np.reshape(nn_params[(hidden_layer_size * (input_layer_size + 1)):, ],\n (num_labels, hidden_layer_size + 1))\n\n m, n = X.shape\n X = np.hstack((np.ones((m, 1)), X))\n\n Z_2 = X.dot(Theta_1.T)\n A_2 = sigmoid(Z_2)\n A_2 = np.hstack((np.ones((m, 1)), A_2))\n\n Z_3 = A_2.dot(Theta_2.T)\n A_3 = sigmoid(Z_3)\n\n Y = np.zeros((m, num_labels))\n for i in range(m):\n Y[i, y[i] - 1] = 1\n\n j = 0.0\n for i in range(m):\n j += np.log(A_3[i, ]).dot(-Y[i, ].T) - np.log(1 - A_3[i, ]).dot(1 - Y[i, ].T)\n j /= m\n\n Theta_1_square = np.square(Theta_1[:, 1:])\n Theta_2_square = np.square(Theta_2[:, 1:])\n reg = 1.0 * l / (2 * m) * (np.sum(Theta_1_square) + np.sum(Theta_2_square))\n j += reg\n\n d_3 = A_3 - Y\n D_2 = d_3.T.dot(A_2)\n\n Z_2 = np.hstack((np.ones((m, 1)), Z_2))\n d_2 = d_3.dot(Theta_2) * sigmoid_gradient(Z_2)\n d_2 = d_2[:, 1:]\n D_1 = d_2.T.dot(X)\n\n Theta_1_grad = 1.0 * D_1 / m\n Theta_1_grad[:, 1:] = Theta_1_grad[:, 1:] + 1.0 * l / m * Theta_1[:, 1:]\n\n Theta_2_grad = 1.0 * D_2 / m\n Theta_2_grad[:, 1:] = Theta_2_grad[:, 1:] + 1.0 * l / m * Theta_2[:, 1:]\n\n grad = np.hstack((Theta_1_grad.ravel(), Theta_2_grad.ravel()))\n\n return j, grad", "def penalty_objective(Z_0, Z_1, Z_2, psi, theta):\n return theta * sum(map(l1_od_norm, Z_0)) + (1 - theta) * sum(map(psi, Z_2 - Z_1))", "def cost(self, x, y):\n return self.cross_entropy_error(x,y) + self.l2_regularization_penalty()", "def mpt11_w34_2_v2_lr(optimizer, args, **kwargs):\n\n def _lr_adjuster(epoch, iteration):\n if epoch >= 50:\n lr = args.lr * 0.1\n\n if epoch >= 62:\n lr = lr * 0.1\n\n if epoch >= 70:\n lr = lr * 0.01\n else:\n lr = args.lr\n\n assign_learning_rate(optimizer, lr)\n\n return lr\n\n return _lr_adjuster", "def mpt11_w18_2_v2_lr(optimizer, args, **kwargs):\n\n def _lr_adjuster(epoch, iteration):\n if epoch >= 35:\n lr = args.lr * 0.1\n\n if epoch >= 50:\n lr = lr * 0.1\n\n if epoch >= 60:\n lr = lr * 0.01\n else:\n lr = args.lr\n\n assign_learning_rate(optimizer, lr)\n\n return lr\n\n return _lr_adjuster", "def loss(self, X, y=None):\n scores = None\n ############################################################################\n # TODO: Implement the forward pass for the two-layer net, computing the #\n # class scores for X and storing them in the scores variable. #\n ############################################################################\n hid1, hid1cache = affine_relu_forward(X, self.params['W1'], self.params['b1'])\n scores, scorecache = affine_forward(hid1, self.params['W2'], self.params['b2'])\n ############################################################################\n # END OF YOUR CODE #\n ############################################################################\n\n # If y is None then we are in test mode so just return scores\n if y is None:\n return scores\n\n loss, grads = 0, {}\n ############################################################################\n # TODO: Implement the backward pass for the two-layer net. Store the loss #\n # in the loss variable and gradients in the grads dictionary. Compute data #\n # loss using softmax, and make sure that grads[k] holds the gradients for #\n # self.params[k]. Don't forget to add L2 regularization! #\n # #\n # NOTE: To ensure that your implementation matches ours and you pass the #\n # automated tests, make sure that your L2 regularization includes a factor #\n # of 0.5 to simplify the expression for the gradient. 
#\n ############################################################################\n loss, dscores = softmax_loss(scores, y)\n loss += 0.5 * self.reg *( np.sum(self.params['W1']**2) + np.sum(self.params['W2']**2) )\n\n dhid1, grads['W2'], grads['b2'] = affine_backward(dscores, scorecache)\n dx, grads['W1'], grads['b1'] = affine_relu_backward(dhid1, hid1cache)\n\n grads['W1'] += self.reg * self.params['W1']\n grads['W2'] += self.reg * self.params['W2']\n ############################################################################\n # END OF YOUR CODE #\n ############################################################################\n\n return loss, grads", "def get_learning_rate():\n return 0.00001", "def penalty_calc(self):\n self.p_budget = (self.tx_oma_min - self.rx_unstressed_sensitivity - self.fiber_conn_loss)*self.l_1\n\n # fiber attenuation,\n self.p_atten = self.alpha*self.length # column B\n\n # calculate bandwidth for RIN test (exclude transmitter)\n rin_inverse_bw = np.sqrt(np.square(1.0/self.bw_cd) + np.square(1.0/self.bw_md) + (0.477/(self.rx_bw**2))*self.l_1)\n rin_bw = 1.0 / rin_inverse_bw\n\n # see FC-MSQS-2 equation B.47 in Annex B.4 for the following k_rin = math.sqrt(2.0/math.pi)*erfinv(0.8)\n k_rin = 0.7\n\n # v_rin,\n self.v_rin = (k_rin*1E6*(self.rin_test_isi**2)*rin_bw*\n math.pow(10.0,0.1*self.rin)) # column AK\n\n # Prin,\n print('v_rin: ', self.v_rin)\n print('Q: ',self.Q)\n print('isi_dj_refl_closed :', self.isi_dj_refl_closed)\n self.p_rin = -10.0*np.log10(np.sqrt(1.0-np.multiply(self.v_rin, np.square(self.Q/self.isi_dj_refl_closed)))) # column R\n print(\"P_rin : \", self.p_rin)\n self.beta = (3.14159E-6*self.speedup*self.br_nominal *self.delta_lambda*self.d1*self.length) # column O\n self.sigma_mpn = (self.k_mpn/math.sqrt(2.0)*(self.l_1 -np.exp(-np.square(self.beta)))) # column P\n self.p_mpn = (-10.0*np.log10(np.sqrt(self.l_1 - (self.Q**2)*np.square(self.sigma_mpn)))) # column Q\n self.p_blw = (-10.0*math.log10(math.sqrt(1.0- ((self.Q*self.sigma_blw)/ self.isi_tp4_rx)**2))*self.l_1) # cell T13\n self.p_reflection = -10.0*np.log10(self.isi_reflection) # column N\n self.v_mn = (((1.0-math.pow(10.0,-0.2*self.pmn))/ (self.Q)**2)*self.l_1) # cell AG7\n print(\"isi_center : \", self.isi_center)\n\n self.p_isi_center = -10.0*np.log10(self.isi_center) # column J\n\n self.p_isi_corners = (-10.0*np.log10(self.isi_corners) - self.p_isi_center) # column K\n self.p_isi_dj_center = (-10.0*np.log10(self.isi_dj_refl_closed) - self.p_isi_center) # column L\n self.p_isi_dj_corners = (-10.0*np.log10(self.isi_dj_corners) -self.p_isi_center -self.p_isi_corners) # column M\n\n\n # calculate the \"cross\" penalty contribution, column S\n arg1 = ((self.sigma_blw**2 + self.v_rin)/ np.square(self.isi_dj_refl_closed))\n arg2 = self.l_1 - (self.Q**2)*(arg1 + self.v_mn + np.square(self.sigma_mpn))\n arg3 = (-10.0*np.log10(np.multiply(self.isi_dj_refl_closed, np.sqrt(arg2))))\n self.p_cross_center = ( # column S\n arg3\n - self.p_blw # cell T13\n - self.p_isi_center # column J\n - self.p_isi_dj_center # column L\n - self.p_mpn # column Q\n - self.p_reflection # column N\n - self.p_rin # column R\n - self.pmn*self.l_1) # cell G13\n print('p_isi_center: ', self.p_isi_center)\n\n # calculate the total power budget evaluated at the center of the eye\n self.p_total_center = ( # column T\n self.p_isi_center # column J\n + self.p_isi_dj_center # column L\n + self.p_atten # column B\n + self.p_mpn # column Q\n + self.p_reflection # column N\n + self.p_rin # column R\n + self.p_cross_center # column 
S\n + self.pmn*self.l_1) # cell G13\n # calculate the total power budget evaluated at the corner of the eye\n self.p_total_corners = (\n self.p_isi_center # column J\n + self.p_isi_corners # column K\n + self.p_atten # column B\n + self.p_mpn # column Q\n + self.p_reflection # column N\n + self.p_rin # column R\n + self.p_cross_center # column S\n + self.pmn*self.l_1 # cell G13\n + self.p_isi_dj_corners)# column M\n\n # receiver stressed sensitivity\n self.margin = ( self.p_budget\n - self.p_total_center) # column W\n\n self.rx_stressed_sensitivity = (\n self.tx_oma_min*self.l_1\n - self.chil\n - self.p_mpn\n - self.p_reflection\n - self.p_rin\n - 0.5*self.p_cross_center\n - self.pmn*self.l_1\n - self.margin[self.lnum//2]*self.l_1)\n\n\n # end of GbE10.penalty_calc\n #======================================================================+", "def compute_cost(A2, Y, params):\n m = Y.shape[1]\n \n logprobs1 = -np.dot(Y, np.log(A2).T)\n logprobs2 = -np.dot(1-Y, np.log(1-A2).T)\n cost = 1/m * (logprobs1 + logprobs2)\n \n cost = np.asscalar(cost)\n return cost", "def penalty(self):\n return 0", "def combined_costs(matrix_MSLL_IO):\r\n return", "def loss_fn(model):\n with flax.nn.stateful(state) as new_state:\n with flax.nn.stochastic(prng_key):\n logits = model(batch['image'])\n loss = cross_entropy_loss(logits, batch['label'])\n # TODO(britefury): check if applying L2 regularization to weights but\n # *not* biases improves results\n weight_penalty_params = jax.tree_leaves(model.params)\n weight_l2 = sum([jnp.sum(x ** 2)\n for x in weight_penalty_params\n if x.ndim > 1])\n weight_penalty = l2_reg * 0.5 * weight_l2\n loss = loss + weight_penalty\n return loss, (new_state, logits)", "def adagrad_l1_update(loss, w, l1, learning_rate=1.0):\n g = theano.grad(loss, w) # (tef_n, )\n updates = OrderedDict()\n\n # for each gradient-weights vector pair compile the following\n\n # empty initializations, later of size (tef_n, )\n # maximum penalty accumulator\n u = theano.shared(np.zeros(1, dtype=theano.config.floatX), broadcastable=w.broadcastable)\n\n # actual accumulated penalties\n q = theano.shared(np.zeros(1, dtype=theano.config.floatX), broadcastable=w.broadcastable)\n\n # squared gradients accumulator\n gg = theano.shared(np.zeros(1, dtype=theano.config.floatX), broadcastable=w.broadcastable)\n\n # adagrad learning rate upgrade\n gradient_squared_accu_new = gg + g ** 2 # (tef_n, )\n adagrad_learning_rate = learning_rate / T.sqrt(gradient_squared_accu_new)\n\n # total possible accumulated l1 penalty\n u_new = u + l1 * adagrad_learning_rate\n\n # update rule: w_k+1/2,i = w_k,i - adagrad_lr_k * g_i\n w_tmp = w - adagrad_learning_rate * g\n\n # apply penalties\n if T.gt(l1, 0):\n # if w_k+1/2,i > 0: w_k+1,i = max(0, w_k+1/2,i - (u_k,i + q_k-1,i)) else if: ...\n w_update = T.switch(T.gt(w_tmp, 0.),\n T.maximum(w_tmp - (u_new + q), 0.), # w_tmp > 0\n T.switch(T.lt(w_tmp, 0.0),\n T.minimum(w_tmp + (u_new - q), 0.), # w_tmp < 0\n 0.) 
# w_tmp == 0\n )\n else:\n w_update = w_tmp\n\n # return updates (key: shared variable, value: symbolic variable)\n updates[w] = w_update\n updates[gg] = gradient_squared_accu_new\n updates[u] = u_new\n # actually accumulated penalty\n updates[q] = q + w_update - w_tmp\n\n return updates, u, q, gg", "def penalty(self):\n assert len(self.weights) == len(self.means), \"Dimensions!\"\n out = np.exp(self.data['riskfree'] * self.data['maturity'])\n for weight, mean in zip(self.weights, self.means):\n out -= weight * np.exp(mean * self.data['maturity'])\n return (out**2).mean()**.5", "def l2_regularization(variables, factor=1e-4, name='l2_regularization', collections=['regularization']):\n l2 = tf.add_n([tf.sqrt(2.*tf.nn.l2_loss(var)) for var in variables], name=name) if variables else tf.constant(0.)\n loss = factor * l2\n scalar_summary(loss, name, collections)\n return loss", "def optimize_ppo2(trial):\n\treturn {\n\t\t'n_steps': int(trial.suggest_loguniform('n_steps', 64, 2048)),\n\t\t'gamma': trial.suggest_loguniform('gamma', 0.9, 0.9999),\n\t\t'learning_rate': trial.suggest_loguniform('learning_rate', 1e-5, 1e-1),\n\t\t'ent_coef': trial.suggest_loguniform('ent_coef', 1e-8, 1e-1),\n\t\t'cliprange': trial.suggest_uniform('cliprange', 0.1, 0.4),\n\t\t'noptepochs': int(trial.suggest_loguniform('noptepochs', 1, 48)),\n\t\t'lam': trial.suggest_uniform('lam', 0.8, 1.)\n\t}", "def _add_weight_decay(net, l2_value, skip_list=()):\n decay, no_decay = [], []\n for name, param in net.named_parameters():\n if not param.requires_grad:\n continue # frozen weights\n if len(param.shape) == 1 or name.endswith(\".bias\") or name in skip_list:\n no_decay.append(param)\n else:\n decay.append(param)\n return [{'params': decay, 'weight_decay': l2_value}, {'params': no_decay, 'weight_decay': 0.}, ]", "def weight_l2_norm():\n cumulated_l2_norm = tf.constant(0., dtype=tf.float32)\n for trainable_variable in tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES):\n name = trainable_variable.name.split('/')[-1]\n if name.startswith('weights'):\n cumulated_l2_norm += tf.nn.l2_loss(trainable_variable)\n return cumulated_l2_norm", "def get_lr_cost(self):\n\n\t\tlabels = self.get_symbolic_expected_rewards()\n\n\t\treturn -theano.tensor.mean(\n\t\t\ttheano.tensor.log(labels)[\n\t\t\t\ttheano.tensor.arange(self.symbolic_output.shape[0]),\n\t\t\t\tself.symbolic_output])", "def objective(self,w):\n l = 0\n for i in range(len(self.x)):\n # Each example contributes log(sigma(y_i * x_i . w))\n l -= log(sigmoid(self.y[i] * np.dot(w, self.x[i,:])))\n # regularisation 1/2 * alpha * ||w||^2\n l += 0.5 * self.alpha * np.dot(w,w)\n return l", "def poly_adjust_learning_rate(optimizer, lr0, step, n_step):\n lr = lr0 * (1.0 - step*1.0/n_step)\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr\n return lr", "def EmbeddingL2RegularizationUpdate(embedding_variable, net_input, learn_rate, l2_reg_val):\n # TODO(student): Change this to something useful. 
Currently, this is a no-op.\n # net_input = net_input / tf.norm(net_input)\n net_input = tf.nn.l2_normalize(net_input, axis=0)\n grad = l2_reg_val * tf.matmul(tf.transpose(net_input), tf.matmul(net_input, embedding_variable))\n embedding_variable_ = embedding_variable - learn_rate * grad\n\n ## local test #better to disable when learning\n batch_size, number_of_vocabulary_tokens = net_input.shape\n net_example = numpy.random.binomial(1, .1, (3, number_of_vocabulary_tokens))\n sigma_fnc = l2_reg_val * tf.nn.l2_loss(tf.matmul(net_input, embedding_variable))\n # assert tf.gradients(sigma_fnc, embedding_variable) == grad, \"wrong grad in L2\"\n sess = tf.Session()\n sess.run(tf.global_variables_initializer())\n tf_grad = sess.run(tf.gradients(sigma_fnc, embedding_variable)[0], feed_dict={net_input: net_example})\n my_grad = sess.run(grad, feed_dict={net_input: net_example})\n differ = numpy.linalg.norm(tf_grad - my_grad)\n differ = differ / numpy.linalg.norm(tf_grad)\n print('l2 grad differentage {}'.format(differ))\n print('l2 grad max difference {}'.format(numpy.max(tf_grad - my_grad)))\n\n return embedding_variable.assign(embedding_variable_)", "def mpt11_w18_2_lr(optimizer, args, **kwargs):\n\n def _lr_adjuster(epoch, iteration):\n if epoch >= 46:\n lr = args.lr * 0.1\n\n if epoch >= 65:\n lr = lr * 0.1\n\n if epoch >= 80:\n lr = lr * 0.01\n else:\n lr = args.lr\n\n assign_learning_rate(optimizer, lr)\n\n return lr\n\n return _lr_adjuster", "def compute_cost(AL, Y):\n pass", "def compute_loss_logreg_regl2(y, tx, w, lambda_):\n loss = compute_loss_logreg(y, tx, w)\n penal_loss = loss + lambda_ * w.dot(w)\n\n return penal_loss", "def kl_loss(self):\n return sum([p.kl_loss() for p in self.parameters])", "def get_sparsity_penalty(nnet, inputs, sparsity, mode=\"mean\",\n deterministic=False):\n\n assert mode in (\"mean\", \"l1\")\n\n rho = sparsity\n penalty = 0\n eps = 0.0001 # for numerical stability\n\n for layer in nnet.all_layers:\n if layer.isactivation:\n\n activation = lasagne.layers.get_output(layer, inputs=inputs,\n deterministic=deterministic)\n\n if mode == \"mean\":\n if layer.isrelu:\n avg_activation = T.mean(T.gt(activation,\n T.zeros_like(activation)),\n axis=0, dtype='floatX')\n if layer.issigmoid:\n avg_activation = T.mean(activation, axis=0, dtype='floatX')\n\n KL_div = T.sum((rho+eps) *\n (T.log(rho+eps) - T.log(avg_activation+eps)) +\n (1-rho+eps) *\n (T.log(1-rho+eps) - T.log(1-avg_activation+eps)),\n dtype='floatX')\n penalty = penalty + KL_div\n\n if mode == \"l1\":\n penalty = penalty + T.sum(abs(activation), dtype='floatX')\n\n return T.cast(penalty, dtype='floatX')", "def L2_func(x):\n return K.expand_dims(K.sqrt(K.sum(K.pow(x,2), axis=1)))", "def loss(params: hk.Params, batch, labels, xent_weight=self.weights, l1_coeff=self.l1_coef, l2_coeff=self.l2_coef) -> jnp.ndarray:\n logits = net.apply(params, batch)\n labels = jax.nn.one_hot(label, 2)\n\n # Note that in our problem, regularization should be after the AND-mask.\n sum_in_layer = lambda p: jnp.sum(p)\n sum_p_layers = [sum_in_layer(p) for p in jax.tree_leaves(params)]\n l1_loss = sum(sum_p_layers)\n l2_loss = 0.5 * sum(jnp.sum(jnp.square(p)) for p in jax.tree_leaves(params))\n softmax_xent = -jnp.sum(labels * jax.nn.log_softmax(logits) * xent_weight)\n softmax_xent /= labels.shape[0]\n\n return softmax_xent + l2_coeff * l2_loss + l1_coeff * l1_loss", "def learnign_rate_examples():\n #######\n bad_larning_rate = 0.1\n not_bad_learning_rate = 1e-4\n good_learning_rate = 1e-3\n #######\n return bad_larning_rate, 
not_bad_learning_rate, good_learning_rate", "def get_optimal_param(data_desc, ml_model_desc):\n if ml_model_desc == 'ANN': \n # return [<num_layers>, <momentum>, <learn rate>]\n if data_desc == 'young_students_ti_courses':\n return [100, 0.5, 0.001]\n elif data_desc == 'young_students_lic_courses':\n return [36, 0.9, 1.0]\n elif data_desc == 'young_students_comp_courses':\n return [36, 0.6, 0.001]\n elif data_desc == 'old_students':\n return [24, 0.5, 0.7]\n else:\n exit('can not get optimal parameters for the combination passed!')\n elif ml_model_desc == 'naive_bayes':\n if data_desc == 'young_students_ti_courses':\n return [GaussianNB()]\n elif data_desc == 'young_students_lic_courses':\n return [BernoulliNB()]\n elif data_desc == 'young_students_comp_courses':\n return [MultinomialNB()]\n elif data_desc == 'old_students':\n return [GaussianNB()]\n else:\n exit('can not get optimal parameters for the combination passed!')\n elif ml_model_desc == 'SVR': \n if data_desc == 'young_students_ti_courses':\n return ['linear', 1.0]\n elif data_desc == 'young_students_lic_courses':\n return ['linear', 1.0]\n elif data_desc == 'young_students_comp_courses':\n return ['rbf', 1.0]\n elif data_desc == 'old_students':\n return ['linear', 1.0]\n else:\n exit('can not get optimal parameters for the combination passed!')\n else: \n exit('can not get optimal parameters for the combination passed!')", "def lr_cost_function(theta: np.ndarray, X: np.ndarray, y: np.ndarray, l: float) -> float:\n\n # Initialize some useful values\n m = len(y) # number of training examples\n\n # You need to return the following variable correctly.\n J = 0\n\n # ====================== YOUR CODE HERE ======================\n # Instructions: Compute the cost of a particular choice of theta.\n # You should set J to the cost.\n\n # =============================================================\n return J", "def _svm_loss_penalty_dual(name):\n return hp.choice(name, [\n ('hinge', 'l2', True),\n ('squared_hinge', 'l2', True),\n ('squared_hinge', 'l1', False),\n ('squared_hinge', 'l2', False)\n ])", "def calculate_penalty(self):\n if AT.PENALTY not in self.attributes:\n return (0, 1)\n return self.attributes[AT.PENALTY].calculate(self)", "def _l2(self, y, x, y_minus_g, w_0):\n # Initialize weight vector to return\n w_1 = np.zeros(len(w_0))\n \n for j in range(len(x)):\n reg = float(w_0[j]) / (self._lambda*self._num_training)\n w_1[j] = w_0[j] + self._epsilon*(x[j]*y_minus_g - reg)\n \n return w_1", "def loss(self, x, y):\n (N,D) = x.shape\n k1 = np.matmul(x,np.transpose(self.w)) + self.b\n y1 = y.reshape((N,1))\n c2 = 0\n c1 = (np.log(1+np.exp(-1*y1*k1)))\n for i in range(N):\n c2 += c1[i][0]\n l = c2 / N + (0.5 * self.l2_reg * np.dot(self.w,np.transpose(self.w)))\n l1 = l[0][0]\n return l1\n\n\n #raise NotImplementedError", "def build_loss(self):\n import tensorflow as tf\n\n y_1d = [tf.reduce_sum(tf.multiply(self.variables[\"y\"][i], self.variables[\"y_action\"][i]), axis=1) for i in range(len(self.variables[\"y\"]))]\n loss = np.sum([tf.nn.l2_loss(y_1d[i] - self.variables[\"y_true\"]) for i in range(len(y_1d))])\n\n l1_reg = 0\n l2_reg = 0\n\n keys = sorted(self.variables.keys())\n keys = [key for key in keys if critere_keys(key) and \"W\" in key]\n for key in keys:\n l1_reg += tf.reduce_sum(tf.abs(self.variables[key]))\n l2_reg += tf.nn.l2_loss(self.variables[key])\n\n self.loss = loss + self.alpha_reg * l1_reg + self.beta_reg * l2_reg\n\n self.train_step = tf.train.RMSPropOptimizer(self.decay_learning_rate,\n decay=0.99, 
momentum=0., centered=True).minimize(self.loss, global_step=self.global_step)", "def reg_loss(model: nn.Module, regularizer: str, l1: float=0.01, l2: float=0.01):\n if regularizer == 'l1':\n l1_reg = l1 * sum(torch.abs(p).sum() for p in model.parameters())\n return l1_reg\n if regularizer == 'l2':\n l2_reg = l2 * sum(torch.square(p).sum() for p in model.parameters())\n return l2_reg\n if regularizer == 'l1_l2':\n l1_reg = l1 * sum(torch.abs(p).sum() for p in model.parameters())\n l2_reg = l2 * sum(torch.square(p).sum() for p in model.parameters())\n return l1_reg + l2_reg", "def compute_cost(zn, y, reg, params, n_layers):\n\n logits = tf.transpose(zn)\n labels = tf.transpose(y)\n\n regularization = 0.0\n for i in range(n_layers):\n wn = 'W{}'.format(i)\n regularization += tf.nn.l2_loss(params[wn])\n\n cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=labels)) + (\n reg * regularization)\n\n return cost", "def _construct_nll_costs(self, si, xo):\n # average log-likelihood over the refinement sequence\n xh = self.obs_transform(si)\n if self.x_type == 'bernoulli':\n ll_costs = log_prob_bernoulli(xo, xh)\n else:\n ll_costs = log_prob_gaussian2(xo, xh, \\\n log_vars=self.bounded_logvar)\n nll_costs = -ll_costs\n return nll_costs", "def update_weights(self):\n\n\n self.w += self.learn_rate * (self.X.T.dot(self.T - self.Y)\n - self.reg_L1 * np.sign(self.w)\n - self.reg_L2 * 2*self.w)", "def regularizer(self):\n \n # L2 regularization for the fully connected parameters.\n regularizers = (tf.nn.l2_loss(self.weights.wd1) + tf.nn.l2_loss(self.weights.bd1) + \n tf.nn.l2_loss(self.weights.wout) + tf.nn.l2_loss(self.weights.bout))\n return regularizers", "def P2l_rec(ells, cost):\n P22 = 3 * (1 - cost**2)\n P23 = 15 * cost * (1 - cost**2)\n P2l = np.zeros(len(ells))\n P2l[0] = 0.\n P2l[1] = 0.\n P2l[2] = P22\n P2l[3] = P23\n for ell in ells[4:]:\n # print ell, P2l[ell-1], P2l[ell-2]\n P2l[ell] = ((2 * ell - 1) * cost * P2l[ell - 1] -\n (ell + 2 - 1) * P2l[ell - 2]) / (ell - 2)\n return P2l", "def loss_total(self):\r\n def loss(y_true, y_pred):\r\n l2 = 1/2*K.sum(K.square(y_true-y_pred))\r\n\r\n return l2\r\n return loss", "def compute_cost(AL, Y):\n pass", "def compute_cost_with_regularization(A3, Y, parameters, lambd):\n\n m = Y.shape[1]\n W1 = parameters[\"W1\"]\n W2 = parameters[\"W2\"]\n W3 = parameters[\"W3\"]\n\n cross_entropy_cost = compute_cost(A3, Y)\n L2_regularization_cost = (np.sum(np.square(W1)) + np.sum(np.square(W2)) + np.sum(np.square(W3))) * lambd / (2 * m)\n\n cost = cross_entropy_cost + L2_regularization_cost\n\n return cost", "def dual_objective(self, dual_coeffs):\n primal = self.model._sdca_primal_dual_relation(self.l_l2sq,\n dual_coeffs)\n prox_l2_value = 0.5 * self.l_l2sq * np.linalg.norm(primal) ** 2\n return self.model.dual_loss(dual_coeffs) - prox_l2_value", "def l2(weights, name=None):\n with ops.op_scope([weights], name, 'l2_regularizer') as scope:\n my_scale = ops.convert_to_tensor(scale,\n dtype=weights.dtype.base_dtype,\n name='scale')\n return standard_ops.mul(my_scale, nn.l2_loss(weights), name=scope)", "def loss(self, X, y=None, reg=0.0):\n\n self.layers = []\n layers = self.layers\n layers.append(X)\n\n # Unpack variables from the params dictionary\n W1, b1 = self.params['W1'], self.params['b1']\n W2, b2 = self.params['W2'], self.params['b2']\n N, D = X.shape\n H, C = W2.shape\n\n # Compute the forward pass\n scores = None\n #############################################################################\n # TODO: Perform the forward 
pass, computing the class scores for the input. #\n # Store the result in the scores variable, which should be an array of #\n # shape (N, C). #\n #############################################################################\n mid = np.maximum(0, X.dot(W1) + b1.reshape(1, -1)) # activation\n scores = mid.dot(W2) + b2.reshape(1, -1)\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n\n # If the targets are not given then jump out, we're done\n if y is None:\n return scores\n\n # Compute the loss\n loss = None\n #############################################################################\n # TODO: Finish the forward pass, and compute the loss. This should include #\n # both the data loss and L2 regularization for W1 and W2. Store the result #\n # in the variable loss, which should be a scalar. Use the Softmax #\n # classifier loss. So that your results match ours, multiply the #\n # regularization loss by 0.5 #\n #############################################################################\n exp_score = np.exp(scores)\n exp_score_sum = exp_score.sum(axis=1)\n correct_score = exp_score[np.arange(N), y]\n probability = (correct_score / exp_score_sum).reshape(-1, 1)\n loss = -np.log(probability).sum()\n\n loss /= N\n loss += 0.5 * reg * (np.sum(W1 * W1) + np.sum(W2 * W2))\n\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n\n # Backward pass: compute gradients\n grads = {}\n #############################################################################\n # TODO: Compute the backward pass, computing the derivatives of the weights #\n # and biases. Store the results in the grads dictionary. For example, #\n # grads['W1'] should store the gradient on W1, and be a matrix of same size #\n #############################################################################\n des = np.tile((-correct_score / np.square(exp_score_sum)).reshape(-1, 1), (1, C))\n des[np.arange(N), y] += 1.0 / exp_score_sum\n dsoftmax = des * (-np.ones((mid.shape[0], 1)) / probability) * np.exp(scores)\n\n # W2\n grads['W2'] = mid.T.dot(dsoftmax)\n grads['W2'] /= N\n grads['W2'] += reg * W2\n\n # b2\n grads['b2'] = np.ones_like(b2.reshape(1, -1)) * dsoftmax\n grads['b2'] = np.mean(grads['b2'], axis=0).reshape(-1)\n\n # W1\n binary = np.zeros_like(mid)\n binary[mid > 0] = 1\n grads['W1'] = X.T.dot(binary * dsoftmax.dot(W2.T)) # chain rule, compute dmid/dW1 * dscore/dmid * dsoftmax\n grads['W1'] /= N\n grads['W1'] += reg * W1\n\n # b1\n grads['b1'] = np.ones_like(b1.reshape(1, -1)) * binary * dsoftmax.dot(W2.T)\n grads['b1'] = np.mean(grads['b1'], axis=0).reshape(-1)\n\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n\n return loss, grads", "def cost(self) -> float:", "def compute_cost(A2, Y, parameters):\n\n\tm = Y.shape[1] # number of example\n\n\t# Compute the cross-entropy cost\n\tlogprobs = np.multiply(np.log(A2), Y)\n\tcost = -(1/m)*(np.sum((logprobs) + np.multiply(np.log(1-A2), 1-Y)))\n\n\tcost = np.squeeze(cost) # makes sure cost is the dimension we expect. 
\n\t \t# E.g., turns [[17]] into 17 \n\tassert(isinstance(cost, float))\n\n\treturn cost", "def l2_loss(self, t, use_logit: bool = False):\n c = 0\n if use_logit:\n return np.mean([(self._irf[i].interpolant(t[p]) - logit(self._x[p, i])) ** 2\n for p in range(t.shape[0]) for i in range(self._x.shape[1])])\n else:\n return np.mean([(self._irf[i].interpolant(t[p]) - self._x[p, i]) ** 2\n for p in range(t.shape[0]) for i in range(self._x.shape[1])])", "def loss(self, X, y=None):\n\t\tW1, b1 = self.params['W1'], self.params['b1']\n\t\tW2, b2 = self.params['W2'], self.params['b2']\n\t\tW3, b3 = self.params['W3'], self.params['b3']\n\t\t\n\t\t# pass conv_param to the forward pass for the convolutional layer\n\t\tfilter_size = W1.shape[2]\n\t\tconv_param = {'stride': 1, 'pad': (filter_size - 1) / 2}\n\n\t\t# pass pool_param to the forward pass for the max-pooling layer\n\t\tpool_param = {'pool_height': 2, 'pool_width': 2, 'stride': 2}\n\n\t\tscores = None\n\t\t############################################################################\n\t\t# TODO: Implement the forward pass for the three-layer convolutional net, #\n\t\t# computing the class scores for X and storing them in the scores\t\t\t\t\t #\n\t\t# variable.\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t #\n\t\t############################################################################\n\t\tz1, cache1 = conv_relu_pool_forward(X, W1, b1, conv_param, pool_param)\n\t\tz2, cache2 = affine_relu_forward(z1, W2, b2)\n\t\ty3, cache3 = affine_forward(z2, W3, b3)\n\t\tscores = y3\n\t\t############################################################################\n\t\t#\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tEND OF YOUR CODE\t\t\t\t\t\t\t\t\t\t\t\t\t\t #\n\t\t############################################################################\n\t\t\n\t\tif y is None:\n\t\t\treturn scores\n\t\t\n\t\tloss, grads = 0, {}\n\t\t############################################################################\n\t\t# TODO: Implement the backward pass for the three-layer convolutional net, #\n\t\t# storing the loss and gradients in the loss and grads variables. Compute #\n\t\t# data loss using softmax, and make sure that grads[k] holds the gradients #\n\t\t# for self.params[k]. 
Don't forget to add L2 regularization!\t\t\t\t\t\t\t #\n\t\t############################################################################\n\t\tloss, dout = softmax_loss(scores, y)\n\t\tloss += self.reg * 0.5 * (np.power(self.params['W3'], 2).sum() + \n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tnp.power(self.params['W2'], 2).sum() + \n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tnp.power(self.params['W1'], 2).sum())\n\n\t\tdx3, grads['W3'], grads['b3'] = affine_backward(dout, cache3)\n\t\tdx2, grads['W2'], grads['b2'] = affine_relu_backward(dx3, cache2)\n\t\tdx1, grads['W1'], grads['b1'] = conv_relu_pool_backward(dx2, cache1)\n\t\t############################################################################\n\t\t#\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tEND OF YOUR CODE\t\t\t\t\t\t\t\t\t\t\t\t\t\t #\n\t\t############################################################################\n\t\t\n\t\treturn loss, grads", "def nnCostFunction2(nn_params, input_layer_size, hidden_layer_size, num_labels, X, y, lambda_):\n # Reshape nn_params back into the parameters Theta1 and Theta2, the weight matrices\n # for our 2 layer neural network\n Theta1 = nn_params[:hidden_layer_size * (input_layer_size + 1)].reshape(\n (hidden_layer_size, input_layer_size + 1))\n Theta2 = nn_params[hidden_layer_size *\n (input_layer_size + 1):].reshape((num_labels, hidden_layer_size + 1))\n\n # Setup some useful variables\n m = X.shape[0]\n\n # Add ones to the X data matrix\n X = np.insert(X, 0, 1, axis=1)\n\n # Perform forward propagation for layer 2\n z2 = np.matmul(X, Theta1.transpose())\n a2 = sigmoid(z2)\n a2 = np.insert(a2, 0, 1, axis=1)\n z3 = np.matmul(a2, Theta2.transpose())\n a3 = sigmoid(z3)\n\n # turn Y into a matrix with a new column for each category and marked with 1\n y_one_hot = np.zeros_like(a3)\n for i in range(m):\n y_one_hot[i, y[i] - 1] = 1\n\n # Calculate the cost of our forward prop\n ones = np.ones_like(a3)\n A = np.matmul(y_one_hot.transpose(), np.log(a3)) + \\\n np.matmul((ones - y_one_hot).transpose(), np.log(ones - a3))\n J = -1 / m * A.trace()\n J += lambda_ / (2 * m) * \\\n (np.sum(Theta1[:, 1:] ** 2) + np.sum(Theta2[:, 1:] ** 2))\n\n # Perform backward propagation to calculate deltas & gradients\n delta3 = a3 - y_one_hot\n delta2 = np.matmul(delta3, Theta2[:, 1:]) * sigmoidGradient(z2)\n Theta2_grad = np.matmul(a2.transpose(), delta3).transpose()\n Theta1_grad = np.matmul(X.transpose(), delta2).transpose()\n\n Theta1_grad[:, 1:] += lambda_ * Theta1[:, 1:]\n Theta2_grad[:, 1:] += lambda_ * Theta2[:, 1:]\n Theta1_grad /= m\n Theta2_grad /= m\n grad = np.concatenate([Theta1_grad.reshape(-1), Theta2_grad.reshape(-1)])\n return J, grad", "def EmbeddingL1RegularizationUpdate(embedding_variable, net_input, learn_rate, l1_reg_val):\n # TODO(student): Change this to something useful. 
Currently, this is a no-op.\n net_input = tf.nn.l2_normalize(net_input, axis=0)\n sign_inside = tf.sign(tf.matmul(net_input, embedding_variable))\n where = tf.equal(sign_inside, 0)\n # should replace 0's with random in [-1, 1] for an better (not necessarily acute)implementation\n grad = l1_reg_val * tf.matmul(tf.transpose(net_input), sign_inside)\n embedding_variable_ = embedding_variable - learn_rate * grad\n\n ## local test #better to disable when learning\n batch_size, number_of_vocabulary_tokens = net_input.shape\n net_example = numpy.random.binomial(1, .1, (3, number_of_vocabulary_tokens))\n sigma_fnc = l1_reg_val * tf.norm(tf.matmul(net_input, embedding_variable), ord=1)\n # assert tf.gradients(sigma_fnc, embedding_variable) == grad, \"wrong grad in L2\"\n sess = tf.Session()\n sess.run(tf.global_variables_initializer())\n tf_grad = sess.run(tf.gradients(sigma_fnc, embedding_variable)[0], feed_dict={net_input: net_example})\n my_grad = sess.run(grad, feed_dict={net_input: net_example})\n differ = numpy.linalg.norm(tf_grad - my_grad)\n differ = differ / numpy.linalg.norm(tf_grad)\n print('l1 grad differentage {}'.format(differ))\n print('l2 grad max difference {}'.format(numpy.max(tf_grad - my_grad)))\n\n return embedding_variable.assign(embedding_variable_)", "def decay_weights(cost, weight_decay_rate):\n costs = []\n for var in tf.trainable_variables():\n costs.append(tf.nn.l2_loss(var))\n cost += tf.multiply(weight_decay_rate, tf.add_n(costs))\n return cost", "def _get_learning_rate(self) -> float:\n if len(self.errors) > 1 and self.errors[-2] - self.errors[-1] < 0:\n self.learning_rate /= 2\n return self.learning_rate", "def compute_loss_and_gradients(self, X, y):\n # Before running forward and backward pass through the model,\n # clear parameter gradients aggregated from the previous pass\n # TODO Set parameter gradient to zeros\n # Hint: using self.params() might be useful!\n self.fulllayer1.W.grad = np.zeros_like(self.fulllayer1.W.grad)\n self.fulllayer1.B.grad = np.zeros_like(self.fulllayer1.B.grad)\n self.fulllayer2.W.grad = np.zeros_like(self.fulllayer2.W.grad)\n self.fulllayer2.B.grad = np.zeros_like(self.fulllayer2.B.grad)\n\n\n # TODO Compute loss and fill param gradients\n # by running forward and backward passes through the model\n res = self.fulllayer1.forward(X)\n res2 = self.reglayer1.forward(res)\n res3 = self.fulllayer2.forward(res2)\n\n loss, grad = softmax_with_cross_entropy(res3, y)\n\n back3 = self.fulllayer2.backward(grad)\n back2 = self.reglayer1.backward(back3)\n back = self.fulllayer1.backward(back2)\n \n # After that, implement l2 regularization on all params\n # Hint: self.params() is useful again!\n\n for params in self.params().keys():\n # print(params)\n # print(self.params()[params].value)\n loc_loss, loc_grad = l2_regularization(self.params()[params].value, self.reg)\n loss += loc_loss\n self.params()[params].grad += loc_grad\n\n return loss", "def get_expected_cost(self):", "def l2(name, weights):\n\n with tf.name_scope(name):\n regularizer = np.float32(0.0)\n for weight in weights:\n tf.add(regularizer, tf.nn.l2_loss(weight))\n\n return regularizer", "def loss(self, X, y=None):\n\n # In dev testing, the loss fnc stops at \"scores\" , unfollowed by \"softmax\" probability prediction.\n # In real testing, \"self.predict()\" needs to be implemented in Solver() class.\n \n if y is None:\n for bn_param in self.bn_params:\n bn_param[\"mode\"] = \"test\"\n\n\n W1, b1 = self.params['W1'], self.params['b1']\n gamma1, beta1 = self.params[\"sbnGamma1\"], 
self.params[\"sbnBeta1\"]\n bn_param1 = self.bn_params[0]\n\n W2, b2 = self.params['W2'], self.params['b2']\n gamma2, beta2 = self.params[\"sbnGamma2\"], self.params[\"sbnBeta2\"]\n bn_param2 = self.bn_params[1]\n\n W3, b3 = self.params['W3'], self.params['b3']\n gamma3, beta3 = self.params[\"bnGamma3\"], self.params[\"bnBeta3\"]\n bn_param3 = self.bn_params[2]\n\n W4, b4 = self.params['W4'], self.params['b4']\n \n # pass conv_param to the forward pass for the convolutional layer\n conv_param = self.conv_param\n\n # pass pool_param to the forward pass for the max-pooling layer\n pool_param = self.maxpool_params\n\n ############################################################################\n # TODO: Implement the forward pass for the three-layer convolutional net, #\n # computing the class scores for X and storing them in the scores #\n # variable. #\n ############################################################################\n \n scores = None \n cache = {}\n # def conv_sbn_relu_forward(x, w, b, gamma, beta, conv_param, bn_param): return out, cache;\n out, cache[\"layer1\"] = layer_utils.conv_sbn_relu_forward(X, W1, b1, gamma1, beta1, conv_param, bn_param1) \n out, cache[\"layer2\"] = layer_utils.conv_sbn_relu_forward(out, W2, b2, gamma2, beta2, conv_param, bn_param2)\n\n # def max_pool_forward_fast(x, pool_param): return out, cache;\n out, cache[\"maxpool\"] = fast_layers.max_pool_forward_fast(out, pool_param)\n\n # def affine_bn_relu_forward(x, w, b, gamma, beta, bn_param): return out, cache;\n \n out, cache[\"layer3\"] = layer_utils.affine_bn_relu_forward(out, W3, b3, gamma3, beta3, bn_param3)\n\n # def affine_forward(x, w, b): return out, cache;\n scores, cache[\"layer4\"] = layers.affine_forward(out, W4, b4)\n\n\n ############################################################################\n # END OF YOUR CODE #\n ############################################################################\n \n if y is None:\n return scores\n \n ############################################################################\n # TODO: Implement the backward pass for the three-layer convolutional net, #\n # storing the loss and gradients in the loss and grads variables. Compute #\n # data loss using softmax, and make sure that grads[k] holds the gradients #\n # for self.params[k]. Don't forget to add L2 regularization! 
#\n ############################################################################\n \n loss, grads = 0, {}\n\n # def softmax_loss(x, y): return loss, dscore;\n loss, dscores = layers.softmax_loss(scores, y)\n loss += 0.5 * self.reg * (np.sum(W1 * W1) + np.sum(W2 * W2) + np.sum(W3 * W3) + np.sum(W4 * W4))\n\n # def affine_backward(dout, cache): return dx, dw, db;\n dout, dW4, db4 = layers.affine_backward(dscores, cache[\"layer4\"]) \n\n # def affine_bn_relu_backward(dout, cache): return dx, dw, db, dgamma, dbeta;\n dout, dW3, db3, dgamma3, dbeta3 = layer_utils.affine_bn_relu_backward(dout, cache[\"layer3\"])\n\n # print cache[\"layer3\"]\n\n # def max_pool_backward_fast(dout, cache): return max_pool_backward_im2col(dout, real_cache);\n # def max_pool_backward_im2col(dout, cache): return dx;\n dout = fast_layers.max_pool_backward_fast(dout, cache[\"maxpool\"])\n\n # def conv_sbn_relu_backward(dout, cache): return dx, dw, db, dgamma, dbeta;\n dout, dW2, db2, dgamma2, dbeta2 = layer_utils.conv_sbn_relu_backward(dout, cache[\"layer2\"])\n _, dW1, db1, dgamma1, dbeta1 = layer_utils.conv_sbn_relu_backward(dout, cache[\"layer1\"])\n\n # reg\n grads['W4'], grads['b4'] = dW4 + self.reg * W4, db4\n \n grads['W3'], grads['b3'] = dW3 + self.reg * W3, db3\n grads[\"bnGamma3\"], grads[\"bnBeta3\"] = dgamma3, dbeta3\n\n grads['W2'], grads['b2'] = dW2 + self.reg * W2, db2\n grads[\"sbnGamma2\"], grads[\"sbnBeta2\"] = dgamma2, dbeta2\n\n grads['W1'], grads['b1'] = dW1 + self.reg * W1, db1\n grads[\"sbnGamma1\"], grads[\"sbnBeta1\"] = dgamma1, dbeta1\n\n ############################################################################\n # END OF YOUR CODE #\n ############################################################################\n \n return loss, grads", "def ComputeCost(Y, W, P, my_lambda):\n l = [np.log(P[i][np.argmax(Y[i])]) for i in range(len(Y))]\n l = -np.mean(l)\n J = l\n for w in W:\n J += my_lambda * (w**2).sum()\n return J, l", "def get_learning_gradients(self, k=1, \n l1=0.0, l2=0.0, \n sparse_lambda=0., sparse_p=0.01, \n waste_cutoff=1e-2,\n waste_reduction=False):\n updates = {}\n self.waste_reduction_nbsize = sharedX(1.0, name='waste_reduction_nbsize')\n\n print '*********************************'\n print '*********************************'\n print '** waste_reduction = ', waste_reduction\n print '*********************************'\n print '*********************************'\n \n ### compute positive phase ###\n ph_mean, ph_sample = self.sample_h_given_v(self.input, temper=False)\n\n ### compute negative phase ###\n [nh_samples, nv_samples, beta, mixstat, E], sampling_updates = self.get_sampling_updates(k=k)\n updates.update(sampling_updates)\n\n if self.n_beta.value > 1:\n # after all this... 
swap bottom temperature samples into T=1 minibatch\n nv_samples, E = self.pt_swap_t1_sample(nv_samples, E, mixstat)\n updates[self._buffer] = T.set_subtensor(self._buffer[:self.n_chain_total], nv_samples)\n updates[self._E] = T.set_subtensor(self._E[:self.n_chain_total], E)\n\n # (optionally) perform waste reduction on parallel chains\n if waste_reduction:\n chain_end, weights, wr_nbsize = \\\n pt_waste_reduction(nv_samples, beta, mixstat, E, \n cut_off=waste_cutoff, batch_size=self.batch_size)\n updates[self.waste_reduction_nbsize] = wr_nbsize\n\n # define cost function\n cost = T.mean(self.free_energy(self.input)) - \\\n T.sum(weights*self.free_energy(chain_end)) / self.t_batch_size\n gconstant = [chain_end, weights]\n else:\n chain_end = nv_samples[:self.batch_size]\n # define cost function\n cost = T.mean(self.free_energy(self.input)) - T.mean(self.free_energy(chain_end))\n gconstant = [chain_end]\n\n if l1: cost += l1 * T.sum(abs(self.W))\n if l2: cost += l2 * T.sum(self.W**2)\n \n # We must not compute the gradient through the gibbs sampling \n gparams = T.grad(cost, self.params, consider_constant=gconstant)\n\n grads = {}\n # constructs the update dictionary\n for gparam, param in zip(gparams, self.params):\n grads[param] = gparam\n \n # modify hidden biases according to the sparsity regularization term of Lee07\n if sparse_lambda:\n sparse_cost = sparse_lambda * T.sum((sparse_p - T.mean(ph_mean, axis=0))**2)\n grads[self.hbias] += T.grad(sparse_cost, self.hbias)\n\n return grads, updates", "def polynomial_learning_rate(n, w=0.5):\n assert n > 0, \"Make sure the number of times a state action pair has been observed is always greater than 0 before calling polynomial_learning_rate\"\n\n return 1./n**w", "def compute_cost(y, tx, w):\n # ***************************************************\n # INSERT YOUR CODE HERE\n # TODO: compute loss by MSE / MAE\n # ***************************************************\n \n # vector e\n e = compute_e(y, tx, w)\n N = compute_N(e)\n L_MSE = np.dot(np.matrix.transpose(e), e)\n L_MSE = L_MSE / (2 * N)\n \n return L_MSE", "def loss(self, X, y=None):\r\n mode = 'test' if y is None else 'train'\r\n\r\n if self.dropout_param is not None:\r\n self.dropout_param['mode'] = mode\r\n if self.use_batchnorm:\r\n for bn_param in self.bn_params:\r\n bn_param[mode] = mode\r\n\r\n\r\n W1, b1 = self.params['W1'], self.params['b1']\r\n W2, b2 = self.params['W2'], self.params['b2']\r\n W3, b3 = self.params['W3'], self.params['b3']\r\n gamma1, beta1 = self.params['gamma1'], self.params['beta1']\r\n gamma2, beta2 = self.params['gamma2'], self.params['beta2']\r\n # pass conv_param to the forward pass for the convolutional layer\r\n filter_size = W1.shape[2]\r\n conv_param = {'stride': 1, 'pad': int((filter_size - 1) / 2)}\r\n\r\n # pass pool_param to the forward pass for the max-pooling layer\r\n pool_param = {'pool_height': 2, 'pool_width': 2, 'stride': 2}\r\n\r\n scores = None\r\n ############################################################################\r\n # TODO: Implement the forward pass for the three-layer convolutional net, #\r\n # computing the class scores for X and storing them in the scores #\r\n # variable. 
#\r\n ############################################################################\r\n alpha = 0.1\r\n csrp1, csrp1_cache = conv_sbn_lrelu_pool_forward(X, W1, b1, gamma1, beta1, self.bn_params[0], conv_param, pool_param, alpha)\r\n abr1, abr1_cache = affine_bn_lrelu_forward(csrp1, W2, b2, gamma2, beta2, self.bn_params[1], alpha)\r\n scores, out_cache = affine_forward(abr1, W3, b3)\r\n ############################################################################\r\n # END OF YOUR CODE #\r\n ############################################################################\r\n\r\n if y is None:\r\n return scores\r\n\r\n loss, grads = 0, {}\r\n ############################################################################\r\n # TODO: Implement the backward pass for the three-layer convolutional net, #\r\n # storing the loss and gradients in the loss and grads variables. Compute #\r\n # data loss using softmax, and make sure that grads[k] holds the gradients #\r\n # for self.params[k]. Don't forget to add L2 regularization! #\r\n ############################################################################\r\n loss, dp = softmax_loss(scores, y)\r\n loss += 0.5 * self.reg * np.sum(\r\n np.sum(W1 ** 2) + np.sum(W2 ** 2) + np.sum(W3 ** 2)\r\n )\r\n dp, dw3, db3 = affine_backward(dp, out_cache)\r\n dp, dw2, db2, dgamma2, dbeta2 = affine_bn_lrelu_backward(dp, abr1_cache)\r\n dp, dw1, db1, dgamma1, dbeta1 = conv_sbn_lrelu_pool_backward(dp, csrp1_cache)\r\n grads['W1'] = dw1 + self.reg * W1\r\n grads['W2'] = dw2 + self.reg * W2\r\n grads['W3'] = dw3 + self.reg * W3\r\n grads['b1'] = db1\r\n grads['b2'] = db2\r\n grads['b3'] = db3\r\n grads['gamma2'] = dgamma2\r\n grads['gamma1'] = dgamma1\r\n grads['beta2'] = dbeta2\r\n grads['beta1'] = dbeta1\r\n \r\n ############################################################################\r\n # END OF YOUR CODE #\r\n ############################################################################\r\n\r\n return loss, grads", "def net(input_lst, weight_lst, bias):\r\n net_total = bias\r\n\r\n for node in range(len(input_lst)):\r\n net_total += input_lst[node] * weight_lst[node]\r\n\r\n return net_total", "def target_multi_objective2(\n config: Configuration,\n seed: int,\n # instance: str,\n # budget: float,\n) -> dict[str, float]:\n return {\"cost1\": seed, \"cost2\": seed}", "def add_l2_reg(loss_f, grad_f, lambda_):\n\n def l2_loss(y, tx, w, *args, **kwargs):\n return loss_f(y, tx, w, *args, **kwargs) + lambda_ * np.linalg.norm(w)\n \n def l2_grad(y, tx, w, *args, **kwargs):\n return grad_f(y, tx, w, *args, **kwargs) + 2 * lambda_ * w\n \n return l2_loss, l2_grad", "def get_learning_rate(opt, current, best, counter, learning_rate):\n if current > best:\n best = current\n counter = 0\n elif counter > opt['delay']:\n learning_rate = learning_rate / 10.\n counter = 0\n else:\n counter += 1\n return (best, counter, learning_rate)", "def _learn_node_parameter_w(outputs, inputs=None):\n num_inputs = 0 if inputs is None else inputs.shape[1]\n weights = np.zeros(shape=num_inputs + 1)\n\n \"\"\" YOUR CODE HERE \"\"\"\n # Ax = b, A\n N_observe = outputs.shape[0]\n A = np.zeros(shape = (num_inputs+1, num_inputs+1))\n for i in range(A.shape[0]):\n for j in range(A.shape[1]):\n if i==0 and j==0:\n A[i][j] = N_observe\n elif i==0 and j!=0:\n A[i][j] = np.sum(inputs[:,j-1])\n elif i!=0 and j==0:\n A[i][j] = np.sum(inputs[:,i-1])\n else:\n for k in range(N_observe):\n A[i][j] += inputs[k,i-1]*inputs[k, j-1]\n b = np.zeros(shape=num_inputs + 1)\n for i in range(len(b)):\n if i==0:\n b[i] = 
np.sum(outputs)\n else:\n for k in range(N_observe):\n b[i] += inputs[k,i-1]*outputs[k]\n\n weights = np.linalg.solve(A, b)\n \"\"\" END YOUR CODE HERE \"\"\"\n\n return weights", "def l2_reg_gradient_descent(Y, weights, cache, alpha, lambtha, L):\n m = Y.shape[1]\n len_cache = len(cache)\n\n # learning for the last layer:\n Al = cache['A{}'.format(len_cache - 1)] # last A\n A_prev = cache['A{}'.format(len_cache - 2)] # pre last A\n dZl = Al - Y # last dZ\n dWl = np.matmul(dZl, A_prev.T) / m # last dW, shape (1, nodes)\n dbl = (1 / m) * np.sum(dZl, axis=1, keepdims=True)\n Wl_str = 'W{}'.format(len_cache - 1)\n Wl = weights[Wl_str] # last W\n # last layer W learning:\n weights[Wl_str] = Wl - (alpha * lambtha / m) * Wl - alpha * dWl\n bl_str = 'b{}'.format(len_cache - 1)\n bl = weights[bl_str] # last b\n weights[bl_str] = bl - alpha * dbl # last layer b learning\n\n # next: learning for the rest of the layers:\n dZ = dZl\n W_next = Wl\n for i in reversed(range(1, len_cache - 1)):\n A = cache['A{}'.format(i)]\n A_prev = cache['A{}'.format(i - 1)]\n dZ = np.matmul(W_next.T, dZ) * (1 - A ** 2)\n dW = (1 / m) * (np.matmul(dZ, A_prev.T))\n db = np.sum(dZ, axis=1, keepdims=True) / m\n W_c_str = 'W{}'.format(i)\n W_c = weights[W_c_str] # current W\n b_c_str = 'b{}'.format(i)\n b_c = weights[b_c_str] # current b\n weights[W_c_str] = W_c - (alpha * lambtha / m) * W_c - alpha * dW\n weights[b_c_str] = b_c - alpha * db\n W_next = W_c", "def s_penalty(self, triples, nodes):\n\n s_index, p_index, o_index = split_spo(triples)\n\n s, p, o = nodes[s_index, :], self.relations[p_index, :], nodes[o_index, :]\n\n return s.pow(2).mean() + p.pow(2).mean() + o.pow(2).mean()", "def get_objective(X_t, xattr, Y_t, s):\n return eloglik(X_t, xattr, Y_t, s) - (s['KL']).sum()", "def calculate_path_cost_with_weighted_sum(self, path, attr1, attr2): \n costs = [] \n for i in range(len(path) - 1):\n a = (1- self.G[path[i]][path[i+1]][attr2]) / (2 - self.G[path[i]][path[i+1]][attr1] - self.G[path[i]][path[i+1]][attr2]) \n b = (1- self.G[path[i]][path[i+1]][attr1]) / (2 - self.G[path[i]][path[i+1]][attr1] - self.G[path[i]][path[i+1]][attr2]) \n costs.append(a * self.G[path[i]][path[i+1]][attr1] + b * self.G[path[i]][path[i+1]][attr2]) \n return max(costs)", "def loss(self, X, y=None):\n\t\tmode = 'test' if y is None else 'train'\n\t\tif self.dropout_param is not None:\n\t\t\tself.dropout_param['mode'] = mode\n\t\tif self.use_batchnorm:\n\t\t\tfor bn_param in self.bn_params:\n\t\t\t\tbn_param[mode] = mode\n\n\t\tW1, b1 = self.params['W1'], self.params['b1']\n\t\tW2, b2 = self.params['W2'], self.params['b2']\n\t\tW3, b3 = self.params['W3'], self.params['b3']\n\t\tW5, b5 = self.params['W5'], self.params['b5']\n\t\t\n\t\tgamma1, beta1 = self.params['gamma1'], self.params['beta1']\n\t\tgamma2, beta2 = self.params['gamma2'], self.params['beta2']\n\t\tgamma3, beta3 = self.params['gamma3'], self.params['beta3']\t\n\n\t\t# pass conv_param to the forward pass for the convolutional layer\n\t\tfilter_size1 = W1.shape[2]\n\t\tconv_param1 = {'stride': 1, 'pad': (filter_size1 - 1) / 2}\n\t\tfilter_size2 = W2.shape[2]\n\t\tconv_param2 = {'stride': 1, 'pad': (filter_size2 - 1) / 2}\n\t\t\n\t\t# pass pool_param to the forward pass for the max-pooling layer\n\t\tpool_param = {'pool_height': 2, 'pool_width': 2, 'stride': 2}\n\t\t\n\t\tscores = None\n\t\n\t\t# Convolutional layers\t\n\t\tz1, cache1 = conv_relu_forward(X, W1, b1, conv_param1)\n\t\tz2, cache2 = conv_relu_pool_forward(z1, W2, b2, conv_param2, pool_param)\n\t\tz3, cache3 = 
spatial_batchnorm_forward(z2, gamma1, beta1, self.bn_params[1])\n\n\t\t# Fully Connected layers\n\t\tz4, cache4 = affine_relu_bn_forward(z3, W3, b3, gamma2, beta2, self.bn_params[2])\n\t\tz4, cache9 = dropout_forward(z4, self.dropout_params)\n\n\t\t# Output layer\n\t\tz6, cache6 = affine_forward(z4, W5, b5)\n\t\tz7, cache7 = batchnorm_forward(z6, gamma3, beta3, self.bn_params[3])\n\t\t#z8, cache8 = dropout_forward(z7, self.dropout_params)\n\t\tscores = z7\n\t\t\n\t\tif y is None:\n\t\t\treturn scores\n\t\t\n\t\tloss, grads = 0, {}\n\t\tloss, dout = softmax_loss(scores, y)\n\t\tloss += self.reg * 0.5 * (np.power(self.params['W1'], 2).sum() +\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tnp.power(self.params['W2'], 2).sum() +\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tnp.power(self.params['W5'], 2).sum() +\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tnp.power(self.params['W3'], 2).sum())\n\t\t\n\t\t#dx8 = dropout_backward(dout, cache8)\n\t\tdx7, grads['gamma3'], grads['beta3'] = batchnorm_backward(dout, cache7)\n\t\tdx6, grads['W5'], grads['b5'] = affine_backward(dx7, cache6)\n\t\tdx6 = dropout_backward(dx6, cache9)\n\t\tdx4, grads['W3'], grads['b3'], grads['gamma2'], grads['beta2'] = affine_relu_bn_backward(dx6, cache4)\n\t\t\n\t\tdx3, grads['gamma1'], grads['beta1'] = spatial_batchnorm_backward(dx4, cache3)\n\t\tdx2, grads['W2'], grads['b2'] = conv_relu_pool_backward(dx3, cache2)\n\t\tdx1, grads['W1'], grads['b1'] = conv_relu_backward(dx2, cache1)\n\t\t\n\t\treturn loss, grads" ]
[ "0.7453849", "0.7264303", "0.7145872", "0.70822954", "0.68864816", "0.68526465", "0.6803547", "0.6774062", "0.6762818", "0.67296004", "0.64906466", "0.647366", "0.64639264", "0.64553", "0.64553", "0.64553", "0.6427316", "0.64204615", "0.64127034", "0.6400128", "0.6380519", "0.63788927", "0.6342982", "0.6290131", "0.6142353", "0.6130636", "0.60847235", "0.60530436", "0.6050501", "0.6031292", "0.60270035", "0.60059696", "0.60056454", "0.5987304", "0.5984579", "0.59775734", "0.596707", "0.5947776", "0.594028", "0.59327495", "0.5876293", "0.586679", "0.5818204", "0.5802741", "0.57987165", "0.57900846", "0.5781898", "0.5777403", "0.57632613", "0.57549816", "0.57536066", "0.5744273", "0.5728545", "0.57178503", "0.57129854", "0.5709982", "0.57067454", "0.5705098", "0.5704008", "0.5698122", "0.5689362", "0.5689301", "0.5680931", "0.5680659", "0.5676717", "0.56603837", "0.5658681", "0.5654257", "0.56464493", "0.56457376", "0.56446844", "0.5644533", "0.56344134", "0.56299645", "0.5627925", "0.5611371", "0.56072485", "0.5595468", "0.5590568", "0.55839413", "0.55806243", "0.5577969", "0.5577813", "0.55754256", "0.55563647", "0.55543166", "0.5548141", "0.5546108", "0.5541889", "0.55406976", "0.5538831", "0.55377614", "0.55352384", "0.55323255", "0.5526861", "0.5513652", "0.5510514", "0.5508279", "0.5490452", "0.54873824" ]
0.7189708
2
returns the sparsity penalty on network activations combined as a sum
def get_sparsity_penalty(nnet, inputs, sparsity, mode="mean", deterministic=False): assert mode in ("mean", "l1") rho = sparsity penalty = 0 eps = 0.0001 # for numerical stability for layer in nnet.all_layers: if layer.isactivation: activation = lasagne.layers.get_output(layer, inputs=inputs, deterministic=deterministic) if mode == "mean": if layer.isrelu: avg_activation = T.mean(T.gt(activation, T.zeros_like(activation)), axis=0, dtype='floatX') if layer.issigmoid: avg_activation = T.mean(activation, axis=0, dtype='floatX') KL_div = T.sum((rho+eps) * (T.log(rho+eps) - T.log(avg_activation+eps)) + (1-rho+eps) * (T.log(1-rho+eps) - T.log(1-avg_activation+eps)), dtype='floatX') penalty = penalty + KL_div if mode == "l1": penalty = penalty + T.sum(abs(activation), dtype='floatX') return T.cast(penalty, dtype='floatX')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def s_penalty(self, triples, nodes):\n\n s_index, p_index, o_index = split_spo(triples)\n\n s, p, o = nodes[s_index, :], self.relations[p_index, :], nodes[o_index, :]\n\n return s.pow(2).mean() + p.pow(2).mean() + o.pow(2).mean()", "def penalty(self):\n assert len(self.weights) == len(self.means), \"Dimensions!\"\n out = np.exp(self.data['riskfree'] * self.data['maturity'])\n for weight, mean in zip(self.weights, self.means):\n out -= weight * np.exp(mean * self.data['maturity'])\n return (out**2).mean()**.5", "def penalty(self):\n return 0", "def _construct_reg_costs(self):\n param_reg_cost = sum([T.sum(p**2.0) for p in self.joint_params])\n return param_reg_cost", "def get_spectral_penalty(nnet, include_bias=False):\n\n penalty = 0\n\n for layer in nnet.trainable_layers:\n if not layer.issvm:\n eigenvalues, eigvec = T.nlinalg.eigh(T.dot(layer.W, layer.W.T))\n eig_max = T.max(eigenvalues)\n penalty = penalty + eig_max\n\n if include_bias:\n for layer in nnet.trainable_layers:\n if (not layer.issvm) and (layer.b is not None):\n penalty = penalty + T.sum(abs(layer.b) ** 2)\n\n return T.cast(penalty, dtype='floatX')", "def calc_sparsity (data): \n matrix_size = data.shape[0]*data.shape[1] # Number of possible interactions in the matrix\n num_purchases = len(data.nonzero()[0]) # Number of items interacted with\n sparsity = 100*(1 - (num_purchases/matrix_size))\n print('{:.2f} % of the user interaction matrix is sparse'.format(sparsity,2))", "def combined_costs(matrix_MSLL_IO):\r\n return", "def sparsity(self):\n nelem = self._nelem\n\n if nelem is None:\n self._logger.warning(\n \"this matrix will be considered as dense as it has not had its number of elements defined\")\n nelem = self._size\n\n return 1.0 - nelem / self._size", "def overall_sensitivity(self):\n if self.mod1:\n s = torch.max(torch.max(self.weight, -1)[0], -1)[0].item()\n else:\n s = torch.max(torch.sqrt(torch.sum(self.weight * self.weight, -1)))[0].item()\n s *= np.sqrt(2. 
/ np.e)\n return s", "def perplexity(self, sents):\n return 2 ** self.cross_entropy(sents)", "def model_sparsity(model, param_dims=[2, 4], param_types=['weight', 'bias']):\n sparsity, _, _ = model_params_stats(model, param_dims, param_types)\n return sparsity", "def get_prod_penalty(nnet):\n\n assert Cfg.ocsvm_loss is True\n\n penalty = 0\n layers = nnet.trainable_layers\n num_layers = len(layers) - 1 # do not regularize parameters of oc-svm layer\n assert num_layers > 0\n\n W_norm_prod = 1.0\n\n if layers[num_layers-1].b is not None:\n penalty += T.sum(layers[num_layers-1].b ** 2)\n\n for i in range(num_layers-1):\n W_norm_prod *= T.sum(layers[num_layers-1-i].W ** 2)\n if layers[num_layers-2-i].b is not None:\n penalty += W_norm_prod * T.sum(layers[num_layers-2-i].b ** 2)\n\n W_norm_prod *= T.sum(layers[0].W ** 2)\n\n penalty += W_norm_prod\n penalty *= T.sum(nnet.ocsvm_layer.W ** 2)\n\n return penalty", "def test_calc_layer_sparsity():\n test_ndarray = np.array([[0, 2, 0], [1, 0, 1]])\n assert lu.calc_layer_sparsity(test_ndarray) == 3 / 6, 'correct sparsity value'\n\n test_ndarray = np.array([[0, 0, 0], [1, 0, 1]])\n assert abs(lu.calc_layer_sparsity(test_ndarray) - 4 / 6) < 10**-8, 'correct sparsity value'\n assert lu.calc_layer_sparsity(np.zeros((20, 20))) == 1.0, 'zero array should have 1.0 sparsity'\n assert lu.calc_layer_sparsity(\n np.random.rand(20, 20)) == 0.0, 'random array should have 0.0 sparsity'\n assert type(lu.calc_layer_sparsity(np.zeros((10, 10)))) is float, 'return value should be of type float'", "def calc_assn_weights():\n\t\n\t\t\t#\n\t\t\t#\n\ttext(\"\"\"INSERT INTO assignments (mentor_id, course_id, cost)\n\t\t\tSELECT M.mentor_id, C.course_id, SUM(COALESCE(PW.weight_value,PT.def_weight_val))\n\t\t\tFROM mentors M, courses C\n\t\t\tJOIN course2pref C2P ON C2P.course_id = C.course_id\n\t\t\tJOIN prefs P ON P.pref_id = C2P.pref_id\n\t\t\tJOIN pref_types PT ON PT.pref_type_id = P.pref_type_id\n\t\t\tJOIN pref_weights PW ON PW.pref_type_id = P.pref_type_id\n\t\t\tLEFT JOIN choices Ch ON Ch.mentor_id = M.mentor_id AND Ch.weight_id = PW.pref_id", "def _learn_node_parameter_w(outputs, inputs=None):\n num_inputs = 0 if inputs is None else inputs.shape[1]\n weights = np.zeros(shape=num_inputs + 1)\n\n \"\"\" YOUR CODE HERE \"\"\"\n # Ax = b, A\n N_observe = outputs.shape[0]\n A = np.zeros(shape = (num_inputs+1, num_inputs+1))\n for i in range(A.shape[0]):\n for j in range(A.shape[1]):\n if i==0 and j==0:\n A[i][j] = N_observe\n elif i==0 and j!=0:\n A[i][j] = np.sum(inputs[:,j-1])\n elif i!=0 and j==0:\n A[i][j] = np.sum(inputs[:,i-1])\n else:\n for k in range(N_observe):\n A[i][j] += inputs[k,i-1]*inputs[k, j-1]\n b = np.zeros(shape=num_inputs + 1)\n for i in range(len(b)):\n if i==0:\n b[i] = np.sum(outputs)\n else:\n for k in range(N_observe):\n b[i] += inputs[k,i-1]*outputs[k]\n\n weights = np.linalg.solve(A, b)\n \"\"\" END YOUR CODE HERE \"\"\"\n\n return weights", "def get_strength(self):\n return 10 - self.get_agility()", "def cost(self) -> float:", "def nSensParams(self):\n sum = 0\n for r in self._reactors:\n sum += r.nSensParams()\n return sum", "def getWeights(self, gameState, actton):\n\t\treturn {'successorScore': 1.0}", "def test_sparsity(config):\n total_zeros = 0\n total_nonzeros = 0\n\n print ('<===sparsity type is {}'.format(config.sparsity_type))\n print ('<===layers to be pruned are {}'.format(config._prune_ratios))\n if config.masked_progressive and (config.sparsity_type == 'filter' or config.sparsity_type =='column'or config.sparsity_type == 
\"bn_filter\" ):\n ### test both column and row sparsity\n print (\"***********checking column sparsity*************\")\n for name,W in config.model.named_parameters():\n if name not in config.prune_ratios:\n continue\n W = W.cpu().detach().numpy()\n shape = W.shape\n W2d = W.reshape(shape[0],-1)\n column_l2_norm = LA.norm(W2d,2,axis=0)\n zero_column = np.sum(column_l2_norm == 0)\n nonzero_column = np.sum(column_l2_norm !=0)\n\n print (\"column sparsity of layer {} is {}\".format(name,zero_column/(zero_column+nonzero_column)))\n print (\"***********checking filter sparsity*************\") \n for name,W in config.model.named_parameters():\n if name not in config.prune_ratios:\n continue\n W = W.cpu().detach().numpy()\n shape = W.shape\n W2d = W.reshape(shape[0],-1)\n row_l2_norm = LA.norm(W2d,2,axis=1)\n zero_row = np.sum(row_l2_norm == 0)\n nonzero_row = np.sum(row_l2_norm !=0)\n print (\"filter sparsity of layer {} is {}\".format(name,zero_row/(zero_row+nonzero_row)))\n print (\"************checking overall sparsity in conv layers*************\")\n for name,W in config.model.named_parameters():\n if name not in config.prune_ratios:\n continue\n W = W.cpu().detach().numpy() \n total_zeros +=np.sum(W==0)\n total_nonzeros +=np.sum(W!=0)\n print ('only consider conv layers, compression rate is {}'.format((total_zeros+total_nonzeros)/total_nonzeros))\n return\n \n if config.sparsity_type == \"irregular\":\n for name,W in config.model.named_parameters():\n if 'bias' in name:\n continue\n W = W.cpu().detach().numpy()\n zeros = np.sum(W==0)\n total_zeros+=zeros\n nonzeros = np.sum(W!=0)\n total_nonzeros+=nonzeros\n print (\"sparsity at layer {} is {}\".format(name,zeros/(zeros+nonzeros)))\n total_weight_number = total_zeros+total_nonzeros\n print ('overal compression rate is {}'.format(total_weight_number/total_nonzeros))\n elif config.sparsity_type == \"column\":\n for name,W in config.model.named_parameters():\n if name not in config.prune_ratios:\n continue\n W = W.cpu().detach().numpy()\n shape = W.shape\n W2d = W.reshape(shape[0],-1)\n column_l2_norm = LA.norm(W2d,2,axis=0)\n zero_column = np.sum(column_l2_norm == 0)\n nonzero_column = np.sum(column_l2_norm !=0)\n total_zeros +=np.sum(W==0)\n total_nonzeros +=np.sum(W!=0)\n print (\"column sparsity of layer {} is {}\".format(name,zero_column/(zero_column+nonzero_column)))\n print ('only consider conv layers, compression rate is {}'.format((total_zeros+total_nonzeros)/total_nonzeros)) \n elif config.sparsity_type == \"filter\":\n print ('inside if')\n print (config.prune_ratios)\n for name,W in config.model.named_parameters():\n if name not in config.prune_ratios:\n continue\n W = W.cpu().detach().numpy()\n shape = W.shape\n W2d = W.reshape(shape[0],-1)\n row_l2_norm = LA.norm(W2d,2,axis=1)\n zero_row = np.sum(row_l2_norm == 0)\n nonzero_row = np.sum(row_l2_norm !=0)\n total_zeros +=np.sum(W==0)\n total_nonzeros +=np.sum(W!=0)\n print (\"filter sparsity of layer {} is {}\".format(name,zero_row/(zero_row+nonzero_row)))\n print ('only consider conv layers, compression rate is {}'.format((total_zeros+total_nonzeros)/total_nonzeros))\n elif config.sparsity_type == \"bn_filter\":\n print ('inside bn_filter')\n print (config.prune_ratios)\n for i,(name,W) in enumerate(config.model.named_parameters()):\n if name not in config.prune_ratios:\n continue\n W = W.cpu().detach().numpy()\n zeros = np.sum(W==0)\n nonzeros = np.sum(W!=0)\n print (\"sparsity at layer {} is {}\".format(name,zeros/(zeros+nonzeros)))", "def get_sparsity(self, exclude=[]):\n nnz 
= 0 # number of nonzero elements\n nz = 0 # number of zero elements\n for key in self.variables:\n if key in exclude:\n continue\n nnz += amath.sum(self.variables[key] != 0)\n nz += amath.sum(self.variables[key] == 0)\n sparsity = float(nz) / (nnz + nz)\n return sparsity", "def sparsity(model: keras.Model):\n zero = tf.constant(0, dtype=tf.float32)\n model_weight_size = 0\n model_zeros = 0\n sparsity_dict = {}\n\n for layer in model.layers:\n layer_sparsity_dict = {}\n\n for i, weight in enumerate(layer.trainable_weights):\n mask = tf.cast(tf.equal(weight, zero), tf.uint8)\n\n weight_size = tf.size(weight)\n zeros = tf.cast(tf.math.count_nonzero(mask), tf.int32)\n layer_sparsity_dict[weight.name] = zeros / weight_size\n\n model_weight_size += weight_size\n model_zeros += zeros\n\n sparsity_dict[layer.name] = layer_sparsity_dict\n\n model_sparsity = model_zeros / model_weight_size\n\n return model_sparsity, sparsity_dict", "def update_weights_sum(self):\n vals = self.nn.get_param_values()\n # only use the last layer for summation (w, b)\n self.w_sum = np.sum(vals[-2]) + np.sum(vals[-1])", "def getWeights(self, gameState, action):\n return {'successorScore': 1.0}", "def get_weights(self):", "def weights(self) -> List[float]:", "def strength(self) -> float:\n ...", "def _learn_node_parameter_var(outputs, weights, inputs):\n var = 0.\n\n \"\"\" YOUR CODE HERE \"\"\"\n temp = 0\n N_observe = outputs.shape[0]\n if inputs is None:\n temp = np.sum((outputs-weights[0])**2)\n else:\n for i in range(N_observe):\n temp += (outputs[i] - (np.sum(weights[1:] * inputs[i]) +weights[0]))**2\n var = temp/N_observe\n\n\n\n \"\"\" END YOUR CODE HERE \"\"\"\n\n return var", "def calculateCost(self,sol,weights):\n\t\treturn sum([x.value*y if x != None else 0 \\\n\t\t\t\t\tfor x,y in zip(sol,weights)])", "def getWeights(self, gameState, action):\n\n return {\n 'successorScore': 1.0\n }", "def total_cost(self):\n return np.einsum('i->', self.c[self.s])", "def perplexity(self, sents):\n # total words seen\n M = 0\n for sent in sents:\n M += len(sent)\n # cross-entropy\n l = 0\n print('Computing Perplexity on {} sents...\\n'.format(len(sents)))\n for sent in sents:\n l += self.sent_log_prob(sent) / M\n return pow(2, -l)", "def costFun(self, S, x):", "def getWeights(self, gameState, action):\n # return {'successorScore': 1.0}\n if self.isOffensive:\n return self.getOffensiveWeights(gameState, action)\n else:\n return self.getDefensiveWeights(gameState, action)", "def _mse(self, weights):\n error = self._input * weights - self._label\n sum_ = 0.0\n for i in range(self._input.shape[0]):\n sum_ += error[i, 0]**2\n return sum_ / self._input.shape[0]", "def _equalize_weights_lsq_pact(self, bn_dict={}, verbose=False, eps=None):\n\n if not bn_dict:\n bn_dict = get_bn_dict_from_supernodes(self)\n\n module_dict = {}\n for n,m in self.named_modules():\n if (m.__class__.__name__ == \"PACT_Conv2d\" or \\\n m.__class__.__name__ == \"PACT_Conv1d\" or \\\n m.__class__.__name__ == \"PACT_Linear\" or \\\n m.__class__.__name__ == \"BatchNorm2d\" or \\\n m.__class__.__name__ == \"BatchNorm1d\" ):\n module_dict[n] = m\n for n_before in bn_dict.keys():\n n_after = bn_dict[n_before]\n m_before = module_dict[n_before]\n m_after = module_dict[n_after]\n if eps is None:\n eps = m_after.eps\n min_before = weight_min(m_before, 0).cpu().detach().numpy()\n max_before = weight_max(m_before, 0).cpu().detach().numpy()\n if verbose:\n logging.info(\"[Equalization by Least Squares] %s: wrange_min=%.5f wrange_max=%.5f\" % (n_before, 
weight_range(m_before, 0).min().item(), weight_range(m_before, 0).max().item()))\n X = np.vstack((min_before, max_before))\n y = np.asarray((-1,1))\n coeff = torch.zeros(len(min_before), device=m_before.weight.device)\n regr = linear_model.LinearRegression(fit_intercept=False)\n for i in range(len(min_before)):\n regr.fit(X[:,i].reshape((-1,1)), y)\n coeff[i] = torch.as_tensor(regr.coef_[0], device=m_before.weight.device)\n coeff = 1./coeff\n m_before.weight.data[:] = m_before.weight.data[:] / reshape_before(m_before, coeff)\n try:\n m_before.bias.data[:] = m_before.bias.data[:] / coeff\n except AttributeError:\n pass\n m_after.running_mean.data[:] = m_after.running_mean.data[:] / coeff\n m_after.weight.data[:] = m_after.weight.data[:] * reshape_after(m_after, coeff)\n if verbose:\n logging.info(\"[Equalization by Least Squares] %s: wrange_min=%.5f wrange_max=%.5f\" % (n_before, weight_range(m_before, 0).min().item(), weight_range(m_before, 0).max().item()))", "def cost(lossvalues):\n return np.sum(lossvalues ** 2) / (2 * lossvalues.shape[1])", "def storage_operating_costs_rule(_m, y, s):\r\n\r\n return sum(m.C_MC[g, y] * m.p_out[g, y, s, t] for g in m.G_STORAGE for t in m.T)", "def regularization_loss(params: hk.Params) -> jnp.ndarray:\r\n\r\n # L1 Loss\r\n sum_in_layer = lambda p: jnp.sum(jnp.abs(p))\r\n sum_p_layers = [sum_in_layer(p) for p in jax.tree_leaves(params)]\r\n l1_loss = sum(sum_p_layers)\r\n\r\n # L2 Loss\r\n l2_loss = 0.5 * sum(jnp.sum(jnp.square(p)) for p in jax.tree_leaves(params))\r\n\r\n return l2_coef * l2_loss + l1_coef * l1_loss", "def is_sparsity_enabled(cls):\n total,sp100,sp50 = 0,0,0\n for module_name, module, p_name, p, mask, pruned in cls.__sparse_parameters:\n total += 1\n mask_sum = mask.sum()\n mask_numel = mask.numel()\n if mask_sum == mask_numel:\n sp100 += 1\n elif mask_sum*2 == mask_numel:\n sp50 += 1\n\n assert (total == sp100 or total == sp50), \"Inconsistent model sparsity\"\n if total == sp100:\n return False\n elif total == sp50:\n return True", "def objective(self,w):\n l = 0\n for i in range(len(self.x)):\n # Each example contributes log(sigma(y_i * x_i . 
w))\n l -= log(sigmoid(self.y[i] * np.dot(w, self.x[i,:])))\n # regularisation 1/2 * alpha * ||w||^2\n l += 0.5 * self.alpha * np.dot(w,w)\n return l", "def sparsity(tensor):\n return 1.0 - density(tensor)", "def score(matrix,seq,ns=True):\n #specific_binding = sum([row[base_dict[b]] for row,b in zip(matrix,seq)])\n specific_binding = 0\n for i in xrange(len(matrix)): \n specific_binding += matrix[i][base_dict[seq[i]]]\n if ns:\n return log(exp(-beta*specific_binding) + exp(-beta*ns_binding_const))/-beta\n else:\n return specific_binding", "def mlp_weight_sum(self) -> Tuple[Tensor, Tensor]:\n if self._model:\n return self._model.mlp_weight_sum()\n return torch.tensor([0.0]), torch.tensor([0.0])", "def constraint_sum(w):\n return sum(w) - 1", "def getWeights(self, gameState, action):\n return {'successorScore': 1.0}", "def getWeights(self, gameState, action):\n return {'successorScore': 1.0}", "def getWeights(self, gameState, action):\n return {'successorScore': 1.0}", "def getWeights(self, gameState, action):\n return {'successorScore': 1.0}", "def getWeights(self, gameState, action):\n return {'successorScore': 1.0}", "def getWeights(self, gameState, action):\n return {'successorScore': 1.0}", "def getWeights(self, gameState, action):\n return {'successorScore': 1.0}", "def scenario_cost_rule(_m, y, s):\r\n\r\n return m.OP_T[y, s] + m.OP_H[y, s] + m.OP_W[y, s] + m.OP_S[y, s] + m.OP_Q[y, s] + m.OP_L[y, s]", "def compute_cost(y, tx, w):\n # ***************************************************\n # INSERT YOUR CODE HERE\n # TODO: compute loss by MSE / MAE\n # ***************************************************\n \n # vector e\n e = compute_e(y, tx, w)\n N = compute_N(e)\n L_MSE = np.dot(np.matrix.transpose(e), e)\n L_MSE = L_MSE / (2 * N)\n \n return L_MSE", "def get_expected_cost(self):", "def get_weights_sum(self):\n return self.w_sum", "def weight(self):", "def num_prunable_parameters(self) -> int:\n return sum(l.weight.numel() for l in self.emb_layers) + \\\n sum(weight.numel() for weight in self.emb_projs)", "def num_prunable_parameters(self) -> int:\n return sum(l.weight.numel() for l in self.emb_layers) + \\\n sum(weight.numel() for weight in self.emb_projs)", "def node_impurity(self):\n\t\t\n\t\tgini = 0.0\n\t\ttotal = 0.0\n\n\t\tfor c in range(self.n_classes):\n\t\t\ttmp = self.label_count_total[c]\n\t\t\tgini += tmp * tmp\n\n\t\tgini = 1.0 - gini / (self.weighted_n_node_samples *\n\t\t\t\t\t\t\t self.weighted_n_node_samples)\n\n\t\treturn gini", "def cost(self):\n lg = len(self.guarantees.cnf)\n la = len(self.assumptions.cnf)\n\n \"\"\"heuristic\n Low: guarantees while assuming little (assumption set is bigger)\n High: guarantees while assuming a lot (assumption set is smaller)\"\"\"\n\n return la / lg", "def weights(self):\r\n\t\treturn None", "def compute_stability_scores(self):\n self.mutations, self.scores, self.matrix = stability(\n self.seq,\n alphabet='ACGU',\n fold_vectorize=self.fold_vectorize)", "def _decay(self):\n wd_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)\n log.info('Weight decay variables')\n [log.info(x) for x in wd_losses]\n log.info('Total length: {}'.format(len(wd_losses)))\n if len(wd_losses) > 0:\n return tf.add_n(wd_losses)\n else:\n log.warning('No weight decay variables!')\n return 0.0", "def total_sdram_requirements(self):", "def calculate_macs(self) -> None:\n for name, param in self.module.named_parameters():\n if name == \"weight\":\n # ignore N, C when calculate Mult-Adds in ConvNd\n if \"Conv\" in self.class_name:\n self.macs += 
int(param.nelement() * prod(self.output_size[2:]))\n else:\n self.macs += param.nelement()\n # RNN modules have inner weights such as weight_ih_l0\n elif \"weight\" in name:\n self.macs += param.nelement()", "def get_learning_rate():\n return 0.00001", "def _construct_nll_costs(self, si, xo):\n # average log-likelihood over the refinement sequence\n xh = self.obs_transform(si)\n if self.x_type == 'bernoulli':\n ll_costs = log_prob_bernoulli(xo, xh)\n else:\n ll_costs = log_prob_gaussian2(xo, xh, \\\n log_vars=self.bounded_logvar)\n nll_costs = -ll_costs\n return nll_costs", "def net(input_lst, weight_lst, bias):\r\n net_total = bias\r\n\r\n for node in range(len(input_lst)):\r\n net_total += input_lst[node] * weight_lst[node]\r\n\r\n return net_total", "def overall_reduction(self):\n return 84", "def weight_sum(self) -> Tuple[Tensor, Tensor]:\n if not self._isfit:\n return 0.0, 0.0\n e1_sum = 0.0\n e2_sum = 0.0\n for embedding in self.embedding:\n e1_sum += embedding.weight.abs().sum()\n e2_sum += (embedding.weight ** 2).sum()\n return e1_sum, e2_sum", "def effective_cluster_weights(self):\n weights = np.array(\n [\n np.sum(\n self._subspace.function_ordering_multiplicities[\n self._subspace.function_orbit_ids == i\n ]\n * self.eci[self.eci_orbit_ids == i] ** 2\n )\n for i in range(len(self._subspace.orbits) + 1)\n ]\n )\n return weights", "def num_prunable_parameters(self) -> int:\n return sum(l.weight.numel() for l in self.out_layers) + \\\n sum(weight.numel() for weight in self.out_projs)", "def num_prunable_parameters(self) -> int:\n return sum(l.weight.numel() for l in self.out_layers) + \\\n sum(weight.numel() for weight in self.out_projs)", "def analyze_belief_strength_with_bias(self, G):\r\n n = []\r\n nbs_list = []\r\n for node in G.nodes: #cycles through the nodes of the graph to mine the attributes\r\n n.append(node) #appends each node to a list that will be put into a dictionary\r\n pbs_list = []\r\n og_bs = G.nodes[node]['belief_strength'] #mines the numerical value for a nodes belief strength, from a pre-set node attribute\r\n unc = G.nodes[node]['uncertainty'] #mines the numerical value for a nodes belief uncertainty, from a pre-set node attribute\r\n prob = G.nodes[node]['probability']\r\n for pre in G.predecessors(node):\r\n ew = G.edges[pre, node]['weight'] #mines the numerical value of an edge's weight, from a pre-set edge attribute\r\n pre_bs = G.nodes[pre]['belief_strength'] #mines the numerical value for a predecessors belief strength, from a pre-set node attribute\r\n x = ew * pre_bs #determines how much a node values its neighbor's opinion.\r\n pbs_list.append(x) #puts all values for predecessor belief strangths in a list\r\n if len(pbs_list) == 0:\r\n nbs = og_bs\r\n nbs = int(nbs)\r\n else:\r\n apbs = sum(pbs_list)/len(pbs_list) #calculates the average predecessor belief strength value for a node\r\n if apbs*og_bs > 0:\r\n if apbs > 0:\r\n nbs = min(og_bs + (0.1*prob*unc*apbs), 100)\r\n else:\r\n nbs = max(og_bs + (0.1*prob*unc*apbs), -100)\r\n nbs = int(nbs)\r\n else:\r\n nbs = og_bs\r\n nbs = int(nbs)\r\n nbs_list.append(nbs) #the new belief strengths are appended to a list that will be put into adictionary\r\n change = dict(zip(n, nbs_list)) #creates a dictionary from two lists which stores the nodes as keys and their new belief strengths as values\r\n print(change)\r\n return change #this will be used to update the list in a different function\r", "def weights_cost(self, n, lambda_r=0.0):\n return lambda_r * np.sum(np.sum(w ** 2) for w in self.weights) / (2 * 
n)", "def get_n_fit_scalars_per_param(self):\n ns = self.n_states\n ne = self.n_emissions\n nf = self.n_features\n nnt = self.nr_no_train_de\n return {\n 's': ns,\n 't': ns * ns,\n 'e': sum(ns * (nf[i] - 1) for i in range(ne - nnt)) if self.state_no_train_de is None else sum(ns * (nf[i] - 1) for i in range(ne - nnt) if i != self.state_no_train_de),\n }", "def nobs(self):\n return self.sum_weights", "def calculate_penalty(self):\n if AT.PENALTY not in self.attributes:\n return (0, 1)\n return self.attributes[AT.PENALTY].calculate(self)", "def get_objective(X_t, xattr, Y_t, s):\n return eloglik(X_t, xattr, Y_t, s) - (s['KL']).sum()", "def get_cost_updates(self):\n\n y = self.get_hidden_values()\n z = self.get_reconstructed_input(y)\n\n L = T.sum((self.x-z)**2, axis=1)\n\n cost = T.mean(L)\n\n return cost", "def sparsity_regularizer(x, sparsity):\n q = tf.reduce_mean(tf.nn.sigmoid(x))\n return -sparsity * tf.log(q) - (1 - sparsity) * tf.log(1 - q)", "def learnign_rate_examples():\n #######\n bad_larning_rate = 0.1\n not_bad_learning_rate = 1e-4\n good_learning_rate = 1e-3\n #######\n return bad_larning_rate, not_bad_learning_rate, good_learning_rate", "def test_PoissonRegression_penalty_elastic_net_ratio(self):\n ratio_1 = 0.6\n ratio_2 = 0.3\n for penalty in PoissonRegression._penalties.keys():\n if penalty == 'elasticnet':\n learner = PoissonRegression(penalty=penalty, C=self.float_1,\n elastic_net_ratio=ratio_1)\n self.assertEqual(learner.C, self.float_1)\n self.assertEqual(learner.elastic_net_ratio, ratio_1)\n self.assertEqual(learner._prox_obj.strength, 1. / self.float_1)\n self.assertEqual(learner._prox_obj.ratio, ratio_1)\n\n learner.elastic_net_ratio = ratio_2\n self.assertEqual(learner.C, self.float_1)\n self.assertEqual(learner.elastic_net_ratio, ratio_2)\n self.assertEqual(learner._prox_obj.ratio, ratio_2)\n\n else:\n msg = '^Penalty \"%s\" has no elastic_net_ratio attribute$$' % \\\n penalty\n with self.assertWarnsRegex(RuntimeWarning, msg):\n if penalty == 'binarsity':\n PoissonRegression(penalty=penalty,\n elastic_net_ratio=0.8,\n blocks_start=[0], blocks_length=[1])\n else:\n PoissonRegression(penalty=penalty,\n elastic_net_ratio=0.8)\n\n if penalty == 'binarsity':\n learner = PoissonRegression(\n penalty=penalty, blocks_start=[0], blocks_length=[1])\n else:\n learner = PoissonRegression(penalty=penalty)\n\n with self.assertWarnsRegex(RuntimeWarning, msg):\n learner.elastic_net_ratio = ratio_1", "def UpdateS1(i):\n Sum = 0.0\n for j in range(q):\n Sum1 = Kernel(i, int(WorkingSet[j,0]))\n Sum = Sum + (Difference[j])*y_train[int(WorkingSet[j,0])]*Sum1\n s1[i] = s1[i] + Sum\n return s1[i]", "def calc_weight(sequence):\r\n return len(sequence) * AVG_WEIGHT", "def get_layerwise_manipulation_strength(num_layers, truncation_psi, truncation_layers):\n strength = [1.0 for _ in range(num_layers)]\n pdb.set_trace()\n\n if truncation_layers > 0:\n for layer_idx in range(0, truncation_layers):\n strength[layer_idx] = truncation_psi\n return strength", "def netasset(self,coef):\n net_total = sum([share.sharetotal() for share in self.shares])*(1-coef)\n return net_total", "def flexibility(self):\n self._flexibility = 0.25 * self.DAM - 0.25 * self.DCC + 0.5 * self.MOA + 0.5 * self.NOP\n return round(self._flexibility, 5)", "def __init__(self, S=7, B=2, C=20): \n super().__init__()\n self.mse = nn.MSELoss(reduction=\"sum\")\n self.S = S\n self.B = B\n self.C = C\n self.l_noobl = 0.5\n self.l_coord = 5", "def premium(self):\n premium = 0\n for weight, mean, std in zip(self.weights, self.means, 
self.stds):\n shift = (self.data['riskfree'] - mean) * self.data['maturity']\n moneyness = np.array(self.data['moneyness']) + shift\n premium += weight * blackscholes_norm(moneyness,\n self.data['maturity'],\n std, self.data['call'])\n return premium", "def __get_net_probs(self):\n return np.array([node.value for node in self.net]).reshape(5,5)", "def calc_is_weight(self, nodes_value):\n beta = self.beta.step()\n nodes_value = torch.tensor(nodes_value)\n sample_probabilities = nodes_value / self.sum_tree.top_node.value\n weights = ((1 / (len(self) * sample_probabilities.to(self.device))) ** beta)\n weights /= weights.max()\n return weights", "def prob_model_no_attacker(SFTnet, data, T):\n total_prob = 0\n for node in SFTnet.nodes:\n # For each node\n for rec in node.sends_to:\n # For each possible receiver\n normal_ix = node.states.index('normal')\n clean_ix = node.messages.index('clean')\n rate = node.rates[rec][normal_ix, clean_ix]\n num_sent = np.sum((data[2] == node.name) * (data[3] == rec))\n logprob = -rate * T + num_sent * (np.log(rate * T)) \\\n - np.sum(np.log(np.arange(1, num_sent + 1, 1)))\n total_prob += logprob\n return total_prob", "def activation(self, weighted_sum):\n return 0 if (weighted_sum + self.biases) < 0 else 1", "def perplexity(self):\n raise NotImplementedError(\"To be implemented\")", "def prediction_cost(a, y):\n return np.sum((a - y) ** 2) / 2", "def _compute_sensitivities(self, context):\n _logger.info(\"calling _compute_sensitivities.\")\n cached_id = np.random.randint(1000)\n if self.start_epoch == context.epoch_id:\n sensitivities_file = self.sensitivities_file\n else:\n sensitivities_file = self.sensitivities_file + \".epoch\" + str(\n context.epoch_id)\n sensitivities = self._load_sensitivities(sensitivities_file)\n\n for param in context.eval_graph.all_parameters():\n if not re.match(self.pruned_params, param.name()):\n continue\n if param.name() not in sensitivities:\n sensitivities[param.name()] = {\n 'pruned_percent': [],\n 'loss': [],\n 'size': param.shape()[0]\n }\n\n metric = None\n\n for param in sensitivities.keys():\n ratio = self.delta_rate\n while ratio < 1:\n ratio = round(ratio, 2)\n if ratio in sensitivities[param]['pruned_percent']:\n _logger.debug('{}, {} has computed.'.format(param, ratio))\n ratio += self.delta_rate\n continue\n if metric is None:\n metric = self._eval_graph(context, self.eval_rate,\n cached_id)\n\n param_backup = {}\n # prune parameter by ratio\n self._prune_parameters(\n context.eval_graph,\n context.scope, [param], [ratio],\n context.place,\n lazy=True,\n param_backup=param_backup)\n self.pruned_list[0]\n # get accuracy after pruning and update self.sensitivities\n pruned_metric = self._eval_graph(context, self.eval_rate,\n cached_id)\n loss = metric - pruned_metric\n _logger.info(\"pruned param: {}; {}; loss={}\".format(\n param, ratio, loss))\n for brother in self.pruned_list[0]:\n if re.match(self.pruned_params, brother):\n if brother not in sensitivities:\n sensitivities[brother] = {\n 'pruned_percent': [],\n 'loss': []\n }\n sensitivities[brother]['pruned_percent'].append(ratio)\n sensitivities[brother]['loss'].append(loss)\n\n self._save_sensitivities(sensitivities, sensitivities_file)\n\n # restore pruned parameters\n for param_name in param_backup.keys():\n param_t = context.scope.find_var(param_name).get_tensor()\n param_t.set(self.param_backup[param_name], context.place)\n\n# pruned_metric = self._eval_graph(context)\n\n ratio += self.delta_rate\n return sensitivities", "def learn(NW, y):\n\n # no. 
of networks\n K = len(NW)\n\n # combine networks, uniform weighting\n nu = ones(K) * 1. / K\n W = nu[0] * NW[0]\n for i in arange(1, K): W = W + nu[i] * NW[i]\n del NW\n\n auc = zeros(constants.NTRIALS)\n\n # regularization parameters\n cparam = 2 ** array([-14., -12., -10., -8., -6., -4. - 2., -1., 0., 1., 2., 4., 6., 8.])\n\n for trial in arange(constants.NTRIALS):\n seed(trial)\n m = W.shape[0]\n\n # split data set\n pids = where(y == 1)[0]\n npids = len(pids)\n pids = pids[permutation(npids)]\n nids = where(y != 1)[0]\n nnids = len(nids)\n nids = nids[permutation(nnids)]\n tr_pids, val_pids, te_pids = pids[0:3 * npids / 5], pids[3 * npids / 5:4 * npids / 5], pids[4 * npids / 5:]\n tr_nids, val_nids, te_nids = nids[0:3 * nnids / 5], nids[3 * nnids / 5:4 * nnids / 5], nids[4 * nnids / 5:]\n\n trids = hstack((tr_pids, tr_nids))\n valids = hstack((val_pids, val_nids))\n teids = hstack((te_pids, te_nids))\n\n tr_y = zeros(m)\n tr_y[trids] = y[trids]\n pids = where(tr_y == 1)[0]\n npids = len(pids)\n nids = where(tr_y == -1)[0]\n nnids = len(nids)\n tr_y[valids] = (npids - nnids) * 1. / (npids + nnids)\n tr_y[teids] = (npids - nnids) * 1. / (npids + nnids)\n\n rmse = []\n for c in cparam:\n f = lprop(W, tr_y, c)\n rmse.append(sum((f[valids] - y[valids]) ** 2))\n bparam = cparam[argmin(rmse)]\n\n # retrain with training + validation set\n tr_y = zeros(m)\n trids = hstack((tr_pids, tr_nids, val_pids, val_nids))\n (tr_y)[trids] = (y)[trids]\n pids = where(tr_y == 1)[0];\n npids = len(pids);\n nids = where(tr_y == -1)[0];\n nnids = len(nids);\n (tr_y)[teids] = (npids - nnids) * 1. / (npids + nnids)\n f = lprop(W, tr_y, bparam)\n\n auc[trial] = util.auc((f)[teids], (y)[teids])\n\n return auc", "def get_S_r(self):\n\n S_r = np.sum((self.eta_model - self.eta_exp) ** 2.)\n\n return S_r" ]
[ "0.67767024", "0.62359196", "0.6125899", "0.60773647", "0.5991481", "0.596333", "0.5899081", "0.58852255", "0.5814007", "0.57617986", "0.5758206", "0.57509667", "0.57433456", "0.57304865", "0.56787086", "0.56762224", "0.5659587", "0.5657149", "0.56555104", "0.5648981", "0.5642741", "0.5632752", "0.56002814", "0.5582769", "0.5571518", "0.55609745", "0.5560528", "0.55443484", "0.5533882", "0.55334055", "0.5514972", "0.55128855", "0.55068374", "0.5506439", "0.5503253", "0.5486107", "0.5479212", "0.5473713", "0.5453781", "0.5453198", "0.54527813", "0.54464763", "0.54374844", "0.543631", "0.5435215", "0.54332066", "0.54332066", "0.54332066", "0.54332066", "0.54332066", "0.54332066", "0.54332066", "0.542357", "0.54085976", "0.5405523", "0.5399018", "0.5393162", "0.5390601", "0.5390601", "0.5389439", "0.538149", "0.53730184", "0.5345857", "0.5343963", "0.5338312", "0.5336722", "0.5336081", "0.5335663", "0.5320322", "0.53142595", "0.5312361", "0.5310966", "0.53095466", "0.53095466", "0.5306043", "0.530047", "0.5296003", "0.5295024", "0.5285725", "0.52767", "0.5266672", "0.5263796", "0.52631986", "0.5252579", "0.5247171", "0.52334046", "0.5232983", "0.5232611", "0.5229478", "0.52285963", "0.5227267", "0.5225968", "0.5224863", "0.52233577", "0.5222765", "0.52216434", "0.52167743", "0.5212753", "0.5212446", "0.52110237" ]
0.6444512
1
returns the sum of squared spectral norms of network parameters (i.e. the sum of the largest Eigenvalues of dot(W, W.T))
def get_spectral_penalty(nnet, include_bias=False): penalty = 0 for layer in nnet.trainable_layers: if not layer.issvm: eigenvalues, eigvec = T.nlinalg.eigh(T.dot(layer.W, layer.W.T)) eig_max = T.max(eigenvalues) penalty = penalty + eig_max if include_bias: for layer in nnet.trainable_layers: if (not layer.issvm) and (layer.b is not None): penalty = penalty + T.sum(abs(layer.b) ** 2) return T.cast(penalty, dtype='floatX')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def normsq(self):\n return sum(x**2 for x in self.data)", "def sumsquares(self):\n return np.dot((self.demeaned ** 2).T, self.weights)", "def normsq(self):\n return abs(sum(self._ar * self._ar))", "def spectral_norm_parallel(self):\n weights = {}\n for l in self.all_conv_layers:\n weight = l.weight_normalized\n weight_mat = weight.view(weight.size(0), -1)\n if weight_mat.shape not in weights:\n weights[weight_mat.shape] = []\n weights[weight_mat.shape].append(weight_mat)\n loss = 0\n for i in weights:\n weights[i] = torch.stack(weights[i], dim=0)\n with torch.no_grad():\n num_iter = self.num_power_iter\n if i not in self.sr_u:\n num_w, row, col = weights[i].shape\n self.sr_u[i] = F.normalize(torch.ones(num_w, row).normal_(0, 1), dim=1, eps=0.001)\n self.sr_v[i] = F.normalize(torch.ones(num_w, col).normal_(0, 1), dim=1, eps=0.001)\n num_iter = 10 * self.num_power_iter\n for j in range(num_iter):\n self.sr_v[i] = F.normalize(torch.matmul(self.sr_u[i].unsqueeze(1), weights[i]).squeeze(1), dim=1, eps=0.001)\n self.sr_u[i] = F.normalize(torch.matmul(weights[i], self.sr_v[i].unsqueeze(2)).squeeze(2), dim=1, eps=0.001)\n sigma = torch.matmul(self.sr_u[i].unsqueeze(1), torch.matmul(weights[i], self.sr_v[i].unsqueeze(2)))\n loss += torch.sum(sigma)\n return loss", "def norm(self):\n mag_squared = self._sum_of_squares()\n return sqrt(mag_squared)", "def get_weight_norms(parameters: List[Parameter]) -> np.ndarray:\r\n with torch.no_grad():\r\n norms = torch.cat([param.abs().flatten() for param in parameters])\r\n return norms.numpy()", "def weight_norm(W, s):\n _eps = numpy_floatX(1e-5)\n W_norms = tensor.sqrt((W * W).sum(axis=0, keepdims=True) + _eps)\n W_norms_s = W_norms * s # do this first to ensure proper broadcasting\n return W / W_norms_s", "def l2(weights):\n\treturn np.sqrt(np.sum(weights * weights))", "def operator_one_norm(W):\n return torch.max(torch.sum(torch.abs(W), dim=(0, 2, 3)))", "def nSensParams(self):\n sum = 0\n for r in self._reactors:\n sum += r.nSensParams()\n return sum", "def norm(self):\n\t\treturn np.sqrt(self.normSq())", "def compute_gradient_norm(parameters: Iterator):\n total_norm = 0.\n for p in parameters:\n # Possibly add: if hasattr(p, 'grad'):\n param_norm = p.grad.norm(2)\n total_norm += param_norm.item() ** 2\n total_norm **= 1. 
/ 2\n return total_norm", "def normSq(self):\n\t\treturn self.x*self.x+self.y*self.y", "def get_weights_sum(self):\n return self.w_sum", "def total_sum_of_squares(y: np.ndarray) -> float:\n return np.sum(np.square(y - np.linalg.norm(y)))", "def weightednorm(matrix, weights):\n # Unpack the parameters\n wi, wj = weights\n\n # Initialize a list that will be called later to obtain the maximum value\n ivalues = []\n\n try:\n num_rows,num_columns = matrix.shape\n for i in range(num_columns):\n matrixcol = [np.abs(j)*wj for j in matrix[:,i]]\n columnsum = np.sum(matrixcol)\n ivalues.append(columnsum)\n return np.max(ivalues) / wi\n except ValueError:\n matrixcol = [np.abs(j)*wj for j in matrix]\n return np.sum(matrixcol)/wi", "def norm_sqr(x):\n return inner_prod(x, x)[0]", "def norm(self):\n norm = self.scalarProduct(self) ** 0.5\n return norm", "def _ss_tot(self):\n squares = np.square(self.y - np.expand_dims(self._ybar, axis=-2))\n if self.w is None:\n return np.sum(squares, axis=-2)\n else:\n return np.sum(np.matmul(self.w_diag, squares), axis=-2)", "def norm(self):\n return sqrt(self.dot(self))", "def norm(self):\n C = np.prod([F.T @ F for F in self.factors], axis=0)\n return np.sqrt(np.sum(C))", "def get_norms(self):\n l1_sum = 0\n l2_sum = 0\n actives = 0\n for lbl in self.labels:\n for fid in self.w[lbl]:\n # apply and remaing L1 penalities at the end of training.\n alpha = self.s - self.lastW[lbl].get(fid,0)\n self.w[lbl][fid] = self.w[lbl].get(fid, 0) - alpha\n weight = self.w[lbl][fid]\n l1_sum += weight if weight > 0 else -weight\n l2_sum += weight * weight\n if weight != 0:\n actives += 1\n l2_sum = math.sqrt(l2_sum)\n return (l1_sum,l2_sum,actives)", "def norm(self):\n return math.sqrt(sum([x*x for x in self.mV]))", "def norm(self) -> float:\n return self.squared_norm()**0.5", "def norm(self):\n return math.sqrt(self.dotProduct(self))", "def squared_norm(self) -> float:\n return self.__real**2 + self.__img[0]**2 + self.__img[1]**2 + self.__img[2]**2", "def root_sum_of_squares(data, dim=0):\n return torch.sqrt((data ** 2).sum(dim))", "def root_sum_of_squares(data, dim=0):\n return torch.sqrt((data ** 2).sum(dim))", "def _compute_weights(self):\n with variable_scope.variable_scope('compute_weights'):\n self.layer.W = nn_impl.l2_normalize(\n self.layer.v, axis=self.norm_axes) * self.layer.g", "def _compute_weights(self):\n with variable_scope.variable_scope('compute_weights'):\n self.layer.W = nn_impl.l2_normalize(\n self.layer.v, axis=self.norm_axes) * self.layer.g", "def norm(x):\r\n return sqrt(np.numerical.sum(x**2))", "def norm(x):\n return inner_prod(x, x)[0].sqrt_()", "def weight_sum(self) -> Tuple[Tensor, Tensor]:\n if not self._isfit:\n return 0.0, 0.0\n e1_sum = 0.0\n e2_sum = 0.0\n for embedding in self.embedding:\n e1_sum += embedding.weight.abs().sum()\n e2_sum += (embedding.weight ** 2).sum()\n return e1_sum, e2_sum", "def update_weights_sum(self):\n vals = self.nn.get_param_values()\n # only use the last layer for summation (w, b)\n self.w_sum = np.sum(vals[-2]) + np.sum(vals[-1])", "def Norm(self):\n \n return sqrt(sum([sum(abs(x)**2) for x in self.__ObjList]))", "def net_worth(self) -> float:\n return self._net_worth", "def norm(self):\n\t\treturn math.sqrt(self.norm2())", "def norm(self):\n return np.sqrt(np.dot(self._data, self._data))", "def root_sum_of_squares_complex(data, dim=0):\n return torch.sqrt(complex_abs_sq(data).sum(dim))", "def sqnorm(self, d):\n ###TODO\n total = 0.0\n for i in d:\n total = total + (d[i] * d[i])\n return total", "def 
squared_distance(v: Vector, w: Vector) -> float:\n return sum_of_squares(subtract(v, w))", "def norm(x):\n return np.sqrt(np.sum(x ** 2))", "def rmse(self):\n lam = self.lam()\n weights = lam / lam.sum()\n weighted_var = self.var() * weights\n rmse = np.sqrt(weighted_var.sum())\n return rmse", "def wsum(self):\n return reduce(operator.add, self.wvalues, 0.0)", "def neighbors_magnitude(self):\n return sum((n.weight for n in self.neighbors))", "def wt_std(x, w):\n xw = np.average(x, weights=w) # weighted average\n Nnz = np.count_nonzero(x) # number of non-zero weights\n vw = np.average(np.square(x-xw), weights=w)*Nnz/(Nnz-1) # weighted variance\n return m.sqrt(vw)", "def __neff(self):\n return 1. / np.sum(np.square(self.weights))", "def wstd(values: np.ndarray, weights: np.ndarray,\n axis=None, keepdim=False) -> np.ndarray:\n sum_wt = weights.sum(axis=axis, keepdims=True)\n avg = (values * weights).sum(axis=axis, keepdims=True) / sum_wt\n var = ((values - avg) ** 2 * weights).sum(axis=axis, keepdims=True) / sum_wt\n if not keepdim:\n var = np.squeeze(var, axis=axis)\n return np.sqrt(var)", "def wstd(values: np.ndarray, weights: np.ndarray,\n axis=None, keepdim=False) -> np.ndarray:\n sum_wt = weights.sum(axis=axis, keepdims=True)\n avg = (values * weights).sum(axis=axis, keepdims=True) / sum_wt\n var = ((values - avg) ** 2 * weights).sum(axis=axis, keepdims=True) / sum_wt\n if not keepdim:\n var = np.squeeze(var, axis=axis)\n return np.sqrt(var)", "def max_norm() -> float:\n return 1.0", "def model_norm(self, order=2) -> float:\n # L-n norm of model where we treat the model as a flat other\n return math.pow(sum([\n torch.pow(layer, order).sum().item()\n for layer in self.parameters\n ]), 1.0 / order)", "def _norm_along_last_axis(x):\n return np.sqrt(np.sum(np.square(x), axis=x.ndim - 1))", "def normkernel(S, T, n):\n\n k1 = kernel(S, S, n)\n k2 = kernel(T, T, n)\n res = kernel(S, T, n) / sqrt(k1 * k2)\n\n return res", "def sum_of_squares(v: Vector) -> float:\n return dot(v,v)", "def weighted_sum(self):\n return sum(self.wvalues)", "def sum_of_squares(v: Vector) -> float:\n return dot(v, v)", "def sum_of_squares(v: Vector) -> float:\n return dot(v, v)", "def sum_of_squares(v: Vector) -> float:\n return dot(v, v)", "def norm(u, w, a, b):\n u_1, u_2 = u\n return sqrt(dot((u_1, u_2), (u_1, u_2), w, a, b))", "def sum(self):\n return np.dot(self.data.T, self.weights)", "def normalize_weights(self, w):\n n = w.astype(np.float64, copy=True)\n c = float(np.sum(w))\n n /= c\n return n", "def sqnorm(v):\n res = 0\n for elt in v:\n for coef in elt:\n res += coef ** 2\n return res", "def norm(self) -> float:\n return numpy.linalg.norm(self.coeff)", "def spm_wnorm(A):\n\n A = A + 1e-16\n\n norm = np.divide(1.0, np.sum(A, axis=0))\n\n avg = np.divide(1.0, A)\n\n wA = norm - avg\n\n return wA", "def get_norm(x):\n return np.sqrt(np.sum(np.square(x)))", "def L2norm(m):\n return np.sqrt(np.sum(m**2))", "def norm(self):\n return numpy.linalg.norm(self.values)", "def _l2s(self, params):\n return [np.linalg.norm(param) for param in params]", "def norm(self):\n return np.linalg.norm(self.values)", "def overall_sensitivity(self):\n if self.mod1:\n s = torch.max(torch.max(self.weight, -1)[0], -1)[0].item()\n else:\n s = torch.max(torch.sqrt(torch.sum(self.weight * self.weight, -1)))[0].item()\n s *= np.sqrt(2. 
/ np.e)\n return s", "def norm(self):", "def l2_norm(params):\n flattened, _ = flatten(params)\n return np.dot(flattened, flattened)", "def w(self) -> float:\n return self.A[0] if self.scalar_vector else self.A[3]", "def calc_uniformity(self):\n if len(self.neighbors) != 0:\n D_ij = [np.linalg.norm(self.get_vec_to_other(neigh)) for neigh in self.neighbors]\n M_ij = np.sum(D_ij)/len(D_ij)\n K_i = len(self.neighbors)\n \n U_i = np.sqrt((1/K_i) * np.sum([(d - M_ij)**2 for d in D_ij]))\n return U_i\n else:\n return 0", "def norm(self, N=2):\n norms = self.client.map(_call_norm, self.vecDask, N=N, pure=False)\n norm = 0.0\n for future, result in daskD.as_completed(norms, with_results=True):\n norm += np.power(np.float64(result), N)\n return np.power(norm, 1. / N)", "def get_rsq_y(self):\n\n return np.matmul(self.beta_z.T, self.sigma_zw)", "def weight_update_adagrad(self, network):\n epsilon = 10e-8\n\n if self.ms_b is None or self.ms_q is None:\n self.ms_b = []\n self.ms_q = []\n self.ms_rx_inp = []\n self.ms_ry_inp = []\n self.ms_rx_pos_out = []\n self.ms_ry_pos_out = []\n self.ms_rx_neg_out = []\n self.ms_ry_neg_out = []\n for l, layer in enumerate(network.layers):\n self.ms_b.append(np.zeros(layer.b.shape))\n self.ms_q.append(np.zeros(layer.q.shape))\n self.ms_rx_inp.append(np.zeros(layer.input_size))\n self.ms_ry_inp.append(np.zeros(layer.input_size))\n self.ms_rx_pos_out.append(np.zeros(layer.output_size))\n self.ms_ry_pos_out.append(np.zeros(layer.output_size))\n self.ms_rx_neg_out.append(np.zeros(layer.output_size))\n self.ms_ry_neg_out.append(np.zeros(layer.output_size))\n\n for l, layer in enumerate(network.layers):\n self.ms_b[l] += self.dc_db[l] ** 2\n self.ms_q[l] += self.dc_dq[l] ** 2\n\n self.ms_rx_inp[l] += self.dc_drx_inp[l] ** 2\n self.ms_ry_inp[l] += self.dc_dry_inp[l] ** 2\n\n self.ms_rx_pos_out[l] += self.dc_drx_pos_out[l] ** 2\n self.ms_ry_pos_out[l] += self.dc_dry_pos_out[l] ** 2\n self.ms_rx_neg_out[l] += self.dc_drx_neg_out[l] ** 2\n self.ms_ry_neg_out[l] += self.dc_dry_neg_out[l] ** 2\n\n layer.b += -self.alpha * self.dc_db[l] / np.sqrt(self.ms_b[l] + epsilon)\n layer.q += -self.alpha * self.dc_dq[l] / np.sqrt(self.ms_q[l] + epsilon)\n\n layer.rx_inp += -self.alpha * self.dc_drx_inp[l] / np.sqrt(self.ms_rx_inp[l] + epsilon)\n layer.ry_inp += -self.alpha * self.dc_dry_inp[l] / np.sqrt(self.ms_ry_inp[l] + epsilon)\n\n layer.rx_pos_out += -self.alpha * self.dc_drx_pos_out[l] / np.sqrt(self.ms_rx_pos_out[l] + epsilon)\n layer.ry_pos_out += -self.alpha * self.dc_dry_pos_out[l] / np.sqrt(self.ms_ry_pos_out[l] + epsilon)\n layer.rx_neg_out += -self.alpha * self.dc_drx_neg_out[l] / np.sqrt(self.ms_rx_neg_out[l] + epsilon)\n layer.ry_neg_out += -self.alpha * self.dc_dry_neg_out[l] / np.sqrt(self.ms_ry_neg_out[l] + epsilon)", "def computeW(self):\n E = np.where(self.v > 0, 1, -1)\n # theshold the connections to only -1,1\n binary_weights = np.where(self.c > 0, 1, self.c)\n binary_weights = np.where(binary_weights < 0, -1, binary_weights)\n W = np.sum(binary_weights * np.dot(E.reshape(-1,1), E.reshape(1,-1))) # W = C * E * E\n self.W = W\n if np.sum(binary_weights) != 0:\n self.W = self.W / np.sum(binary_weights) # W / W*\n return self.W", "def norm(self):\n return np.linalg.norm(self.ravel())", "def norm_var(self):\n return np.sqrt(self.var)", "def squared_norm(self, x_tensors=None):\n if x_tensors is None:\n x_tensors = self.x_tensors()\n\n return numpy.sum([squared_L2_norm(t) for t in x_tensors])", "def _rsq(self):\n return self._ss_reg / self._ss_tot", "def var(self):\n return 
self.sumsquares / (self.sum_weights - self.ddof)", "def sqr_mag(self) -> float:\n return sum(a**2 for a in self)", "def worddist(self):\n #return (self.n_z_t + self.beta) / (self.n_z[:, numpy.newaxis] + self.V * self.beta), len(self.n_z)\n return (self.n_z_t + self.n_w_d + self.beta) / (self.n_z[:, numpy.newaxis] + self.n_w[:, numpy.newaxis] + self.V * self.beta), len(self.n_z)", "def SquareClusteringCoefficient(graph):\n coef = np.mean(list(nx.square_clustering(graph).values()))\n return coef", "def norm(X, eps=1e-8, keepdims=True):\n return X / (np.sqrt(np.sum(X**2, axis=1, keepdims=keepdims)) + eps)", "def vecnorm(*axes):\n vecsum = 0\n for axis in axes:\n vecsum += axis**2\n return np.sqrt(vecsum)", "def residualNorm(self):\n return math.sqrt(self.residualNorm2())", "def d_weights(n):\n i = np.arange(1, n)\n return np.sqrt(n / (i * (n - i)))", "def sum_of_squares(x):\r\n return dot(x, x)", "def get_residual_sum_of_squares(input_features, output, w):\n X = get_test_data (input_features)\n y = get_test_label (output)\n \n # Building Xbar\n one = np.ones ((X.shape[0], 1))\n Xbar = np.concatenate ((one, X), axis = 1)\n\n RSS = sum ((y - np.dot (Xbar, w)) ** 2)\n return RSS", "def getSizeOfWeightVector(layerNeurons):\r\n return np.sum((np.array(layerNeurons[:-1])+1)*layerNeurons[1:])", "def norm(vector):\r\n result = 0\r\n # initial value for the result of this function\r\n for z in range(len(vector)):\r\n # this loop will continue as long as there are more values in the list \r\n result += vector[z]**2\r\n result = result**.5\r\n # The two equations above find the sum of the squares and then the square root of the squares\r\n return result", "def normBySum(vector):\n\treturn np.divide(vector,float(sum(vector)))", "def norma(self):\n return (self.pi(self))**(0.5)", "def NormU(x):\n return sum(0.5*x**2)", "def getNorm(self, norm=lambda l: (sum(map(lambda x: x ** 2, l))) ** (1 / 2)):\n return norm(self.components)", "def sqrt_sum_wis(\n wavelength: Union[Quantity, ndarray],\n flux: Union[Quantity, ndarray],\n mask: Optional[Union[Quantity, ndarray]] = None,\n grad: bool = True,\n) -> Union[float, Quantity]:\n if mask is None:\n # Don't use np.ones_like() as it will take units of a Quantity.\n mask = np.ones(len(wavelength))\n\n mask_check(mask)\n\n pixel_wis = pixel_weights(wavelength, flux, grad=grad)\n\n # Apply masking function\n if grad:\n masked_wis = pixel_wis * mask\n else:\n masked_wis = pixel_wis * mask[:-1]\n\n sqrt_sum = np.sqrt(np.nansum(masked_wis))\n if not np.isfinite(sqrt_sum):\n warnings.warn(\"Weight sum is not finite = {}\".format(sqrt_sum))\n if sqrt_sum == 0:\n warnings.warn(\n \"Sum of weights sum is = {}. This will cause infinite errors.\".format(\n sqrt_sum\n )\n )\n return sqrt_sum", "def frobeniusNorm(X):\n accum = 0\n V = np.reshape(X,X.size)\n for i in xrange(V.size):\n accum += abs(V[i] ** 2)\n return np.sqrt(accum)" ]
[ "0.7140227", "0.6941976", "0.67742", "0.65775263", "0.64593214", "0.6404113", "0.63714224", "0.63577896", "0.63157535", "0.62651175", "0.623166", "0.6196198", "0.61877424", "0.6176353", "0.6174241", "0.61738396", "0.61442685", "0.6121031", "0.60918915", "0.6082494", "0.60773396", "0.6068501", "0.60515976", "0.6048041", "0.6044287", "0.6029186", "0.6020982", "0.6020982", "0.60003394", "0.60003394", "0.59807116", "0.59553087", "0.59288824", "0.5925381", "0.59034383", "0.58967924", "0.5871506", "0.5859013", "0.5855298", "0.584905", "0.5841304", "0.58379465", "0.58308333", "0.5825168", "0.5807679", "0.57958776", "0.5787729", "0.57843703", "0.57843703", "0.57701653", "0.57632875", "0.5751447", "0.5719208", "0.57183564", "0.5714785", "0.5697545", "0.5697545", "0.5697545", "0.56968135", "0.5693262", "0.5689384", "0.5677865", "0.5661229", "0.5659081", "0.5651457", "0.56448275", "0.56414485", "0.5639878", "0.563545", "0.5630351", "0.56292784", "0.5622362", "0.5617428", "0.56068224", "0.559737", "0.55922306", "0.5589421", "0.55884886", "0.5578289", "0.557796", "0.5577348", "0.5565489", "0.55639875", "0.55576795", "0.5554196", "0.5541558", "0.55400634", "0.55372196", "0.5534635", "0.55300575", "0.55297756", "0.5527224", "0.5525526", "0.5517968", "0.5512833", "0.55084574", "0.5506061", "0.5505444", "0.55004704", "0.55001247" ]
0.5524715
93
returns the l2 penalty on (trainable) network parameters combined as a tensor product.
def get_prod_penalty(nnet): assert Cfg.ocsvm_loss is True penalty = 0 layers = nnet.trainable_layers num_layers = len(layers) - 1 # do not regularize parameters of oc-svm layer assert num_layers > 0 W_norm_prod = 1.0 if layers[num_layers-1].b is not None: penalty += T.sum(layers[num_layers-1].b ** 2) for i in range(num_layers-1): W_norm_prod *= T.sum(layers[num_layers-1-i].W ** 2) if layers[num_layers-2-i].b is not None: penalty += W_norm_prod * T.sum(layers[num_layers-2-i].b ** 2) W_norm_prod *= T.sum(layers[0].W ** 2) penalty += W_norm_prod penalty *= T.sum(nnet.ocsvm_layer.W ** 2) return penalty
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_l2_reg(self) -> torch.Tensor:\n loss = 0\n for param in self.model.parameters():\n loss += (param ** 2).sum()\n return loss", "def l2_regularization_penalty(self):\n return self.l2 * (np.linalg.norm(self.weights)**2)", "def l2_training_penalty(batched_out: base.Output):\n if isinstance(batched_out, base.OutputWithPrior):\n return 0.5 * jnp.mean(jnp.square(batched_out.train))\n else:\n logging.warning('L2 weight penalty only works for OutputWithPrior.')\n return 0.", "def get_l2_penalty(nnet, include_bias=False, pow=2):\n\n l2_penalty = 0\n\n # do not include OC-SVM layer in regularization\n if Cfg.ocsvm_loss:\n if include_bias:\n for layer in nnet.trainable_layers:\n if not layer.issvm:\n if layer.b is not None:\n l2_penalty = (l2_penalty + T.sum(abs(layer.W) ** pow)\n + T.sum(abs(layer.b) ** pow))\n else:\n l2_penalty = l2_penalty + T.sum(abs(layer.W) ** pow)\n else:\n for layer in nnet.trainable_layers:\n if not layer.issvm:\n l2_penalty = l2_penalty + T.sum(abs(layer.W) ** pow)\n else:\n if include_bias:\n for layer in nnet.trainable_layers:\n if layer.b is not None:\n l2_penalty = (l2_penalty + T.sum(abs(layer.W) ** pow)\n + T.sum(abs(layer.b) ** pow))\n else:\n l2_penalty = l2_penalty + T.sum(abs(layer.W) ** pow)\n else:\n for layer in nnet.trainable_layers:\n l2_penalty = l2_penalty + T.sum(abs(layer.W) ** pow)\n\n return T.cast(l2_penalty, dtype='floatX')", "def l2(parameter, bias=None, reg=0.01, lr=0.1):\n \n if bias is not None:\n w_and_b = torch.cat((parameter, bias.unfold(0,1,1)),1)\n else:\n w_and_b = parameter\n L2 = reg # lambda: regularization strength\n Norm = (lr*L2/w_and_b.norm(2))\n if Norm.is_cuda:\n ones_w = torch.ones(parameter.size(), device=torch.device(\"cuda\"))\n else:\n ones_w = torch.ones(parameter.size(), device=torch.device(\"cpu\"))\n l2T = 1.0 - torch.min(ones_w, Norm)\n update = (parameter*l2T) \n parameter.data = update\n # Update bias\n if bias is not None:\n if Norm.is_cuda:\n ones_b = torch.ones(bias.size(), device=torch.device(\"cuda\"))\n else:\n ones_b = torch.ones(bias.size(), device=torch.device(\"cpu\"))\n l2T = 1.0 - torch.min(ones_b, bias)\n update_b = (bias*l2T)\n bias.data = update_b", "def l2_reg_cost(cost, lambtha, weights, L, m):\n w_norm = 0\n for i in range(1, L + 1):\n w_norm += np.linalg.norm(weights['W' + str(i)])\n L2 = cost + (lambtha / (2 * m) * w_norm)\n return L2", "def l2_reg_cost(cost, lambtha, weights, L, m):\n sumWeights = 0\n for i in range(1, L + 1):\n sumWeights += np.linalg.norm(weights['W' + str(i)])\n return cost + sumWeights * lambtha / (2 * m)", "def l2_reg_cost(cost, lambtha, weights, L, m):\n enorm = 0\n for i in range(1, L + 1):\n layer = 'W{}'.format(i)\n enorm += np.linalg.norm(weights[layer])\n return cost + (lambtha / (2 * m)) * enorm", "def get_cost(self, y_enc, output, w1, w2):\n cost = - np.sum(y_enc*np.log(output))\n # add the L2 regularization by taking the L2-norm of the weights and multiplying it with our constant.\n l2_term = (self.l2/2.0) * (np.sum(np.square(w1[:, 1:])) + np.sum(np.square(w2[:, 1:])))\n cost = cost + l2_term\n return cost/y_enc.shape[1]", "def l2_reg_cost(cost):\n return cost + tf.losses.get_regularization_losses()", "def l2_reg_cost(cost):\n return cost + tf.losses.get_regularization_losses()", "def l2_reg_cost(cost):\n return cost + tf.losses.get_regularization_losses()", "def l2_regularization(W, reg_strength):\n # TODO: Copy from the previous assignment\n loss = reg_strength*np.sum(W*W)\n grad = 2*reg_strength*W\n return loss, grad", "def l2_reg_cost(cost):\n\n return 
cost + tf.losses.get_regularization_losses()", "def l2_reg_cost(cost, lambtha, weights, L, m):\n f = 0\n while (L):\n index = \"W{}\".format(L)\n weight = weights[index]\n f += np.linalg.norm(weight)\n L -= 1\n return cost + lambtha / (2 * m) * f", "def l2_loss(params):\n \"\"\" It is a vec for each branch\"\"\"\n loss_branches_vec = []\n # TODO This is hardcoded but all our cases rigth now uses four branches\n for i in range(len(params['branches']) -1):\n loss_branches_vec.append(((params['branches'][i] - params['targets']) **2\n * params['controls_mask'][i])\n * params['branch_weights'][i])\n \"\"\" The last branch is a speed branch\"\"\"\n # TODO: Activate or deactivate speed branch loss\n loss_branches_vec.append((params['branches'][-1] - params['inputs']) ** 2\n * params['branch_weights'][-1])\n return loss_branches_vec, {}", "def L2Learning(**kwargs):\n past_task_params = kwargs['past_task_params']\n dataloader = kwargs['dataloader']\n epochs = kwargs['epochs']\n optim = kwargs['optim']\n crit = kwargs['crit']\n net = kwargs['net']\n ld = kwargs['ld']\n\n for epoch in range(epochs):\n running_loss = 0.0\n for x, y in dataloader:\n if torch.cuda.is_available():\n x = x.cuda()\n y = y.cuda()\n\n optim.zero_grad()\n outputs = net(x)\n loss = crit(outputs, y)\n\n reg = 0.0\n for past_param in past_task_params:\n for i, param in enumerate(net.parameters()):\n penalty = (past_param[i] - param) ** 2\n reg += penalty.sum()\n loss += reg * (ld / 2)\n\n loss.backward()\n optim.step()\n running_loss += loss.item()\n\n if epoch % 10 == 9:\n print(\"[Epoch %d/%d] Loss: %.3f\"%(epoch+1, epochs, running_loss))\n\n ### Save parameters to use next task learning\n tensor_param = []\n for params in net.parameters():\n tensor_param.append(params.detach().clone())\n '''\n tensor_param = torch.stack(tensor_param)\n\n if past_task_params.nelement() > 0:\n past_task_params = torch.cat((past_task_params, tensor_param.unsqueeze(0)))\n else:\n past_task_params = tensor_param.unsqueeze(0)\n '''\n past_task_params.append(tensor_param)", "def l2_reg_cost(cost, lambtha, weights, L, m):\n Frobenius = 0\n for k, v in weights.items():\n if k[0] == \"W\":\n Frobenius += np.linalg.norm(v)\n return cost + (lambtha/(2*m)) * Frobenius", "def L2_func(x):\n return K.expand_dims(K.sqrt(K.sum(K.pow(x,2), axis=1)))", "def l2_regularization(cg, rate=0.01):\n W = VariableFilter(roles=[WEIGHT])(cg.variables)\n L2_cost = rate * l2_norm(W)\n\n return L2_cost", "def cost(self, x, y):\n return self.cross_entropy_error(x,y) + self.l2_regularization_penalty()", "def EmbeddingL2RegularizationUpdate(embedding_variable, net_input, learn_rate, l2_reg_val):\n # TODO(student): Change this to something useful. 
Currently, this is a no-op.\n # net_input = net_input / tf.norm(net_input)\n net_input = tf.nn.l2_normalize(net_input, axis=0)\n grad = l2_reg_val * tf.matmul(tf.transpose(net_input), tf.matmul(net_input, embedding_variable))\n embedding_variable_ = embedding_variable - learn_rate * grad\n\n ## local test #better to disable when learning\n batch_size, number_of_vocabulary_tokens = net_input.shape\n net_example = numpy.random.binomial(1, .1, (3, number_of_vocabulary_tokens))\n sigma_fnc = l2_reg_val * tf.nn.l2_loss(tf.matmul(net_input, embedding_variable))\n # assert tf.gradients(sigma_fnc, embedding_variable) == grad, \"wrong grad in L2\"\n sess = tf.Session()\n sess.run(tf.global_variables_initializer())\n tf_grad = sess.run(tf.gradients(sigma_fnc, embedding_variable)[0], feed_dict={net_input: net_example})\n my_grad = sess.run(grad, feed_dict={net_input: net_example})\n differ = numpy.linalg.norm(tf_grad - my_grad)\n differ = differ / numpy.linalg.norm(tf_grad)\n print('l2 grad differentage {}'.format(differ))\n print('l2 grad max difference {}'.format(numpy.max(tf_grad - my_grad)))\n\n return embedding_variable.assign(embedding_variable_)", "def loss_fn(model):\n with flax.nn.stateful(state) as new_state:\n with flax.nn.stochastic(prng_key):\n logits = model(batch['image'])\n loss = cross_entropy_loss(logits, batch['label'])\n # TODO(britefury): check if applying L2 regularization to weights but\n # *not* biases improves results\n weight_penalty_params = jax.tree_leaves(model.params)\n weight_l2 = sum([jnp.sum(x ** 2)\n for x in weight_penalty_params\n if x.ndim > 1])\n weight_penalty = l2_reg * 0.5 * weight_l2\n loss = loss + weight_penalty\n return loss, (new_state, logits)", "def l2(weights):\n\treturn np.sqrt(np.sum(weights * weights))", "def l2_regularization(variables, factor=1e-4, name='l2_regularization', collections=['regularization']):\n l2 = tf.add_n([tf.sqrt(2.*tf.nn.l2_loss(var)) for var in variables], name=name) if variables else tf.constant(0.)\n loss = factor * l2\n scalar_summary(loss, name, collections)\n return loss", "def l2(weights, name=None):\n with ops.op_scope([weights], name, 'l2_regularizer') as scope:\n my_scale = ops.convert_to_tensor(scale,\n dtype=weights.dtype.base_dtype,\n name='scale')\n return standard_ops.mul(my_scale, nn.l2_loss(weights), name=scope)", "def l2_loss(self, t, use_logit: bool = False):\n c = 0\n if use_logit:\n return np.mean([(self._irf[i].interpolant(t[p]) - logit(self._x[p, i])) ** 2\n for p in range(t.shape[0]) for i in range(self._x.shape[1])])\n else:\n return np.mean([(self._irf[i].interpolant(t[p]) - self._x[p, i]) ** 2\n for p in range(t.shape[0]) for i in range(self._x.shape[1])])", "def nn_cost_function(nn_params, input_layer_size, hidden_layer_size, num_labels, X, y, l):\n Theta_1 = np.reshape(nn_params[0:(hidden_layer_size * (input_layer_size + 1)), ],\n (hidden_layer_size, input_layer_size + 1))\n Theta_2 = np.reshape(nn_params[(hidden_layer_size * (input_layer_size + 1)):, ],\n (num_labels, hidden_layer_size + 1))\n\n m, n = X.shape\n X = np.hstack((np.ones((m, 1)), X))\n\n Z_2 = X.dot(Theta_1.T)\n A_2 = sigmoid(Z_2)\n A_2 = np.hstack((np.ones((m, 1)), A_2))\n\n Z_3 = A_2.dot(Theta_2.T)\n A_3 = sigmoid(Z_3)\n\n Y = np.zeros((m, num_labels))\n for i in range(m):\n Y[i, y[i] - 1] = 1\n\n j = 0.0\n for i in range(m):\n j += np.log(A_3[i, ]).dot(-Y[i, ].T) - np.log(1 - A_3[i, ]).dot(1 - Y[i, ].T)\n j /= m\n\n Theta_1_square = np.square(Theta_1[:, 1:])\n Theta_2_square = np.square(Theta_2[:, 1:])\n reg = 1.0 * l / (2 * m) * 
(np.sum(Theta_1_square) + np.sum(Theta_2_square))\n j += reg\n\n d_3 = A_3 - Y\n D_2 = d_3.T.dot(A_2)\n\n Z_2 = np.hstack((np.ones((m, 1)), Z_2))\n d_2 = d_3.dot(Theta_2) * sigmoid_gradient(Z_2)\n d_2 = d_2[:, 1:]\n D_1 = d_2.T.dot(X)\n\n Theta_1_grad = 1.0 * D_1 / m\n Theta_1_grad[:, 1:] = Theta_1_grad[:, 1:] + 1.0 * l / m * Theta_1[:, 1:]\n\n Theta_2_grad = 1.0 * D_2 / m\n Theta_2_grad[:, 1:] = Theta_2_grad[:, 1:] + 1.0 * l / m * Theta_2[:, 1:]\n\n grad = np.hstack((Theta_1_grad.ravel(), Theta_2_grad.ravel()))\n\n return j, grad", "def loss(self, x, y):\n (N,D) = x.shape\n k1 = np.matmul(x,np.transpose(self.w)) + self.b\n y1 = y.reshape((N,1))\n c2 = 0\n c1 = (np.log(1+np.exp(-1*y1*k1)))\n for i in range(N):\n c2 += c1[i][0]\n l = c2 / N + (0.5 * self.l2_reg * np.dot(self.w,np.transpose(self.w)))\n l1 = l[0][0]\n return l1\n\n\n #raise NotImplementedError", "def penalty_objective(Z_0, Z_1, Z_2, psi, theta):\n return theta * sum(map(l1_od_norm, Z_0)) + (1 - theta) * sum(map(psi, Z_2 - Z_1))", "def dual_objective(self, dual_coeffs):\n primal = self.model._sdca_primal_dual_relation(self.l_l2sq,\n dual_coeffs)\n prox_l2_value = 0.5 * self.l_l2sq * np.linalg.norm(primal) ** 2\n return self.model.dual_loss(dual_coeffs) - prox_l2_value", "def l2(name, weights):\n\n with tf.name_scope(name):\n regularizer = np.float32(0.0)\n for weight in weights:\n tf.add(regularizer, tf.nn.l2_loss(weight))\n\n return regularizer", "def L2(yhat, y):\n loss = np.dot((y - yhat).T,(y - yhat))\n \n return loss", "def _svm_loss_penalty_dual(name):\n return hp.choice(name, [\n ('hinge', 'l2', True),\n ('squared_hinge', 'l2', True),\n ('squared_hinge', 'l1', False),\n ('squared_hinge', 'l2', False)\n ])", "def regularization_loss(params: hk.Params) -> jnp.ndarray:\r\n\r\n # L1 Loss\r\n sum_in_layer = lambda p: jnp.sum(jnp.abs(p))\r\n sum_p_layers = [sum_in_layer(p) for p in jax.tree_leaves(params)]\r\n l1_loss = sum(sum_p_layers)\r\n\r\n # L2 Loss\r\n l2_loss = 0.5 * sum(jnp.sum(jnp.square(p)) for p in jax.tree_leaves(params))\r\n\r\n return l2_coef * l2_loss + l1_coef * l1_loss", "def tf_l2_loss(Gt, pred,_axis):\n l2diff = tf.subtract(Gt, pred)\n l2loss = tf.reduce_sum(tf.square(l2diff), axis=_axis)\n l2loss = tf.maximum(l2loss, 1e-10)\n l2loss = tf.sqrt(l2loss) # (n_batch, n_class) -> (n_batch, 1)\n\n return l2loss", "def build_loss(self):\n import tensorflow as tf\n\n y_1d = [tf.reduce_sum(tf.multiply(self.variables[\"y\"][i], self.variables[\"y_action\"][i]), axis=1) for i in range(len(self.variables[\"y\"]))]\n loss = np.sum([tf.nn.l2_loss(y_1d[i] - self.variables[\"y_true\"]) for i in range(len(y_1d))])\n\n l1_reg = 0\n l2_reg = 0\n\n keys = sorted(self.variables.keys())\n keys = [key for key in keys if critere_keys(key) and \"W\" in key]\n for key in keys:\n l1_reg += tf.reduce_sum(tf.abs(self.variables[key]))\n l2_reg += tf.nn.l2_loss(self.variables[key])\n\n self.loss = loss + self.alpha_reg * l1_reg + self.beta_reg * l2_reg\n\n self.train_step = tf.train.RMSPropOptimizer(self.decay_learning_rate,\n decay=0.99, momentum=0., centered=True).minimize(self.loss, global_step=self.global_step)", "def weight_l2_norm():\n cumulated_l2_norm = tf.constant(0., dtype=tf.float32)\n for trainable_variable in tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES):\n name = trainable_variable.name.split('/')[-1]\n if name.startswith('weights'):\n cumulated_l2_norm += tf.nn.l2_loss(trainable_variable)\n return cumulated_l2_norm", "def get_spectral_penalty(nnet, include_bias=False):\n\n penalty = 0\n\n for layer in 
nnet.trainable_layers:\n if not layer.issvm:\n eigenvalues, eigvec = T.nlinalg.eigh(T.dot(layer.W, layer.W.T))\n eig_max = T.max(eigenvalues)\n penalty = penalty + eig_max\n\n if include_bias:\n for layer in nnet.trainable_layers:\n if (not layer.issvm) and (layer.b is not None):\n penalty = penalty + T.sum(abs(layer.b) ** 2)\n\n return T.cast(penalty, dtype='floatX')", "def mpt11_w34_2_v2_lr(optimizer, args, **kwargs):\n\n def _lr_adjuster(epoch, iteration):\n if epoch >= 50:\n lr = args.lr * 0.1\n\n if epoch >= 62:\n lr = lr * 0.1\n\n if epoch >= 70:\n lr = lr * 0.01\n else:\n lr = args.lr\n\n assign_learning_rate(optimizer, lr)\n\n return lr\n\n return _lr_adjuster", "def mpt11_w18_2_v2_lr(optimizer, args, **kwargs):\n\n def _lr_adjuster(epoch, iteration):\n if epoch >= 35:\n lr = args.lr * 0.1\n\n if epoch >= 50:\n lr = lr * 0.1\n\n if epoch >= 60:\n lr = lr * 0.01\n else:\n lr = args.lr\n\n assign_learning_rate(optimizer, lr)\n\n return lr\n\n return _lr_adjuster", "def _l2(self, y, x, y_minus_g, w_0):\n # Initialize weight vector to return\n w_1 = np.zeros(len(w_0))\n \n for j in range(len(x)):\n reg = float(w_0[j]) / (self._lambda*self._num_training)\n w_1[j] = w_0[j] + self._epsilon*(x[j]*y_minus_g - reg)\n \n return w_1", "def compute_loss_logreg_regl2(y, tx, w, lambda_):\n loss = compute_loss_logreg(y, tx, w)\n penal_loss = loss + lambda_ * w.dot(w)\n\n return penal_loss", "def compute_cost(A2, Y, params):\n m = Y.shape[1]\n \n logprobs1 = -np.dot(Y, np.log(A2).T)\n logprobs2 = -np.dot(1-Y, np.log(1-A2).T)\n cost = 1/m * (logprobs1 + logprobs2)\n \n cost = np.asscalar(cost)\n return cost", "def EmbeddingL1RegularizationUpdate(embedding_variable, net_input, learn_rate, l1_reg_val):\n # TODO(student): Change this to something useful. Currently, this is a no-op.\n net_input = tf.nn.l2_normalize(net_input, axis=0)\n sign_inside = tf.sign(tf.matmul(net_input, embedding_variable))\n where = tf.equal(sign_inside, 0)\n # should replace 0's with random in [-1, 1] for an better (not necessarily acute)implementation\n grad = l1_reg_val * tf.matmul(tf.transpose(net_input), sign_inside)\n embedding_variable_ = embedding_variable - learn_rate * grad\n\n ## local test #better to disable when learning\n batch_size, number_of_vocabulary_tokens = net_input.shape\n net_example = numpy.random.binomial(1, .1, (3, number_of_vocabulary_tokens))\n sigma_fnc = l1_reg_val * tf.norm(tf.matmul(net_input, embedding_variable), ord=1)\n # assert tf.gradients(sigma_fnc, embedding_variable) == grad, \"wrong grad in L2\"\n sess = tf.Session()\n sess.run(tf.global_variables_initializer())\n tf_grad = sess.run(tf.gradients(sigma_fnc, embedding_variable)[0], feed_dict={net_input: net_example})\n my_grad = sess.run(grad, feed_dict={net_input: net_example})\n differ = numpy.linalg.norm(tf_grad - my_grad)\n differ = differ / numpy.linalg.norm(tf_grad)\n print('l1 grad differentage {}'.format(differ))\n print('l2 grad max difference {}'.format(numpy.max(tf_grad - my_grad)))\n\n return embedding_variable.assign(embedding_variable_)", "def l2_reg(tensors, name='L2Reg'):\n if hasattr(tensors, '__iter__'):\n tensors = list(tensors)\n with tf.name_scope(name, values=tensors):\n return 0.5 * sum(tf.reduce_sum(p ** 2) for p in tensors)\n else:\n with tf.name_scope(name, values=[tensors]):\n return tf.nn.l2_loss(tensors)", "def kl_loss(self):\n return sum([p.kl_loss() for p in self.parameters])", "def l2_loss(obs, actual):\n # (tf.Tensor, tf.Tensor, float) -> tf.Tensor\n return tf.reduce_sum(tf.square(obs - actual), 1)", "def 
loss(self, X, y=None):\n scores = None\n ############################################################################\n # TODO: Implement the forward pass for the two-layer net, computing the #\n # class scores for X and storing them in the scores variable. #\n ############################################################################\n hid1, hid1cache = affine_relu_forward(X, self.params['W1'], self.params['b1'])\n scores, scorecache = affine_forward(hid1, self.params['W2'], self.params['b2'])\n ############################################################################\n # END OF YOUR CODE #\n ############################################################################\n\n # If y is None then we are in test mode so just return scores\n if y is None:\n return scores\n\n loss, grads = 0, {}\n ############################################################################\n # TODO: Implement the backward pass for the two-layer net. Store the loss #\n # in the loss variable and gradients in the grads dictionary. Compute data #\n # loss using softmax, and make sure that grads[k] holds the gradients for #\n # self.params[k]. Don't forget to add L2 regularization! #\n # #\n # NOTE: To ensure that your implementation matches ours and you pass the #\n # automated tests, make sure that your L2 regularization includes a factor #\n # of 0.5 to simplify the expression for the gradient. #\n ############################################################################\n loss, dscores = softmax_loss(scores, y)\n loss += 0.5 * self.reg *( np.sum(self.params['W1']**2) + np.sum(self.params['W2']**2) )\n\n dhid1, grads['W2'], grads['b2'] = affine_backward(dscores, scorecache)\n dx, grads['W1'], grads['b1'] = affine_relu_backward(dhid1, hid1cache)\n\n grads['W1'] += self.reg * self.params['W1']\n grads['W2'] += self.reg * self.params['W2']\n ############################################################################\n # END OF YOUR CODE #\n ############################################################################\n\n return loss, grads", "def regularizer(self):\n \n # L2 regularization for the fully connected parameters.\n regularizers = (tf.nn.l2_loss(self.weights.wd1) + tf.nn.l2_loss(self.weights.bd1) + \n tf.nn.l2_loss(self.weights.wout) + tf.nn.l2_loss(self.weights.bout))\n return regularizers", "def compute_cost(AL, Y, parameters ,lambd):\n L = len(parameters) // 2\n m = Y.shape[1]\n cost = -1 / m * np.sum(np.nan_to_num(Y * np.log(AL) + (1-Y) * np.log(1-AL)))\n cost+= 0.5*(lambd/m)*sum(np.linalg.norm(parameters['W' + str(i)])**2 for i in range(1,L))\n return cost", "def nnCostFunction2(nn_params, input_layer_size, hidden_layer_size, num_labels, X, y, lambda_):\n # Reshape nn_params back into the parameters Theta1 and Theta2, the weight matrices\n # for our 2 layer neural network\n Theta1 = nn_params[:hidden_layer_size * (input_layer_size + 1)].reshape(\n (hidden_layer_size, input_layer_size + 1))\n Theta2 = nn_params[hidden_layer_size *\n (input_layer_size + 1):].reshape((num_labels, hidden_layer_size + 1))\n\n # Setup some useful variables\n m = X.shape[0]\n\n # Add ones to the X data matrix\n X = np.insert(X, 0, 1, axis=1)\n\n # Perform forward propagation for layer 2\n z2 = np.matmul(X, Theta1.transpose())\n a2 = sigmoid(z2)\n a2 = np.insert(a2, 0, 1, axis=1)\n z3 = np.matmul(a2, Theta2.transpose())\n a3 = sigmoid(z3)\n\n # turn Y into a matrix with a new column for each category and marked with 1\n y_one_hot = np.zeros_like(a3)\n for i in range(m):\n y_one_hot[i, y[i] - 1] = 1\n\n # Calculate the 
cost of our forward prop\n ones = np.ones_like(a3)\n A = np.matmul(y_one_hot.transpose(), np.log(a3)) + \\\n np.matmul((ones - y_one_hot).transpose(), np.log(ones - a3))\n J = -1 / m * A.trace()\n J += lambda_ / (2 * m) * \\\n (np.sum(Theta1[:, 1:] ** 2) + np.sum(Theta2[:, 1:] ** 2))\n\n # Perform backward propagation to calculate deltas & gradients\n delta3 = a3 - y_one_hot\n delta2 = np.matmul(delta3, Theta2[:, 1:]) * sigmoidGradient(z2)\n Theta2_grad = np.matmul(a2.transpose(), delta3).transpose()\n Theta1_grad = np.matmul(X.transpose(), delta2).transpose()\n\n Theta1_grad[:, 1:] += lambda_ * Theta1[:, 1:]\n Theta2_grad[:, 1:] += lambda_ * Theta2[:, 1:]\n Theta1_grad /= m\n Theta2_grad /= m\n grad = np.concatenate([Theta1_grad.reshape(-1), Theta2_grad.reshape(-1)])\n return J, grad", "def l2_loss(inputs, reduction='mean', **kwargs):\n args = ArgHelper.parse(locals())\n args['reduction'] = reduction.upper()\n op_lib = loss_ops_lib.L2Loss\n if context.executing_eagerly():\n return op_lib \\\n .instantiate(reduction=args['reduction']) \\\n .apply(inputs)\n else:\n return op_lib.blend(**args)", "def resmlp(self, x, w3, w2, b2, w, b):\r\n return tf.matmul(tf.nn.tanh(tf.matmul(x, w2) + b2), w) + tf.matmul(x,\r\n w3) + b", "def loss(params: hk.Params, batch, labels, xent_weight=self.weights, l1_coeff=self.l1_coef, l2_coeff=self.l2_coef) -> jnp.ndarray:\n logits = net.apply(params, batch)\n labels = jax.nn.one_hot(label, 2)\n\n # Note that in our problem, regularization should be after the AND-mask.\n sum_in_layer = lambda p: jnp.sum(p)\n sum_p_layers = [sum_in_layer(p) for p in jax.tree_leaves(params)]\n l1_loss = sum(sum_p_layers)\n l2_loss = 0.5 * sum(jnp.sum(jnp.square(p)) for p in jax.tree_leaves(params))\n softmax_xent = -jnp.sum(labels * jax.nn.log_softmax(logits) * xent_weight)\n softmax_xent /= labels.shape[0]\n\n return softmax_xent + l2_coeff * l2_loss + l1_coeff * l1_loss", "def optimize_ppo2(trial):\n\treturn {\n\t\t'n_steps': int(trial.suggest_loguniform('n_steps', 64, 2048)),\n\t\t'gamma': trial.suggest_loguniform('gamma', 0.9, 0.9999),\n\t\t'learning_rate': trial.suggest_loguniform('learning_rate', 1e-5, 1e-1),\n\t\t'ent_coef': trial.suggest_loguniform('ent_coef', 1e-8, 1e-1),\n\t\t'cliprange': trial.suggest_uniform('cliprange', 0.1, 0.4),\n\t\t'noptepochs': int(trial.suggest_loguniform('noptepochs', 1, 48)),\n\t\t'lam': trial.suggest_uniform('lam', 0.8, 1.)\n\t}", "def RHS2(y,t):\r\n dy[:N] =y[N:2*N]\r\n dy[N:2*N]=scipy.sparse.csr_matrix.__mul__(L_alpha,y[0:N])\r\n return dy", "def l2_loss(x, y, kernel_name=\"l2_loss\"):\n shape = x.get(\"shape\")\n dtype = x.get(\"dtype\")\n\n check_shape(shape, param_name=\"x\")\n\n check_list = [\"float16\", \"float32\"]\n if not dtype.lower() in check_list:\n raise RuntimeError(\n \"l2_loss only support float16 float32\")\n\n shape, axis = util.simplify_axis_shape(shape, range(len(shape)))\n\n inp_dtype = dtype.lower()\n data_input = tvm.placeholder(shape, name=\"data_input\", dtype=inp_dtype)\n\n coeff_sqrt = tvm.const(1.0 / (2**(0.5)), dtype=inp_dtype)\n\n data_mul = te.lang.cce.vmuls(data_input, coeff_sqrt)\n data_sqr = te.lang.cce.vmul(data_mul, data_mul)\n res = te.lang.cce.sum(data_sqr, axis)\n\n with tvm.target.cce():\n sch = generic.auto_schedule(res)\n\n config = {\"name\": kernel_name,\n \"tensor_list\": [data_input, res]}\n te.lang.cce.cce_build_code(sch, config)", "def l2_loss(predictions, real_values):\n with tf.variable_scope('loss'):\n # 1/2n \\sum^{n}_{i=i}{(x_i - x'_i)^2}\n mse = tf.div(tf.reduce_mean(\n 
tf.square(tf.subtract(predictions, real_values))), 2, name=\"mse\")\n tf.add_to_collection('losses', mse)\n \n # mse + weight_decay per layer\n error = tf.add_n(tf.get_collection('losses'), name='total_loss')\n\n return error", "def kp_l2_loss(kp_pred, kp_gt):\n criterion = torch.nn.MSELoss()\n\n vis = (kp_gt[:, :, 2, None] > 0).float()\n\n # This always has to be (output, target), not (target, output)\n return criterion(vis * kp_pred, vis * kp_gt[:, :, :2])", "def l2_loss_vectorized(self, W, X, y, reg):\n loss = 0.0\n dW = np.zeros(W.shape) # initialize the gradient as zero\n num_train = X.shape[0]\n num_of_classes = W.shape[1]\n #############################################################################\n # TODO: #\n # Implement a vectorized version of the perceptron loss, storing the #\n # result in loss and the gradient in dW #\n #############################################################################\n\n\n scores = X.dot(W) - y\n\n loss = np.mean(0.5 * (scores**2))\n\n grad = np.empty_like(W)\n grad = X.T.dot(scores)\n dW = grad\n dW /= num_train\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n\n return loss, dW", "def build_LP(beta, rw):\n listgate = []\n \n for iw in range(len(rw)):\n listgate = listgate + [T1(beta, rw[iw], -rw[iw])]\n\n return prod(*listgate)", "def compute_cost(zn, y, reg, params, n_layers):\n\n logits = tf.transpose(zn)\n labels = tf.transpose(y)\n\n regularization = 0.0\n for i in range(n_layers):\n wn = 'W{}'.format(i)\n regularization += tf.nn.l2_loss(params[wn])\n\n cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=labels)) + (\n reg * regularization)\n\n return cost", "def get_lr_cost(self):\n\n\t\tlabels = self.get_symbolic_expected_rewards()\n\n\t\treturn -theano.tensor.mean(\n\t\t\ttheano.tensor.log(labels)[\n\t\t\t\ttheano.tensor.arange(self.symbolic_output.shape[0]),\n\t\t\t\tself.symbolic_output])", "def compute_cost_with_regularization(A3, Y, parameters, lambd):\n\n m = Y.shape[1]\n W1 = parameters[\"W1\"]\n W2 = parameters[\"W2\"]\n W3 = parameters[\"W3\"]\n\n cross_entropy_cost = compute_cost(A3, Y)\n L2_regularization_cost = (np.sum(np.square(W1)) + np.sum(np.square(W2)) + np.sum(np.square(W3))) * lambd / (2 * m)\n\n cost = cross_entropy_cost + L2_regularization_cost\n\n return cost", "def l2_loss(input, target):\n pos_inds = torch.nonzero(target > 0.0).squeeze(1)\n if pos_inds.shape[0] > 0:\n cond = torch.abs(input[pos_inds] - target[pos_inds])\n loss = 0.5 * cond**2 / pos_inds.shape[0]\n else:\n loss = input * 0.0\n return loss.sum()", "def compute_cost(A2, Y, parameters):\n\n\tm = Y.shape[1] # number of example\n\n\t# Compute the cross-entropy cost\n\tlogprobs = np.multiply(np.log(A2), Y)\n\tcost = -(1/m)*(np.sum((logprobs) + np.multiply(np.log(1-A2), 1-Y)))\n\n\tcost = np.squeeze(cost) # makes sure cost is the dimension we expect. 
\n\t \t# E.g., turns [[17]] into 17 \n\tassert(isinstance(cost, float))\n\n\treturn cost", "def l2(theta: np.ndarray) -> float:\n if theta.size == 0:\n return None\n array_sum = theta.transpose().dot(theta)\n return float(array_sum - theta[0]**2)", "def _construct_reg_costs(self):\n param_reg_cost = sum([T.sum(p**2.0) for p in self.joint_params])\n return param_reg_cost", "def objective(self, coeffs, loss: float = None):\n prox_l2_value = 0.5 * self.l_l2sq * np.linalg.norm(coeffs) ** 2\n return SolverFirstOrderSto.objective(self, coeffs, loss) + prox_l2_value", "def rl_modelrl_l2_tiny():\n hparams = rl_modelrl_tiny()\n hparams.generative_model_params = \"basic_conv_l2\"\n return hparams", "def adagrad_l1_update(loss, w, l1, learning_rate=1.0):\n g = theano.grad(loss, w) # (tef_n, )\n updates = OrderedDict()\n\n # for each gradient-weights vector pair compile the following\n\n # empty initializations, later of size (tef_n, )\n # maximum penalty accumulator\n u = theano.shared(np.zeros(1, dtype=theano.config.floatX), broadcastable=w.broadcastable)\n\n # actual accumulated penalties\n q = theano.shared(np.zeros(1, dtype=theano.config.floatX), broadcastable=w.broadcastable)\n\n # squared gradients accumulator\n gg = theano.shared(np.zeros(1, dtype=theano.config.floatX), broadcastable=w.broadcastable)\n\n # adagrad learning rate upgrade\n gradient_squared_accu_new = gg + g ** 2 # (tef_n, )\n adagrad_learning_rate = learning_rate / T.sqrt(gradient_squared_accu_new)\n\n # total possible accumulated l1 penalty\n u_new = u + l1 * adagrad_learning_rate\n\n # update rule: w_k+1/2,i = w_k,i - adagrad_lr_k * g_i\n w_tmp = w - adagrad_learning_rate * g\n\n # apply penalties\n if T.gt(l1, 0):\n # if w_k+1/2,i > 0: w_k+1,i = max(0, w_k+1/2,i - (u_k,i + q_k-1,i)) else if: ...\n w_update = T.switch(T.gt(w_tmp, 0.),\n T.maximum(w_tmp - (u_new + q), 0.), # w_tmp > 0\n T.switch(T.lt(w_tmp, 0.0),\n T.minimum(w_tmp + (u_new - q), 0.), # w_tmp < 0\n 0.) 
# w_tmp == 0\n )\n else:\n w_update = w_tmp\n\n # return updates (key: shared variable, value: symbolic variable)\n updates[w] = w_update\n updates[gg] = gradient_squared_accu_new\n updates[u] = u_new\n # actually accumulated penalty\n updates[q] = q + w_update - w_tmp\n\n return updates, u, q, gg", "def loss(W_vect, X, T):\n # log_prior = - 0.5 * L2_reg * jnp.dot(W_vect, W_vect)\n return jnp.mean((predictions(W_vect, X) - T)**2) + 0.5*jnp.log(2*jnp.pi)", "def penalty(self):\n assert len(self.weights) == len(self.means), \"Dimensions!\"\n out = np.exp(self.data['riskfree'] * self.data['maturity'])\n for weight, mean in zip(self.weights, self.means):\n out -= weight * np.exp(mean * self.data['maturity'])\n return (out**2).mean()**.5", "def reg_loss(model: nn.Module, regularizer: str, l1: float=0.01, l2: float=0.01):\n if regularizer == 'l1':\n l1_reg = l1 * sum(torch.abs(p).sum() for p in model.parameters())\n return l1_reg\n if regularizer == 'l2':\n l2_reg = l2 * sum(torch.square(p).sum() for p in model.parameters())\n return l2_reg\n if regularizer == 'l1_l2':\n l1_reg = l1 * sum(torch.abs(p).sum() for p in model.parameters())\n l2_reg = l2 * sum(torch.square(p).sum() for p in model.parameters())\n return l1_reg + l2_reg", "def mpt11_w18_2_lr(optimizer, args, **kwargs):\n\n def _lr_adjuster(epoch, iteration):\n if epoch >= 46:\n lr = args.lr * 0.1\n\n if epoch >= 65:\n lr = lr * 0.1\n\n if epoch >= 80:\n lr = lr * 0.01\n else:\n lr = args.lr\n\n assign_learning_rate(optimizer, lr)\n\n return lr\n\n return _lr_adjuster", "def p2(self) -> float:\n return self.distortion_coefficients[4]", "def objective(self, coeffs, loss: float = None):\n prox_l2_value = 0.5 * self.l_l2sq * np.linalg.norm(coeffs) ** 2\n return SolverFirstOrderSto.objective(self, coeffs,\n loss) + prox_l2_value", "def loss(self, X, y=None, reg=0.0):\n\n self.layers = []\n layers = self.layers\n layers.append(X)\n\n # Unpack variables from the params dictionary\n W1, b1 = self.params['W1'], self.params['b1']\n W2, b2 = self.params['W2'], self.params['b2']\n N, D = X.shape\n H, C = W2.shape\n\n # Compute the forward pass\n scores = None\n #############################################################################\n # TODO: Perform the forward pass, computing the class scores for the input. #\n # Store the result in the scores variable, which should be an array of #\n # shape (N, C). #\n #############################################################################\n mid = np.maximum(0, X.dot(W1) + b1.reshape(1, -1)) # activation\n scores = mid.dot(W2) + b2.reshape(1, -1)\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n\n # If the targets are not given then jump out, we're done\n if y is None:\n return scores\n\n # Compute the loss\n loss = None\n #############################################################################\n # TODO: Finish the forward pass, and compute the loss. This should include #\n # both the data loss and L2 regularization for W1 and W2. Store the result #\n # in the variable loss, which should be a scalar. Use the Softmax #\n # classifier loss. 
So that your results match ours, multiply the #\n # regularization loss by 0.5 #\n #############################################################################\n exp_score = np.exp(scores)\n exp_score_sum = exp_score.sum(axis=1)\n correct_score = exp_score[np.arange(N), y]\n probability = (correct_score / exp_score_sum).reshape(-1, 1)\n loss = -np.log(probability).sum()\n\n loss /= N\n loss += 0.5 * reg * (np.sum(W1 * W1) + np.sum(W2 * W2))\n\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n\n # Backward pass: compute gradients\n grads = {}\n #############################################################################\n # TODO: Compute the backward pass, computing the derivatives of the weights #\n # and biases. Store the results in the grads dictionary. For example, #\n # grads['W1'] should store the gradient on W1, and be a matrix of same size #\n #############################################################################\n des = np.tile((-correct_score / np.square(exp_score_sum)).reshape(-1, 1), (1, C))\n des[np.arange(N), y] += 1.0 / exp_score_sum\n dsoftmax = des * (-np.ones((mid.shape[0], 1)) / probability) * np.exp(scores)\n\n # W2\n grads['W2'] = mid.T.dot(dsoftmax)\n grads['W2'] /= N\n grads['W2'] += reg * W2\n\n # b2\n grads['b2'] = np.ones_like(b2.reshape(1, -1)) * dsoftmax\n grads['b2'] = np.mean(grads['b2'], axis=0).reshape(-1)\n\n # W1\n binary = np.zeros_like(mid)\n binary[mid > 0] = 1\n grads['W1'] = X.T.dot(binary * dsoftmax.dot(W2.T)) # chain rule, compute dmid/dW1 * dscore/dmid * dsoftmax\n grads['W1'] /= N\n grads['W1'] += reg * W1\n\n # b1\n grads['b1'] = np.ones_like(b1.reshape(1, -1)) * binary * dsoftmax.dot(W2.T)\n grads['b1'] = np.mean(grads['b1'], axis=0).reshape(-1)\n\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n\n return loss, grads", "def k2(self) -> float:\n return self.distortion_coefficients[1]", "def build_reparam_kl_loss_and_gradients(inference, var_list):\n \n p_log_lik = [0.0] * inference.n_samples\n base_scope = tf.get_default_graph().unique_name(\"inference\") + '/'\n for s in range(inference.n_samples):\n # Form dictionary in order to replace conditioning on prior or\n # observed variable with conditioning on a specific value.\n scope = base_scope + tf.get_default_graph().unique_name(\"sample\")\n dict_swap = {}\n for x, qx in six.iteritems(inference.data):\n if isinstance(x, RandomVariable):\n if isinstance(qx, RandomVariable):\n qx_copy = copy(qx, scope=scope)\n dict_swap[x] = qx_copy.value()\n else:\n dict_swap[x] = qx\n\n for z, qz in six.iteritems(inference.latent_vars):\n # Copy q(z) to obtain new set of posterior samples.\n qz_copy = copy(qz, scope=scope)\n dict_swap[z] = qz_copy.value()\n\n for x in six.iterkeys(inference.data):\n if isinstance(x, RandomVariable):\n x_copy = copy(x, dict_swap, scope=scope)\n p_log_lik[s] += tf.reduce_sum(\n inference.scale.get(x, 1.0) * x_copy.log_prob(dict_swap[x]))\n\n p_log_lik = tf.reduce_mean(p_log_lik)\n\n kl_penalty = tf.reduce_sum([\n tf.reduce_sum(inference.kl_scaling.get(z, 1.0) * kl_divergence(qz, z))\n for z, qz in six.iteritems(inference.latent_vars)])\n\n if inference.logging:\n tf.summary.scalar(\"loss/p_log_lik\", p_log_lik,\n collections=[inference._summary_key])\n tf.summary.scalar(\"loss/kl_penalty\", kl_penalty,\n 
collections=[inference._summary_key])\n\n loss = -(p_log_lik - kl_penalty)\n grads = tf.gradients(loss, var_list)\n grads_and_vars = list(zip(grads, var_list))\n return loss, grads_and_vars", "def add_l2_reg(loss_f, grad_f, lambda_):\n\n def l2_loss(y, tx, w, *args, **kwargs):\n return loss_f(y, tx, w, *args, **kwargs) + lambda_ * np.linalg.norm(w)\n \n def l2_grad(y, tx, w, *args, **kwargs):\n return grad_f(y, tx, w, *args, **kwargs) + 2 * lambda_ * w\n \n return l2_loss, l2_grad", "def rl_modelrl_l2_short():\n hparams = rl_modelrl_short()\n hparams.generative_model_params = \"basic_conv_l2\"\n return hparams", "def get_model_gradient_multipliers(last_layer_gradient_multiplier):\n gradient_multipliers = {}\n\n for var in slim.get_model_variables():\n # Double the learning rate for biases.\n if 'biases' in var.op.name:\n gradient_multipliers[var.op.name] = 2.\n\n # Use larger learning rate for last layer variables.\n if 'Score' in var.op.name:\n if 'biases' in var.op.name:\n gradient_multipliers[var.op.name] = 2 * last_layer_gradient_multiplier\n print(var.op.name)\n elif 'weights' in var.op.name:\n gradient_multipliers[var.op.name] = last_layer_gradient_multiplier\n print(var.op.name)\n\n return gradient_multipliers", "def rl_modelrl_l2_medium():\n hparams = rl_modelrl_medium()\n hparams.generative_model_params = \"basic_conv_l2\"\n return hparams", "def combined_costs(matrix_MSLL_IO):\r\n return", "def get_loss(self):\r\n\r\n if F.loss_type==\"cosine\":\r\n self.losscos = r2d*tf.acos(1-tf.losses.cosine_distance(tf.nn.l2_normalize(self.labels,1), tf.nn.l2_normalize(self.out, 1), dim=1))\r\n self.loss = tf.losses.cosine_distance(tf.nn.l2_normalize(self.labels,1), tf.nn.l2_normalize(self.out, 1), dim=1)\r\n elif F.loss_type==\"mse2d\":\r\n xl, yl, zl = tf.split(self.labels, 3, axis=1)\r\n xo, yo, zo = tf.split(self.out, 3, axis=1)\r\n thetal, thetao = tf.asin(-yl), tf.asin(-yo)\r\n phil, phio = tf.atan2(-zl, -xl), tf.atan2(-zo, -xo)\r\n self.lb = tf.concat([thetal, phil], axis=1)\r\n self.ob = tf.concat([thetao, phio], axis=1)\r\n self.loss = tf.scalar_mul(tf.constant(r2d), tf.losses.mean_squared_error(self.lb, self.ob, 2))\r\n elif F.loss_type==\"mse3d\":\r\n self.loss = tf.losses.mean_squared_error(tf.nn.l2_normalize(self.labels, 0), tf.nn.l2_normalize(self.out, 0))", "def rl_modelrl_l2_base():\n hparams = rl_modelrl_base()\n hparams.generative_model_params = \"basic_conv_l2\"\n return hparams", "def loss_maxL(y, tx, w):\n # to avoid problems when 0 in log\n epsilon=0.00000001\n sig=sigmoid(tx.dot(w))\n # calculated probability\n p=y.T.dot(np.log(sig+epsilon)) + (1-y).T.dot(np.log(1-sig+epsilon))\n #divides with number of samples so that learning rate is not dependant on number of samples\n p=p/len(y)\n return np.squeeze(- p)", "def l2_reg_gradient_descent(Y, weights, cache, alpha, lambtha, L):\n m = Y.shape[1]\n len_cache = len(cache)\n\n # learning for the last layer:\n Al = cache['A{}'.format(len_cache - 1)] # last A\n A_prev = cache['A{}'.format(len_cache - 2)] # pre last A\n dZl = Al - Y # last dZ\n dWl = np.matmul(dZl, A_prev.T) / m # last dW, shape (1, nodes)\n dbl = (1 / m) * np.sum(dZl, axis=1, keepdims=True)\n Wl_str = 'W{}'.format(len_cache - 1)\n Wl = weights[Wl_str] # last W\n # last layer W learning:\n weights[Wl_str] = Wl - (alpha * lambtha / m) * Wl - alpha * dWl\n bl_str = 'b{}'.format(len_cache - 1)\n bl = weights[bl_str] # last b\n weights[bl_str] = bl - alpha * dbl # last layer b learning\n\n # next: learning for the rest of the layers:\n dZ = dZl\n W_next = Wl\n for i 
in reversed(range(1, len_cache - 1)):\n A = cache['A{}'.format(i)]\n A_prev = cache['A{}'.format(i - 1)]\n dZ = np.matmul(W_next.T, dZ) * (1 - A ** 2)\n dW = (1 / m) * (np.matmul(dZ, A_prev.T))\n db = np.sum(dZ, axis=1, keepdims=True) / m\n W_c_str = 'W{}'.format(i)\n W_c = weights[W_c_str] # current W\n b_c_str = 'b{}'.format(i)\n b_c = weights[b_c_str] # current b\n weights[W_c_str] = W_c - (alpha * lambtha / m) * W_c - alpha * dW\n weights[b_c_str] = b_c - alpha * db\n W_next = W_c", "def mlp_ptscorer(inputs, Ddim, N, l2reg, pfx='out', oact='sigmoid', extra_inp=[]):\n\n sum_vec = add(inputs)\n mul_vec = multiply(inputs)\n\n mlp_input = concatenate([sum_vec, mul_vec])\n\n # Ddim may be either 0 (no hidden layer), scalar (single hidden layer) or\n # list (multiple hidden layers)\n if Ddim == 0:\n Ddim = []\n elif not isinstance(Ddim, list):\n Ddim = [Ddim]\n if Ddim:\n for i, D in enumerate(Ddim):\n shared_dense = Dense(int(N*D), kernel_regularizer=l2(l2reg), \n activation='linear', name=pfx+'hdn%d'%(i))\n mlp_input = Activation('tanh')(shared_dense(mlp_input))\n\n shared_dense = Dense(1, kernel_regularizer=l2(l2reg), activation=oact, name=pfx+'mlp')\n mlp_out = shared_dense(mlp_input)\n \n return mlp_out", "def l2_reg_create_layer(prev, n, activation, lambtha):\n regulizer = tf.contrib.layers.l2_regularizer(lambtha)\n init = tf.contrib.layers.variance_scaling_initializer(mode=\"FAN_AVG\")\n tensor = tf.layers.Dense(units=n, activation=activation,\n kernel_initializer=init,\n kernel_regularizer=regulizer)\n return tensor(prev)", "def objective(self,w):\n l = 0\n for i in range(len(self.x)):\n # Each example contributes log(sigma(y_i * x_i . w))\n l -= log(sigmoid(self.y[i] * np.dot(w, self.x[i,:])))\n # regularisation 1/2 * alpha * ||w||^2\n l += 0.5 * self.alpha * np.dot(w,w)\n return l", "def lp_reg(x, y, critic):\n batch_size = x.size()[0]\n \n t = torch.rand(batch_size, 1)\n t = t.expand(x.size())\n t_ = t*x + (1-t)*y\n t_ = torch.autograd.Variable(t_, requires_grad=True)\n \n t_grad = critic(t_)\n grad = torch.autograd.grad(t_grad, t_, grad_outputs=torch.ones(t_grad.size()),\n create_graph=True, retain_graph=True)[0]\n \n zeros = torch.zeros(batch_size, 1)\n penalty = torch.max((torch.norm(grad, dim=1)-1), zeros)\n return (penalty**2).mean()", "def l2_loss(embedding, tree, dist_function): \n # split tensor shape = (num_vertices, dim) into num_vertices number of tensors shape = (dim).\n embedding_tuple = torch.split(embedding, 1) \n \n # loss function is the sum of l2 norm (no sqrt) between the space distance and tree distance \n loss = Variable(torch.FloatTensor(torch.zeros(1)))\n\n # calculate the distance between embedding vectors and minus the tree distance\n dist_tensor = []\n for i_idx, i in enumerate(embedding_tuple):\n for j_idx, j in enumerate(embedding_tuple):\n if i_idx <= j_idx: # when i_idx==j_idx (dist=0) as it will lead to NaN loss in backprop\n continue\n dist_tensor.append((dist_function(i,j) - tree[i_idx][j_idx]).pow(2))\n\n # stack the list of calculated distance\n dist_tensor = torch.stack(dist_tensor)\n\n # loss = L2 loss between space distance tensor and tree distance tensor\n loss = dist_tensor.sum()\n \n return loss", "def loss(self, X, y=None):\n\t\tW1, b1 = self.params['W1'], self.params['b1']\n\t\tW2, b2 = self.params['W2'], self.params['b2']\n\t\tW3, b3 = self.params['W3'], self.params['b3']\n\t\t\n\t\t# pass conv_param to the forward pass for the convolutional layer\n\t\tfilter_size = W1.shape[2]\n\t\tconv_param = {'stride': 1, 'pad': (filter_size - 1) / 
2}\n\n\t\t# pass pool_param to the forward pass for the max-pooling layer\n\t\tpool_param = {'pool_height': 2, 'pool_width': 2, 'stride': 2}\n\n\t\tscores = None\n\t\t############################################################################\n\t\t# TODO: Implement the forward pass for the three-layer convolutional net, #\n\t\t# computing the class scores for X and storing them in the scores\t\t\t\t\t #\n\t\t# variable.\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t #\n\t\t############################################################################\n\t\tz1, cache1 = conv_relu_pool_forward(X, W1, b1, conv_param, pool_param)\n\t\tz2, cache2 = affine_relu_forward(z1, W2, b2)\n\t\ty3, cache3 = affine_forward(z2, W3, b3)\n\t\tscores = y3\n\t\t############################################################################\n\t\t#\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tEND OF YOUR CODE\t\t\t\t\t\t\t\t\t\t\t\t\t\t #\n\t\t############################################################################\n\t\t\n\t\tif y is None:\n\t\t\treturn scores\n\t\t\n\t\tloss, grads = 0, {}\n\t\t############################################################################\n\t\t# TODO: Implement the backward pass for the three-layer convolutional net, #\n\t\t# storing the loss and gradients in the loss and grads variables. Compute #\n\t\t# data loss using softmax, and make sure that grads[k] holds the gradients #\n\t\t# for self.params[k]. Don't forget to add L2 regularization!\t\t\t\t\t\t\t #\n\t\t############################################################################\n\t\tloss, dout = softmax_loss(scores, y)\n\t\tloss += self.reg * 0.5 * (np.power(self.params['W3'], 2).sum() + \n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tnp.power(self.params['W2'], 2).sum() + \n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tnp.power(self.params['W1'], 2).sum())\n\n\t\tdx3, grads['W3'], grads['b3'] = affine_backward(dout, cache3)\n\t\tdx2, grads['W2'], grads['b2'] = affine_relu_backward(dx3, cache2)\n\t\tdx1, grads['W1'], grads['b1'] = conv_relu_pool_backward(dx2, cache1)\n\t\t############################################################################\n\t\t#\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tEND OF YOUR CODE\t\t\t\t\t\t\t\t\t\t\t\t\t\t #\n\t\t############################################################################\n\t\t\n\t\treturn loss, grads", "def ridge_regression(y, tx, lambda_):\n N = tx.shape[0]\n a = tx.T.dot(tx) + 2 * N * lambda_ * np.identity(tx.shape[1])\n b = tx.T.dot(y)\n w = np.linalg.solve(a, b)\n loss = compute_loss_LS(y, tx, w) \n return loss, w", "def l2_reg_create_layer(prev, n, activation, lambtha):\n reg = tf.contrib.layers.l2_regularizer(lambtha)\n init = tf.contrib.layers.variance_scaling_initializer(mode=\"FAN_AVG\")\n t = tf.layers.Dense(units=n, activation=activation,\n kernel_initializer=init,\n kernel_regularizer=reg,\n )\n return t(prev)", "def get_sparsity_penalty(nnet, inputs, sparsity, mode=\"mean\",\n deterministic=False):\n\n assert mode in (\"mean\", \"l1\")\n\n rho = sparsity\n penalty = 0\n eps = 0.0001 # for numerical stability\n\n for layer in nnet.all_layers:\n if layer.isactivation:\n\n activation = lasagne.layers.get_output(layer, inputs=inputs,\n deterministic=deterministic)\n\n if mode == \"mean\":\n if layer.isrelu:\n avg_activation = T.mean(T.gt(activation,\n T.zeros_like(activation)),\n axis=0, dtype='floatX')\n if layer.issigmoid:\n avg_activation = T.mean(activation, axis=0, dtype='floatX')\n\n KL_div = T.sum((rho+eps) *\n (T.log(rho+eps) - T.log(avg_activation+eps)) +\n (1-rho+eps) *\n 
(T.log(1-rho+eps) - T.log(1-avg_activation+eps)),\n dtype='floatX')\n penalty = penalty + KL_div\n\n if mode == \"l1\":\n penalty = penalty + T.sum(abs(activation), dtype='floatX')\n\n return T.cast(penalty, dtype='floatX')", "def mpt132_w34_2_lr(optimizer, args, **kwargs):\n\n def _lr_adjuster(epoch, iteration):\n if epoch >= 27:\n lr = args.lr * 0.1\n\n if epoch >= 44:\n lr = lr * 0.1\n\n if epoch >= 54:\n lr = lr * 0.01\n else:\n lr = args.lr\n\n assign_learning_rate(optimizer, lr)\n\n return lr\n\n return _lr_adjuster" ]
[ "0.7459851", "0.73080754", "0.71070445", "0.7071918", "0.6722846", "0.6637411", "0.66100603", "0.6606142", "0.65346557", "0.64298236", "0.64298236", "0.64298236", "0.64195216", "0.6411184", "0.6407858", "0.6394501", "0.63773257", "0.634488", "0.63392335", "0.6293014", "0.61802405", "0.61503524", "0.6149505", "0.6127163", "0.61208874", "0.60972327", "0.60640156", "0.6029717", "0.6015774", "0.6013876", "0.60113883", "0.60025465", "0.59928167", "0.5961134", "0.59243536", "0.59084576", "0.5899345", "0.5872625", "0.5862734", "0.58606994", "0.5836822", "0.58322966", "0.5795073", "0.5781312", "0.5761747", "0.5753726", "0.5752543", "0.5752193", "0.5734129", "0.5721926", "0.57193244", "0.5715699", "0.5693544", "0.56856376", "0.56797504", "0.56728595", "0.5669613", "0.56617296", "0.5657388", "0.56373453", "0.56289595", "0.56277", "0.5620497", "0.558956", "0.55868894", "0.558122", "0.5580016", "0.55780435", "0.5565405", "0.55270034", "0.55216616", "0.55185705", "0.55172133", "0.55098605", "0.5493075", "0.54918677", "0.5486331", "0.54855716", "0.54822814", "0.5458334", "0.54579157", "0.54495806", "0.54480374", "0.5439251", "0.542891", "0.5415962", "0.5414263", "0.5405145", "0.5399938", "0.53980637", "0.5377615", "0.53764576", "0.53688157", "0.53620154", "0.5361003", "0.53585947", "0.53584796", "0.53555447", "0.5354137", "0.5350858" ]
0.66554886
5
returns the offset to balance the polynomial parameters made possible by the bias terms of the network.
def get_bias_offset(nnet): offset = 0 L = len(nnet.trainable_layers) for l in range(L-1): layer = nnet.trainable_layers[l] if layer.b is not None: W_prod = T.eye(int(layer.b.shape.eval()[0])) for k in range(1, L-1): W_prod = T.dot(nnet.trainable_layers[k].W.T, W_prod) offset = offset + T.dot(W_prod, layer.b) offset = T.dot(nnet.ocsvm_layer.W.T, offset) return T.sum(offset)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_bias(self) -> JTensor:\n p = self.params\n b = self.local_theta().b\n if p.forget_gate_bias != 0.0:\n b = b + self.get_adjustment()\n\n return b", "def get_bias(self):", "def bias(self):\n return self.mbmod.bias", "def bias(self):\n return self._bias", "def get_bias(self):\n return self._bias", "def coordinate_delta_bias(sum_grad, sum_hess):\n return -sum_grad / sum_hess", "def bias_index(self):\n return _div(abs(self.FN - self.FP), self.grand_total)", "def gradient_bias(X, Y, model):\n W = model['weight']\n b = model['bias']\n weight_decay = model['weight_decay']\n\n # YOUR CODE HERE\n # Write the gradient with respect to the bias\n # The following line is just a placeholder\n return np.subtract(np.dot(np.transpose(predict(X, model)), np.ones(len(Y))), np.dot(np.transpose(len(Y)), np.ones(10))) #np.zeros(Y.shape[1])", "def bias_(self):\n return self.get_tensor_value('logistic_regression/bias:0')", "def bias(self) -> Optional[str]:\n return pulumi.get(self, \"bias\")", "async def get_focus_offset(self, **kwargs: Any) -> float:\n return 0", "def get_parameters(self):\n if self.add_bias:\n params = np.concatenate((self.bias, self.W), 0)\n else:\n params = self.W\n return params", "def bias(self):\n if self._bias is None:\n with self:\n self._bias = nengo.Node([1], label='cortical bias')\n return self._bias", "def chain_offset(self):\n return self._chain_offset", "def bias_prior(self):", "def wm_offset(self):\n return self.get_par(\"offset\")", "def calc_bias(a,b):\n comb = a + b\n idx = np.array(range(len(a)))[~np.isnan(comb)]\n a1=a[idx]\n b1=b[idx]\n N = len(a1)\n bias = np.sum(a1-b1)/N\n return bias", "def bottom_offset(self):\n raise NotImplementedError", "def get_bprop_pow(self):\n pow_op = P.Pow()\n ln = P.Log()\n\n def bprop(x, power, out, dout):\n bc_dx = power * pow_op(x, power - 1.0) * dout\n bc_dpower = out * ln(x) * dout\n return binop_grad_common(x, power, bc_dx, bc_dpower)\n return bprop", "def __getxyB(x, y):\n\t\treturn x*3+y", "def backward(self, next_layer_weights, next_layer_delta):\n delta = np.dot(next_layer_weights.T, next_layer_delta)\n delta = delta * self.mask * self.activation_derivative(self.z)\n self.delta = delta\n return delta", "def Offset(self) -> int:", "def Offset(self) -> int:", "def Offset(self) -> int:", "def get_bias_for_op(op):\n bias = None\n if op.type in _BIAS_TYPES:\n bias = op.inputs[constants.OP_WEIGHT_INDICES[op.type]]\n return bias", "def getBalanceFactor(self):\n \n return (self._leftSubtreeHeight - self._rightSubtreeHeight)", "def get_panels_pcb_offset():\n mount_hole_offset = arena_assembly['panels_to_hallway_gap']\n mount_hole_offset -= arena_assembly['panels_assembly_offset']\n mount_hole_offset += 0.5*hallway_bottom_plate['width']\n mount_hole_offset += 0.5*panels_pcb['width']\n return mount_hole_offset", "def get_b(self):\n return ((self.b_plus_bstar / self.n_pos) + (self.b_minus_bstar / self.n_neg)) / 2", "def offset_slope(self):\n foc_um_slope = self.focus_slope * self.pix_size\n offset_slope = 0.5 * foc_um_slope / np.tan(self.convergence_angle)\n return offset_slope", "def pbias(self) -> float:\n return float(100.0 * sum(self.predicted - self.true) / sum(self.true))", "def abs_offset_from_hbin_offset(self, offset):\n h = self.parent()\n while h.__class__.__name__ != \"HBINBlock\":\n h = h.parent()\n\n return h.first_hbin().offset() + offset", "def constraint_sum(w):\n return sum(w) - 1", "def _relative_bias(sim: xr.DataArray, ref: xr.DataArray) -> xr.DataArray:\n out = (sim - ref) / ref\n return 
out.assign_attrs(units=\"\")", "def get_input_offset(self):\n return ELFLING_PADDING + len(self.__data) - 4", "def get_drawing_offset(self) -> Tuple2IntType:\n return self._drawing_offset", "def bias_model_qso(z):\n alpha = 0.278\n beta = 2.393\n return alpha * ((1+z)**2 - 6.565) + beta", "def backpropagation(self):\n\n print \"Backpropagation in pool layer\"\n deltasNext = self.__nextLayer.getDeltas()\n self.deltas = np.zeros(self.inputShape)\n\n\n # for para dar los valores del delta siguiente a los maximos\n idx = 0\n for n in range(self.inputShape[0]):\n for c in range(self.inputShape[1]):\n nh = 0\n for h in range(self.inputShape[2], self.inputShape[2] - self.kernelSize[0] + 1, self.stride[0]):\n nw = 0\n for w in range(self.inputShape[3], self.inputShape[3] - self.kernelSize[1] + 1, self.stride[1]):\n self.deltas[n, c, w + self.maxIdx[idx][0], h + self.maxIdx[idx][1]] = deltasNext[\n n, c,\n nh: nh + self.kernelSize[\n 0],\n nw:nw + self.kernelSize[\n 1]]\n idx += 1\n\n if self.__previousLayer is None:\n return self.deltas\n else:\n return self.__previousLayer.backpropagation()", "def adj_to_bias(adj):\n num_graphs = adj.shape[0]\n adj_temp = np.empty(adj.shape)\n for i in range(num_graphs):\n adj_temp[i] = adj[i] + np.eye(adj.shape[1])\n return -1e9 * (1.0 - adj_temp)", "def bias_nu(self,nu):\n return 1. + (self.a*nu-1.)/self.delta_c + 2.*self.p/(self.delta_c*(1.+(self.a*nu)**self.p))", "def powBeta( n ):\n return (1-alphaval)*Fib(n) + Fib(n-1)\n #return Fib(n+1) - Fib(n) * alphaval", "def balanceFactor(self):\n leftHeight = self.left.height if self.left != None else 0\n rightHeight = self.right.height if self.right != None else 0\n return rightHeight - leftHeight", "def get_offset_value():\n # TODO rename it 'get_margin_value'\n # should be greater than 2 (maybe 1 is enough)\n return 5", "def _offset_for(self, param):\n if param.has_parent():\n p = param._parent_._get_original(param)\n if p in self.parameters:\n return reduce(lambda a,b: a + b.size, self.parameters[:p._parent_index_], 0)\n return self._offset_for(param._parent_) + param._parent_._offset_for(param)\n return 0", "def _offset_for(self, param):\n if param.has_parent():\n p = param._parent_._get_original(param)\n if p in self.parameters:\n return reduce(lambda a,b: a + b.size, self.parameters[:p._parent_index_], 0)\n return self._offset_for(param._parent_) + param._parent_._offset_for(param)\n return 0", "def offset_point(self,base, offset):\r\n return (base[0] + offset[0], base[1] + offset[1])", "def _get_gradient_delta(self, Xi, yi):\n\n z = sum(wi * xij for wi, xij in zip(self.weights, Xi)) + self.bias\n y_hat = 1 / (1 + exp(-z))\n bias_grad_delta = yi - y_hat\n weights_grad_delta = [bias_grad_delta * Xij for Xij in Xi]\n return bias_grad_delta, weights_grad_delta", "def balance(self):\n return self._rbal - self._lbal", "def offset(self):\r\n return self.buf[0].unib[9:11]", "def _bowl_params(self):\n self.vars['bowl_strength'] = self.bowl.strength + \\\n self.vars['beta_min_offset']\n self.vars['q_init'] = self.vars['bowl_strength']\n if self.vars['bowl_strength'] <= self.vars['beta_min_offset']:\n print(\n f\"Bowl overflow -- Set to the minimum value : {self.vars['beta_min_offset']}\")\n # raise ValueError(\"Bowl overflow... strength lower than set tolerance. 
Modify the tolerance or fix the bug!\")\n self.vars['bowl_strength'] = self.vars['beta_min_offset']\n if self.vars['bowl_strength'] > self.vars['q_max']:\n self.vars['bowl_strength'] = self.vars['q_max']\n\n self.vars['zeta_bowl'] = self.toNeural(self.bowl.center)\n print(f\"Value for Q set to {self.vars['bowl_strength']}\")", "def backprop(nn, y):\n LAST = len(nn) - 1\n\n # last layer\n nn[LAST].dCdz = np.multiply(2.0 * (nn[LAST].a - y), AF_PRIME(nn[LAST].z))\n nn[LAST].dCdw = (np.dot(nn[LAST].dCdz, nn[LAST].input_value.T))\n nn[LAST].dCdw_sum = \\\n np.add(nn[LAST].dCdw, nn[LAST].dCdw_sum)\n nn[LAST].w -= nn[LAST].dCdw * LEARNING_RATE\n\n # other layer\n for n in range(1, len(nn)):\n dz1dz2 = \\\n np.dot(nn[LAST - n + 1].w.T, nn[LAST - n + 1].dCdz)\n nn[LAST - n].dCdz = \\\n np.multiply(AF_PRIME(nn[LAST - n].z), dz1dz2)\n nn[LAST - n].dCdw = \\\n (np.dot(nn[LAST - n].dCdz, nn[LAST - n].input_value.T))\n nn[LAST - n].dCdw_sum = \\\n np.add(nn[LAST - n].dCdw, nn[LAST - n].dCdw_sum)\n nn[LAST - n].w -= nn[LAST - n].dCdw * LEARNING_RATE", "def attention_bias_local(length, max_backward, max_forward):\n band = ones_matrix_band_part(\n length,\n length,\n max_backward,\n max_forward,\n out_shape=[1, 1, length, length])\n return -1e9 * (1.0 - band)", "def backpropagation(self, x, y):\n result = self.feed_forward(x)\n z_arr, a_arr = result[0], result[1]\n\n delta_w = [np.zeros(w.shape) for w in self.weights]\n delta_b = [np.zeros(b.shape) for b in self.biases]\n\n delta = self.cost_func.derivative(a_arr[-1], y, z_arr[-1])\n delta_b[-1] = np.sum(delta, axis=0)\n delta_w[-1] = np.dot(delta.T, a_arr[-2])\n\n for layer in range(2, self.layers_num):\n z_d = self.act_func.derivative(z_arr[-layer])\n delta = np.dot(self.weights[-layer + 1].T, np.sum(delta, axis=0)) * z_d\n delta_b[-layer] = np.sum(delta, axis=0)\n delta_w[-layer] = np.dot(delta.T, a_arr[-layer - 1])\n\n return delta_w, delta_b", "def compute_bias(self, qlen, klen):\n context_position = torch.arange(qlen, dtype=torch.long)[:, None]\n memory_position = torch.arange(klen, dtype=torch.long)[None, :]\n relative_position = memory_position - context_position # shape (qlen, klen)\n rp_bucket = self._relative_position_bucket(\n relative_position, # shape (qlen, klen)\n bidirectional=not self.is_decoder,\n num_buckets=self.relative_attention_num_buckets,\n )\n rp_bucket = rp_bucket.to(self.relative_attention_bias.weight.device)\n values = self.relative_attention_bias(rp_bucket) # shape (qlen, klen, num_heads)\n values = values.permute([2, 0, 1]).unsqueeze(0) # shape (1, num_heads, qlen, klen)\n return values", "def compute_bias(self, qlen, klen):\n context_position = torch.arange(qlen, dtype=torch.long)[:, None]\n memory_position = torch.arange(klen, dtype=torch.long)[None, :]\n relative_position = memory_position - context_position # shape (qlen, klen)\n rp_bucket = self._relative_position_bucket(\n relative_position, # shape (qlen, klen)\n bidirectional=not self.is_decoder,\n num_buckets=self.relative_attention_num_buckets,\n )\n rp_bucket = rp_bucket.to(self.relative_attention_bias.weight.device)\n values = self.relative_attention_bias(rp_bucket) # shape (qlen, klen, num_heads)\n values = values.permute([2, 0, 1]).unsqueeze(0) # shape (1, num_heads, qlen, klen)\n return values", "def calculate_gradient(y, tx, w): \n return tx.T@(sigmoid(tx@w)-y)", "def get_offset(self):\n return self.offset", "def square_trick(bias, slope, predictor, current_value, learning_rate):\n predicted_value = bias + slope*predictor\n slope += 
learning_rate*predictor*(current_value-predicted_value)\n bias += learning_rate*(current_value-predicted_value)\n return slope, bias", "def absolute_trick(bias, slope, predictor, current_value, learning_rate):\n predicted_value = bias + slope*predictor\n if current_value > predicted_value:\n slope += learning_rate*predictor\n bias += learning_rate\n else:\n slope -= learning_rate*predictor\n bias -= learning_rate\n return slope, bias", "def poly_desc(W, b):\n result = 'y = '\n for i, w in enumerate(W):\n result += '{:+.2f} x^{} '.format(w, len(W) - i)\n result += '{:+.2f}'.format(b[0])\n return result", "def offset(self):\n return self.__offset", "def offset(self):\n return self.__offset", "def bias_variable(shape):\n initial = tf.constant(0.1, shape=shape)\n return tf.get_variable('bias', initializer=initial)", "def backprop(self, x, y):\n nabla_b = [np.zeros(b.shape) for b in self.biases] #返回一个给定形状和类型的用0填充的数组 shape:形状\n nabla_w = [np.zeros(w.shape) for w in self.weights]\n # feedforward\n activation = x\n activations = [x] # list to store all the activations, layer by layer\n zs = [] # list to store all the z vectors, layer by layer\n for b, w in zip(self.biases, self.weights):\n z = np.dot(w, activation)+b # 计算每一个节点的值\n zs.append(z)\n activation = sigmoid(z) # 加入激活函数\n activations.append(activation)\n\n ''' backward pass 最后一层的 w和b \n 因为最后一层没有权重weights因此单独计算,没有放在循环中\n '''\n delta = self.cost_derivative(activations[-1], y) * \\\n sigmoid_prime(zs[-1])\n nabla_b[-1] = delta\n nabla_w[-1] = np.dot(delta, activations[-2].transpose())\n # Note that the variable l in the loop below is used a little\n # differently to the notation in Chapter 2 of the book. Here,\n # l = 1 means the last layer of neurons, l = 2 is the\n # second-last layer, and so on. It's a renumbering of the\n # scheme in the book, used here to take advantage of the fact\n # that Python can use negative indices in lists.\n for l in range(2, self.num_layers):\n z = zs[-l]\n sp = sigmoid_prime(z)\n delta = np.dot(self.weights[-l+1].transpose(), delta) * sp\n nabla_b[-l] = delta\n nabla_w[-l] = np.dot(delta, activations[-l-1].transpose()) \n return (nabla_b, nabla_w)", "def backward_pass(self, delta, zs, activations, nabla_b, nabla_w):\n for l in range(2, self.num_layers):\n delta = np.dot(self.weights[-l + 1].transpose(), delta) * sigmoid_prime(zs[-l])\n nabla_b[-l] = delta\n nabla_w[-l] = np.dot(delta, activations[-l - 1].transpose())\n return nabla_b, nabla_w", "def get_padding_bias(x, res_rank=4, pad_sym=0):\n # print(\"get_padding_bias\", dtype)\n with tf.name_scope(\"attention_bias\"):\n padding = get_padding(x, padding_value=pad_sym, dtype=tf.float32)\n # padding = get_padding(x, padding_value=pad_sym, dtype=dtype)\n neg_inf = _NEG_INF # if dtype==tf.float32 else _NEG_INF_FP16\n attention_bias = padding * neg_inf\n if res_rank == 4:\n attention_bias = tf.expand_dims(tf.expand_dims(attention_bias, axis=1), axis=1)\n elif res_rank == 3:\n attention_bias = tf.expand_dims(attention_bias, axis=1)\n else:\n raise ValueError(\"res_rank should be 3 or 4 but got {}\".format(res_rank))\n return attention_bias", "def _get_next_offset(self):\n return self.__offset", "def absorb_params(self):\n new_lin_op = mul_elemwise(self.weight, self.lin_op)\n new_b = mul_elemwise(self.weight, self.b).value\n return sum_squares(new_lin_op,\n alpha=self.alpha,\n beta=self.beta,\n b=new_b,\n c=self.c,\n gamma=self.gamma).absorb_params()", "def getYOffset(self):\n return _libsbml.Point_getYOffset(self)", "def absorb_params(self):\n new_beta = 
np.sqrt(self.alpha * self.beta**2 + self.gamma)\n new_b = (self.alpha * self.beta * self.b - self.c / 2) / new_beta\n return sum_squares(self.lin_op, beta=new_beta, b=new_b)", "def get_balance(address: str) -> int:\n return base.Balances(balance_of).get(address)", "def bias(self,M,z,norm_in=None):\n G = self.Growth(z)\n return self.bias_G(M,G,norm_in=norm_in)", "def calculate_offset(page_number, per_page):\n if page_number < 1:\n return 0\n else:\n offset = (page_number - 1) * per_page\n return offset", "def offset(self):\n return self.unpack_dword(0x0)", "def bias(pred, true):\n return torch.sum(true - pred) / torch.sum(true + pred)", "def get_bprop_abs(self):\n abs_grad = G.AbsGrad()\n\n def bprop(x, out, dout):\n dx = abs_grad(x, dout)\n return (dx,)\n return bprop", "def abs_offset_from_hbin_offset(self, offset):\n # TODO This violates DRY as this is a redefinition, see HBINCell.abs_offset_from_hbin_offset()\n h = self.parent()\n while h.__class__.__name__ != \"HBINBlock\":\n h = h.parent()\n\n return h.first_hbin().offset() + offset", "def backprop1_7(self, x, y):\r\n \r\n nabla_b = [np.zeros(b.shape) for b in self.biases]\r\n nabla_w = [np.zeros(w.shape) for w in self.weights]\r\n # feedforward\r\n activation = x\r\n activations = [x] # list to store all the activations, layer by layer\r\n zs = [] # list to store all the z vectors, layer by layer\r\n\r\n for b, w in list(zip(self.biases, self.weights))[:len(self.biases)-1]:\r\n z = np.dot(w, activation)+b\r\n zs.append(z)\r\n activation = relu(z)\r\n #print(activation)\r\n activations.append(activation)\r\n\r\n b = self.biases[-1]\r\n w = self.weights[-1]\r\n z = np.dot(w, activation)+b\r\n zs.append(z)\r\n activation = softmax(z)\r\n activations.append(activation)\r\n\r\n # backward pass\r\n delta = self.cost_derivative(activations[-1], y)\r\n\r\n nabla_b[-1] = delta\r\n nabla_w[-1] = np.dot(delta, activations[-2].transpose())\r\n # Note about the variable l: Here,\r\n # l = 1 means the last layer of neurons, l = 2 is the\r\n # second-last layer, and so on. 
This numbering takes advantage of the fact\r\n # that Python can use negative indices in lists.\r\n for l in range(2, self.num_layers):\r\n z = zs[-l]\r\n sp = leakyreluDerivative(z)\r\n delta = np.dot(self.weights[-l+1].transpose(), delta) * sp\r\n nabla_b[-l] = delta\r\n nabla_w[-l] = np.dot(delta, activations[-l-1].transpose())\r\n return (nabla_b, nabla_w)", "def offset(self):\n return self._offset", "def offset(self):\n return self._offset", "def offset(self):\n return self._offset", "def offset(self):\n return self._offset", "def offset(self):\n return self._offset", "def offset(self):\n return self._offset", "def offset(self):\n return self._offset", "def B(self) -> int:\n return self.params.B", "def calculate_gradient(y, tx, w):\n\n\tret = tx.T.dot(sigmoid(np.dot(tx, w)) - y)\n\treturn ret", "def get_b_param(self):\n value = self.memory[self.exec_ptr + 2]\n Vm.validate_value(value)\n return value", "def getB(self):\n return ((self.bPlusbStar() / self.nPos) + (self.bMinusbStar / self.nNeg)) / 2", "def update_bias(self):\n self._bias = self._bias + self.update_bias_value\n self.bias_clipping()", "def offset(self) -> Tuple[int, int]:\n return (self.ioffset[0].to_pixels(self.parent.width),\n self.ioffset[1].to_pixels(self.parent.height))", "def _extract_weights(self,W):\n wl1_size = self._D*self._hidden_layer_size\n bl1_size = self._hidden_layer_size\n \n wl2_size = self._hidden_layer_size*self._output_size\n bl2_size = self._output_size\n\n \n weights_L1 = W[0:wl1_size].reshape((self._D,self._hidden_layer_size))\n bias_L1 = W[wl1_size:wl1_size+bl1_size]\n \n start_l2 = wl1_size+bl1_size\n\n weights_L2 = W[start_l2: start_l2 + wl2_size].reshape((self._hidden_layer_size,self._output_size))\n bias_L2 = W[start_l2 + wl2_size : start_l2 + wl2_size + bl2_size]\n \n \n \n return weights_L1,bias_L1,weights_L2,bias_L2", "def find_B(self):\n max_lb = 0\n for arc in self.arcs():\n lb = self.arc_info[arc[0]]['lower_bound']\n max_lb = max(max_lb, lb)\n n = len(self)\n m = len(list(self.edges()))\n return((m - n + 2)*max_lb)", "def get_balance(self):\n\n return self.config", "def get_alignment_offset(self):\n\n return 0", "def calculate_output_bias(train):\n # This just converts to a count of each element by crystal\n site_counts = (\n train.inputs.progress_apply(lambda x: pd.Series(Counter(x[\"site\"])))\n .reindex(columns=np.arange(max_atomic_num))\n .fillna(0)\n )\n # Linear regression assumes a sum, while we average over sites in the neural network\n # Here, we make the regression target the total energy, not the site-averaged energy\n num_sites = site_counts.sum(1)\n total_energies = train[\"energyperatom\"] * num_sites\n\n # Do the least-squares regression, and stack on zeros for the mask and unknown\n # tokens\n output_bias = np.linalg.lstsq(site_counts, total_energies, rcond=None)[0]\n return output_bias", "def calculate_gradient(y, tx, w):\n return tx.T.dot(sigmoid(tx.dot(w))-np.reshape(y,(len(y),1)))", "def calculate_maxL_gradient(y, tx, w):\n sig=sigmoid(tx.dot(w))\n grad=tx.T.dot(sig-y)\n #divides with number of samples so that learning rate is not dependant on number of samples\n grad=grad/len(y) \n return grad", "def offset(self):\r\n return self._get_instantiation()[3]", "def ace_bias_fxn(pt, pt0=-24.065, a=0.8391, b=111.62, c=-0.67068):\n return a * np.exp(-(pt - pt0)/b) + c", "def Bpoly(n, x):\n n = int(n)\n out = 0\n for k in xrange(0, n+1):\n out += comb(n,k)*Bnum(n-k)*x**float(k)\n return out" ]
[ "0.6315012", "0.6108197", "0.58673644", "0.5829149", "0.5699308", "0.5665857", "0.56237257", "0.56060565", "0.5575922", "0.55285054", "0.55088294", "0.54091525", "0.5405784", "0.5395721", "0.5392568", "0.53660536", "0.53445673", "0.5333317", "0.52875805", "0.52750427", "0.52747667", "0.52479863", "0.52479863", "0.52479863", "0.5238909", "0.5233136", "0.5215768", "0.52109665", "0.516689", "0.5132227", "0.51315314", "0.510362", "0.50985116", "0.50926214", "0.5079154", "0.5066284", "0.50468194", "0.50415766", "0.5039222", "0.5032961", "0.5008906", "0.49974602", "0.49948883", "0.49948883", "0.4987739", "0.49691135", "0.49683383", "0.4956572", "0.49523118", "0.49467012", "0.4945798", "0.49451694", "0.4914285", "0.4914285", "0.49092233", "0.49085036", "0.490058", "0.48965475", "0.489294", "0.48837999", "0.48837999", "0.4881785", "0.48804864", "0.4874642", "0.48742747", "0.48735747", "0.48557046", "0.484865", "0.48407778", "0.4839383", "0.4839365", "0.483303", "0.48264772", "0.48225865", "0.48046267", "0.48019418", "0.47970724", "0.4787424", "0.4787424", "0.4787424", "0.4787424", "0.4787424", "0.4787424", "0.4787424", "0.47870168", "0.47814018", "0.47766465", "0.47765428", "0.47727823", "0.47721544", "0.47700942", "0.4769625", "0.47659612", "0.47648633", "0.476211", "0.4758538", "0.47537273", "0.47530514", "0.4751324", "0.47476372" ]
0.70303106
0
create an update for the network given in the argument
def create_update(nnet): if nnet.data._X_val.ndim == 2: inputs = T.matrix('inputs') elif nnet.data._X_val.ndim == 4: inputs = T.tensor4('inputs') targets = T.ivector('targets') # compile theano functions if Cfg.softmax_loss: compile_update_softmax(nnet, inputs, targets) elif Cfg.ocsvm_loss: if Cfg.rho_fixed: compile_update_ocsvm_rho_fixed(nnet, inputs, targets) else: compile_update_ocsvm(nnet, inputs, targets) elif Cfg.svdd_loss: compile_update_svdd(nnet, inputs, targets) elif Cfg.reconstruction_loss: create_autoencoder(nnet) else: compile_update_default(nnet, inputs, targets)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def run(self, network_update_args, network_create_args=None):\n network = self.neutron.create_network(**(network_create_args or {}))\n self.neutron.update_network(network[\"id\"], **network_update_args)", "def update_target_network(self):\n\n\t\tprint \"Updating Target DQN...\"\n\t\t\n\t\tself.update_operation.run()", "def update_network(**kwargs):\n\n ip_addr = kwargs.get('ip_addr')\n is_private = kwargs.get('is_private')\n name = kwargs.get('name')\n dns_names = kwargs.get('dns_names')\n is_scanning = kwargs.get('is_scanning', False)\n network_id = make_shortuuid(name)\n\n network = {\n 'dns_names': dns_names,\n 'ip_addr': ip_addr,\n 'is_private' : is_private,\n 'name': name,\n 'id': network_id,\n 'is_scanning': is_scanning,\n 'updated_count': 0\n\n }\n\n network_exists = r.table(\"networks\").insert([network], conflict=\"update\")\n\n return network_exists.run(conn)", "def update_net(self) -> None:\n self.units.update_net()", "def update(*args):", "def update_target_network(self):\r\n self.send(self.server_conn, (sys._getframe().f_code.co_name, {}))", "def test_networking_project_network_update(self):\n pass", "def fusion_api_edit_fc_network(self, body, uri, api=None, headers=None):\n return self.fc_network.update(body, uri, api, headers)", "def update_network(self, context, net_id, network):\n\n LOG.debug(_(\"QuantumRestProxyV2.update_network() called\"))\n\n # Validate Args\n if network[\"network\"].get(\"admin_state_up\"):\n if network[\"network\"][\"admin_state_up\"] is False:\n LOG.warning(_(\"Network with admin_state_up=False are not yet \"\n \"supported by this plugin. Ignoring setting for \"\n \"network %s\", net_name))\n\n # update DB\n orig_net = super(QuantumRestProxyV2, self).get_network(context, net_id)\n tenant_id = orig_net[\"tenant_id\"]\n new_net = super(QuantumRestProxyV2, self).update_network(\n context, net_id, network)\n\n # update network on network controller\n if new_net[\"name\"] != orig_net[\"name\"]:\n try:\n resource = NETWORKS_PATH % (tenant_id, net_id)\n data = {\n \"network\": new_net,\n }\n ret = self.servers.put(resource, data)\n if not self.servers.action_success(ret):\n raise RemoteRestError(ret[2])\n except RemoteRestError as e:\n LOG.error(_(\"QuantumRestProxyV2: Unable to update remote \"\n \"network: %s\"), e.message)\n # reset network to original state\n super(QuantumRestProxyV2, self).update_network(\n context, id, orig_net)\n raise\n\n # return updated network\n return new_net", "def update(self, params):", "def update_target_net(self, sess):\n sess.run(self.update_target_net_op)", "def updateNetwork(self, session: Session, network: Network) -> Network:\n try:\n return NetworkManager().updateNetwork(session, network)\n except TortugaException as ex:\n raise\n except Exception as ex:\n self._logger.exception(str(ex))\n raise TortugaException(exception=ex)", "def update_network(self, dbnetwork, qipinfo):\n\n # We don't want to add the plenary to self.plenaries if we aren't going\n # to change anything\n plenary = Plenary.get_plenary(dbnetwork)\n updated = False\n\n if dbnetwork.name != qipinfo.name:\n self.logger.client_info(\"Setting network {0!s} name to {1}\"\n .format(dbnetwork, qipinfo.name))\n dbnetwork.name = qipinfo.name\n if dbnetwork.network_type != qipinfo.network_type:\n self.logger.client_info(\"Setting network {0!s} type to {1}\"\n .format(dbnetwork, qipinfo.network_type))\n dbnetwork.network_type = qipinfo.network_type\n if dbnetwork.location != qipinfo.location:\n self.logger.client_info(\"Setting network {0!s} location to 
{1:l}\"\n .format(dbnetwork, qipinfo.location))\n dbnetwork.location = qipinfo.location\n if dbnetwork.side != qipinfo.side:\n self.logger.client_info(\"Setting network {0!s} side to {1}\"\n .format(dbnetwork, qipinfo.side))\n dbnetwork.side = qipinfo.side\n if dbnetwork.network_compartment != qipinfo.compartment:\n self.logger.client_info(\"Setting network {0!s} compartment to {1!s}\"\n .format(dbnetwork, qipinfo.compartment))\n dbnetwork.network_compartment = qipinfo.compartment\n\n if dbnetwork in self.session.dirty:\n updated = True\n\n old_rtrs = set(dbnetwork.router_ips)\n new_rtrs = set(qipinfo.routers)\n\n del_routers = []\n for router in dbnetwork.routers:\n if router.ip in old_rtrs - new_rtrs:\n del_routers.append(router)\n\n for router in del_routers:\n self.logger.client_info(\"Removing router {0:s} from \"\n \"{1:l}\".format(router.ip, dbnetwork))\n for dns_rec in router.dns_records:\n if dns_rec.is_unused:\n delete_dns_record(dns_rec)\n dbnetwork.routers.remove(router)\n updated = True\n\n for ip in new_rtrs - old_rtrs:\n self.add_router(dbnetwork, ip)\n updated = True\n\n if updated:\n self.plenaries.append(plenary)\n\n # TODO: add support for updating router locations\n\n return dbnetwork.netmask == qipinfo.address.netmask", "def update_network(self, context, net_id, network):\n LOG.debug(_(\"NeutronRestProxyV2.update_network() called\"))\n\n self._warn_on_state_status(network['network'])\n\n session = context.session\n with session.begin(subtransactions=True):\n new_net = super(NeutronRestProxyV2, self).update_network(\n context, net_id, network)\n self._process_l3_update(context, new_net, network['network'])\n\n # update network on network controller\n self._send_update_network(new_net, context)\n return new_net", "def update():", "def update():", "def fusion_api_edit_network_set(self, body=None, uri=None, api=None, headers=None):\n return self.network_set.update(body, uri, api, headers)", "def _soft_update_target_network(self):\n\n # Update the target network\n for target_param, param in zip(self.actor_target_network.parameters(), self.actor_network.parameters()):\n target_param.data.copy_((1-self.args.tau) * target_param.data + self.args.tau * param.data)\n\n # Update the critic network\n for target_param, param in zip(self.critic_target_network.parameters(), self.critic_network.parameters()):\n target_param.data.copy_((1-self.args.tau) * target_param.data + self.args.tau * param.data)", "def updateRPC(loc,weight): #status: Done, not tested\r\n pass", "def update_networks(self):\n\t\t# layer 1 update\n\t\tself.W1_tv = tf.assign(self.W1_tv, self.W1_av)\n\t\tself.b1_tv = tf.assign(self.b1_tv, self.b1_av)\n\n\t\t# layer 2 update\n\t\tself.W2_tv = tf.assign(self.W2_tv, self.W2_av)\n\t\tself.b2_tv = tf.assign(self.b2_tv, self.b2_av)\n\n\t\t# layer 3 update\n\t\tself.W3_tv = tf.assign(self.W3_tv, self.W3_av)\n\t\tself.b3_tv = tf.assign(self.b3_tv, self.b3_av)", "def _update_target_net(self):\n self.target_net.load_state_dict(self.policy_net.state_dict())\n self.target_net.eval()", "def dvs_update_network(self):\n self.show_step(1)\n self.env.revert_snapshot(\"dvs_vcenter_systest_setup\")\n\n cluster_id = self.fuel_web.get_last_created_cluster()\n\n self.show_step(2)\n os_ip = self.fuel_web.get_public_vip(cluster_id)\n os_conn = os_actions.OpenStackActions(\n os_ip, SERVTEST_USERNAME,\n SERVTEST_PASSWORD,\n SERVTEST_TENANT)\n\n tenant = os_conn.get_tenant(SERVTEST_TENANT)\n\n net_1 = os_conn.create_network(\n network_name=self.net_data[0].keys()[0],\n 
tenant_id=tenant.id)['network']\n\n os_conn.create_subnet(\n subnet_name=net_1['name'],\n network_id=net_1['id'],\n cidr=self.net_data[0][self.net_data[0].keys()[0]],\n ip_version=4)\n\n assert_true(os_conn.get_network(net_1['name'])['id'] == net_1['id'])\n\n self.show_step(3)\n os_conn.neutron.update_network(net_1[\"id\"],\n {\"network\": {\"name\": 'net_2'}})\n\n assert_true(os_conn.get_network('net_2')['id'] == net_1['id'])\n\n self.show_step(4)\n default_net = os_conn.nova.networks.find(label=self.inter_net_name)\n os_conn.neutron.update_network(\n default_net.id, {\"network\": {\"name\": 'spring'}})\n\n assert_true(os_conn.get_network('spring')['id'] == default_net.id)", "def modify_network(self, username, machine_name, new_network, txn_id):\n logger = get_task_logger(txn_id=txn_id, task_id=self.request.id, loglevel=const.VLAB_ONEFS_LOG_LEVEL.upper())\n resp = {'content' : {}, 'error': None, 'params': {}}\n logger.info('Task starting')\n try:\n vmware.update_network(username, machine_name, new_network)\n except ValueError as doh:\n logger.error('Task failed: {}'.format(doh))\n resp['error'] = '{}'.format(doh)\n logger.info('Task complete')\n return resp", "def update_target_network(self, tau):\n for t, e in zip(\n self.target_network.trainable_variables, self.online_network.trainable_variables\n ):\n t.assign(t * (1-tau) + e * tau)", "def _create_target_network_update_op(self, q_network, target_q_network):\n variables = q_network.get_variables()\n target_variables = target_q_network.get_variables()\n # problem\n return tf.group([\n tf.assign(target_v, target_v + self.tau * (v - target_v)) # same as original arm\n for (target_v, v) in zip(target_variables, variables)\n ])", "def update( ):\r\n pass", "def update_policy_network(self):\r\n self.send(self.server_conn, (sys._getframe().f_code.co_name, {}))", "def fusion_api_edit_ethernet_network(self, body, uri, api=None, headers=None):\n return self.ethernet_network.update(body, uri, api, headers)", "def update(cls, client, resource) :\n\t\ttry :\n\t\t\tif type(resource) is not list :\n\t\t\t\tupdateresource = l3param()\n\t\t\t\tupdateresource.srcnat = resource.srcnat\n\t\t\t\tupdateresource.icmpgenratethreshold = resource.icmpgenratethreshold\n\t\t\t\tupdateresource.overridernat = resource.overridernat\n\t\t\t\tupdateresource.dropdfflag = resource.dropdfflag\n\t\t\t\tupdateresource.miproundrobin = resource.miproundrobin\n\t\t\t\tupdateresource.externalloopback = resource.externalloopback\n\t\t\t\tupdateresource.tnlpmtuwoconn = resource.tnlpmtuwoconn\n\t\t\t\tupdateresource.usipserverstraypkt = resource.usipserverstraypkt\n\t\t\t\tupdateresource.forwardicmpfragments = resource.forwardicmpfragments\n\t\t\t\tupdateresource.dropipfragments = resource.dropipfragments\n\t\t\t\tupdateresource.acllogtime = resource.acllogtime\n\t\t\t\tupdateresource.implicitaclallow = resource.implicitaclallow\n\t\t\t\tupdateresource.dynamicrouting = resource.dynamicrouting\n\t\t\t\tupdateresource.ipv6dynamicrouting = resource.ipv6dynamicrouting\n\t\t\t\treturn updateresource.update_resource(client)\n\t\texcept Exception as e :\n\t\t\traise e", "def update(self, update):\n\n params = shlex.split(update)\n if params[0] in self.addr:\n self.addr[params[0]].update(*params)\n\n else:\n a = Addr(self)\n # add both name and IP address\n self.addr[params[0]] = a\n self.addr[params[1]] = a\n a.update(*params)\n self.notify(\"addrmap_added\", *[a], **{})", "def test_update_external_network(self):\n network_policies = [(self.qos_policies[1].id,\n {self.fips[1].id},\n 
{self.router_fips.id}),\n (None,\n {self.fips[1].id},\n {self.router_fips.id})]\n\n self.fips[0].qos_policy_id = self.qos_policies[0].id\n self.fips[0].update()\n for qos_policy_id, ref_fips, ref_routers in network_policies:\n self.fips_network.qos_policy_id = qos_policy_id\n self.fips_network.update()\n original_network = {'qos_policy_id': self.qos_policies[0]}\n _, reviewed_fips_ids, reviewed_router_ids = (\n self.qos_driver.update_network(\n mock.Mock(), self.fips_network, original_network))\n self.assertEqual(ref_fips, reviewed_fips_ids)\n self.assertEqual(ref_routers, reviewed_router_ids)", "def _update(self, host):\n pass", "def test_update_network(self):\n policies_ports = [\n (None, {self.ports[0].id}),\n (self.qos_policies[1].id, {self.ports[0].id})]\n\n self.ports[1].qos_policy_id = self.qos_policies[0].id\n self.ports[1].update()\n self.ports[2].qos_policy_id = self.qos_policies[1].id\n self.ports[2].update()\n for qos_policy_id, reference_ports in policies_ports:\n self.networks[0].qos_policy_id = qos_policy_id\n self.networks[0].update()\n original_network = {'qos_policy_id': self.qos_policies[0]}\n reviewed_port_ids, _, _ = self.qos_driver.update_network(\n mock.ANY, self.networks[0], original_network)\n self.assertEqual(reference_ports, reviewed_port_ids)\n calls = [mock.call(mock.ANY, self.ports[0].id,\n self.ports[0].network_id, qos_policy_id,\n None)]\n self.mock_rules.assert_has_calls(calls)\n self.mock_rules.reset_mock()", "def update(self, args):\n pass", "def update(self,parameters, grads):\n \n L = len(parameters) // 2 # number of layers in the neural network\n #print(L)\n\n # Update rule for each parameter. Use a for loop.\n for l in range(L):\n \n parameters[\"W\" + str(l+1)] = parameters[\"W\" + str(l+1)] - self.alpha * grads[\"dW\" + str(l+1)]\n \n parameters[\"b\" + str(l+1)] = parameters[\"b\" + str(l+1)] - self.alpha * grads[\"db\" + str(l+1)]\n \n parameters[\"W\" + str(l+1)][np.isnan(parameters[\"W\" + str(l+1)])] = 0\n parameters[\"b\" + str(l+1)][np.isnan(parameters[\"b\" + str(l+1)])] = 0\n \n return parameters", "def update_target_network(self):\n self.target_dqn.set_weights.remote(self.dqn.get_weights.remote())", "def run(self, router_update_args, network_create_args=None,\n subnet_create_args=None, subnet_cidr_start=None,\n subnets_per_network=1, router_create_args=None):\n subnet_create_args = dict(subnet_create_args or {})\n subnet_create_args[\"start_cidr\"] = subnet_cidr_start\n\n net_topo = self.neutron.create_network_topology(\n network_create_args=(network_create_args or {}),\n router_create_args=(router_create_args or {}),\n router_per_subnet=True,\n subnet_create_args=subnet_create_args,\n subnets_count=subnets_per_network\n )\n\n for router in net_topo[\"routers\"]:\n self.neutron.update_router(router[\"id\"], **router_update_args)", "def update(self, *args, **kw):\n pass", "def update_network_profile(arn=None, name=None, description=None, type=None, uplinkBandwidthBits=None, downlinkBandwidthBits=None, uplinkDelayMs=None, downlinkDelayMs=None, uplinkJitterMs=None, downlinkJitterMs=None, uplinkLossPercent=None, downlinkLossPercent=None):\n pass", "def UpdateNetworkID(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def _update(self, context, values, prune_stats=False):\n return db.compute_node_update(context, self.compute_node['id'],\n values, prune_stats)", "def update_network_postcommit(self, context):\n if 
self.rpc_handler is None:\n return\n network = self._get_network_info(context._network)\n for _, _network in network.items():\n network_type = _network.get('network_type', '')\n if network_type not in CentecConstant.SUPPORTED_NETWORK_TYPES and len(CentecConstant.SUPPORTED_NETWORK_TYPES) > 0:\n return\n if network is not None:\n try:\n self.rpc_handler.update_network(network)\n except:\n pass", "def put(self, id):\n context = request.environ.get('context')\n net_obj = dbapi.networks_update(context, id, request.json)\n return jsonutils.to_primitive(net_obj), 200, None", "def update(self, *args, **kwargs):", "def update_network(self, a, batch_size):\n for layer in self.layers:\n layer.weights_update(a, self.alpha, self.l2_lambda, batch_size)\n a = layer.a", "def update_target_network(self):\n variables = self.online_network.trainable_variables\n variables_copy = [tf.Variable(v) for v in variables]\n self.target_network.trainable_variables = variables_copy", "def update_target_network(self):\n self.target.set_weights(self.policy.get_weights()) # Update weights of target network with weights of policy network", "def ModifyNetwork(self, network, reason=None, **kwargs):\n query = []\n _AppendReason(query, reason)\n\n return self._SendRequest(HTTP_PUT,\n (\"/%s/networks/%s/modify\" %\n (GANETI_RAPI_VERSION, network)), None, kwargs)", "def hard_update(source_net, target_net):\n for target_param, param in zip(target_net.parameters(), source_net.parameters()):\n target_param.data.copy_(param.data)", "def update(self, target, query):\n node = self._data[target]\n name = \"%s node %.8s\" % (node['type'], target)\n\n query.update({\n 'type': node['type'],\n 'model': node['model']\n })\n\n logger.info(\"Validating query\")\n NodeValidator.validate(query)\n\n self._data[target] = dict_update(node, query, name)\n logger.info(\"Updated parameters above of %s\" % name)\n\n return {target: self._data[target]}", "def run(self, subnet_update_args, network_create_args=None,\n subnet_create_args=None, subnet_cidr_start=None,\n subnets_per_network=1):\n network = self.neutron.create_network(**(network_create_args or {}))\n subnets = []\n for _ in range(subnets_per_network):\n subnets.append(\n self.neutron.create_subnet(\n network[\"id\"], start_cidr=subnet_cidr_start,\n **(subnet_create_args or {}))\n )\n for subnet in subnets:\n self.neutron.update_subnet(subnet[\"id\"], **subnet_update_args)", "def updateNode(self,updates=None):\n\n logging.info(\"editing this node\")\n if updates is not None:\n for key in updates:\n setattr(self,key,updates[key])\n memcache.set(self.cid,self)", "def update_host(self, conf, tenant_id, network_id, host_id, body):\n\t\tpass", "def update_target_q_network(self):\n assert self.target_network != None\n self.target_network.run_copy()", "def update(self,data):\r\n data = data.split(':',1)\r\n\r\n self.log('Signal','Received an update: %s...' 
% repr(data)[0:10],'update')\r\n \r\n #print \"*** local: \" + repr(data)\r\n \r\n if data[0] == 'Results':\r\n\r\n self.log('Signal','The local node returned these passwords: %s' % repr(data[1]),'update')\r\n\r\n self.addResult(data[1])\r\n elif data[0] == 'Bench':\r\n self.log('Signal','The local node returned these benches: %s' % repr(data[1]),'update')\r\n \r\n self.addBench(data[1])\r\n\r\n elif data[0] == 'Work':\r\n if data[1] == 'Done':\r\n self.finished += 1\r\n if self.finished >= len(self.nodes):\r\n self.runningWork = False\r\n self.log('Signal','Finished working','update')\r\n\r\n notification = 'Work:Done'\r\n self.notifyObservers(notification)", "def defineUpdateOperations(self):\n self.updated_value = tf.placeholder(shape=[1, self.network.action_size], dtype=tf.float32)\n self.loss = tf.reduce_sum(tf.square(self.updated_value - self.network.policyLayer))\n self.trainer = tf.train.GradientDescentOptimizer(learning_rate=self.learning_rate)\n\n self.updateModel = self.trainer.minimize(self.loss)", "def command_update(arguments):\n global current_name\n tag = arguments[0]\n if (len(arguments) == 2):\n old_target, new_target = (...), arguments[1]\n else:\n old_target, new_target = arguments[1:]\n\n to_replace = network[current_name, tag, old_target]\n if not len(to_replace):\n return '\"' + tag + ': ' + old_target + '\" - no such link for this entity'\n if len(to_replace) > 1:\n return 'Sorry, tag \"' + tag + '\" is ambiguous.'\n inverse_tag = to_replace[0].inverse_tag\n to_replace.unlink()\n network.addlink(current_name, tag, new_target, inverse_tag)\n\n return 'Updated link from \"' + tag + ': ' + old_target + '\" to \"' + tag + ': ' + new_target + '\"'", "def do_nic_update(cc, args):\n\n patch = utils.args_array_to_patch(args.attributes[0])\n result = cc.nic.update(args.uuid, patch)\n cliutils.print_dict(result)", "def update(self):\r\n self.g = self.create_graph()", "def update(src):", "def update_node(self, node, updating_node):\n out_edges = list(self.source_net.edges(node, data=True))\n self.remove_node(node)\n self.source_net.add_node(node, attr_dict=self.source_net.nodes[updating_node]['attr_dict'])\n self.source_net.add_edges_from(out_edges)\n\n # Transfer incoming edges\n for u, v, data in self.source_net.in_edges(updating_node, data=True):\n self.source_net.add_edge(u, node, **data)\n\n self.remove_node(updating_node)", "def update(self, *args, **kwargs): # real signature unknown\n pass", "def update(self, *args, **kwargs): # real signature unknown\n pass", "def update(self, *args, **kwargs): # real signature unknown\n pass", "def update(self, *args, **kwargs): # real signature unknown\n pass", "def update(self, *args, **kwargs): # real signature unknown\n pass", "def update(self, *args, **kwargs): # real signature unknown\n pass", "def put(self, request, nnid):\n try:\n input_parm = request.data\n input_parm['nn_id'] = nnid\n if input_parm.get('automl_parms') == None:\n input_parm['automl_parms'] = {}\n if input_parm.get('automl_runtime') == None:\n input_parm['automl_runtime'] = {}\n if input_parm.get('automl_stat') == None:\n input_parm['automl_stat'] = {}\n return_data = NNCommonManager().update_nn_info(input_parm)\n return Response(json.dumps(return_data))\n except Exception as e:\n return_data = {\"status\": \"404\", \"result\": str(e)}\n return Response(json.dumps(return_data))", "def update_target_network(self) -> NoReturn:\n self.target.load_state_dict(self.model.state_dict())", "def update_nat(self, natgw, **attrs):\n return self._update(_gw.Service, natgw, 
**attrs)", "def hard_update_target_network(self,step):\n \n if step % self.C == 0:\n pars = self.model.get_weights()\n self.target_model.set_weights(pars)", "def hard_update_target_network(self,step):\n \n if step % self.C == 0:\n pars = self.model.get_weights()\n self.target_model.set_weights(pars)", "def _build_update_params(self, params):", "def run(self, port_update_args, network_create_args=None,\n port_create_args=None, ports_per_network=1):\n network = self._get_or_create_network(**(network_create_args or {}))\n for i in range(ports_per_network):\n port = self.neutron.create_port(\n network[\"id\"], **(port_create_args or {}))\n self.neutron.update_port(port[\"id\"], **port_update_args)", "def _build_network(self):\n pass", "async def update_from_workbench(\n projects_networks_repository: ProjectsNetworksRepository,\n projects_repository: ProjectsRepository,\n scheduler: DynamicSidecarsScheduler,\n director_v0_client: DirectorV0Client,\n rabbitmq_client: RabbitMQClient,\n project_id: ProjectID,\n) -> None:\n\n try:\n existing_projects_networks = (\n await projects_networks_repository.get_projects_networks(\n project_id=project_id\n )\n )\n except ProjectNotFoundError:\n existing_projects_networks = ProjectsNetworks.parse_obj(\n dict(project_uuid=project_id, networks_with_aliases={})\n )\n\n existing_networks_with_aliases = existing_projects_networks.networks_with_aliases\n\n # NOTE: when UI is in place this is no longer required\n # for now all services are placed on the same default network\n project: ProjectAtDB = await projects_repository.get_project(project_id)\n assert project.prj_owner # nosec\n new_networks_with_aliases = await _get_networks_with_aliases_for_default_network(\n project_id=project_id,\n user_id=project.prj_owner,\n new_workbench=project.workbench,\n director_v0_client=director_v0_client,\n rabbitmq_client=rabbitmq_client,\n )\n logger.debug(\"%s\", f\"{existing_networks_with_aliases=}\")\n await projects_networks_repository.upsert_projects_networks(\n project_id=project_id, networks_with_aliases=new_networks_with_aliases\n )\n\n await _send_network_configuration_to_dynamic_sidecar(\n scheduler=scheduler,\n project_id=project_id,\n new_networks_with_aliases=new_networks_with_aliases,\n existing_networks_with_aliases=existing_networks_with_aliases,\n )", "def alter_network(self, add=[], remove=[]):\n\n # make the required changes\n # NOTE: remove existing edges *before* adding new ones. \n # if edge e is in `add`, `remove` and `self.network`, \n # it should exist in the new network. 
(the add and remove cancel out.\n self.network.edges.remove_many(remove)\n self.network.edges.add_many(add) \n\n # check whether changes lead to valid DAG (raise error if they don't)\n affected_nodes = set(unzip(add, 1))\n if affected_nodes and not self.network.is_acyclic(affected_nodes):\n self.network.edges.remove_many(add)\n self.network.edges.add_many(remove)\n raise CyclicNetworkError()\n \n \n # accept changes: \n # 1) determine dirtynodes\n # 2) backup state\n # 3) score network (but only rescore dirtynodes)\n self.dirtynodes.update(set(unzip(add+remove, 1)))\n self._backup_state(add, remove)\n self.score = self._score_network_core()\n #print\"calculated score = \" + str(self.score)\n return self.score", "def test_update_value(self):\n n = Node(inputs=2)\n # Override weights to static value for reproducibility\n n.weights = [1, 1]\n n.update_value([2, 3])\n\n self.assertEqual(round(n.value, 3), 0.993)", "def update_target_network(self, tau):\n for p_target, p_local in zip(self.q_network_target.parameters(), self.q_network_local.parameters()):\n p_target.data.copy_(tau * p_local.data + (1.0-tau) * p_target.data)", "def update_networks(self, agent, force_hard=False):\n\n if self.update_type == \"soft\" and not force_hard:\n self._soft_update(agent.actor, agent.actor_target)\n self._soft_update(agent.critic, agent.critic_target)\n elif self.t_step % self.C == 0 or force_hard:\n self._hard_update(agent.actor, agent.actor_target)\n self._hard_update(agent.critic, agent.critic_target)", "def update_network_postcommit(self, mech_context):\n pass", "def update_nodes(self):\n raise NotImplementedError('ERROR: sweeper has to implement update_nodes(self)')", "def _update_nn(self, bad_feats, good_feats, rate):\n self.nn.update(bad_feats, good_feats, rate)", "def compile_update_svdd(nnet, inputs, targets):\n\n floatX = Cfg.floatX\n B = Cfg.B\n C = Cfg.C\n nu = Cfg.nu\n\n # initialize R\n if nnet.R_init > 0:\n nnet.Rvar = shared(floatX(nnet.R_init), name=\"R\")\n else:\n nnet.Rvar = shared(floatX(1), name=\"R\") # initialization with R=1\n\n # Loss\n feature_layer = nnet.all_layers[-1]\n rep = lasagne.layers.get_output(feature_layer, inputs=inputs,\n deterministic=False)\n\n # initialize c (0.5 in every feature representation dimension)\n rep_dim = feature_layer.num_units\n # nnet.cvar = shared(floatX(np.ones(rep_dim) * (1. / (rep_dim ** 0.5))),\n # name=\"c\")\n nnet.cvar = shared(floatX(np.ones(rep_dim) * 0.5), name=\"c\")\n\n dist = T.sum(((rep - nnet.cvar.dimshuffle('x', 0)) ** 2),\n axis=1, dtype='floatX')\n scores = dist - nnet.Rvar\n stack = T.stack([T.zeros_like(scores), scores], axis=1)\n loss = T.cast(T.sum(T.max(stack, axis=1)) / (inputs.shape[0] * nu),\n dtype='floatX')\n\n y_pred = T.argmax(stack, axis=1)\n acc = T.cast((T.sum(T.eq(y_pred.flatten(), targets), dtype='int32')\n * 1. 
/ targets.shape[0]), 'floatX')\n\n # Network weight decay\n if Cfg.weight_decay:\n l2_penalty = (1/C) * get_l2_penalty(nnet,\n include_bias=Cfg.include_bias,\n pow=Cfg.pow)\n else:\n l2_penalty = T.cast(0, dtype='floatX')\n\n # Network activation sparsity regularization\n if Cfg.sparsity_penalty:\n sparsity_penalty = (1/B) * get_sparsity_penalty(nnet, inputs,\n Cfg.sparsity,\n mode=Cfg.sparsity_mode,\n deterministic=False)\n else:\n sparsity_penalty = T.cast(0, dtype='floatX')\n\n # Backpropagation (hard-margin: only minimizing everything to a ball\n # centered at c)\n trainable_params = lasagne.layers.get_all_params(feature_layer,\n trainable=True)\n if Cfg.gaussian_blob:\n avg_dist = T.mean(1-T.exp(-dist), dtype=\"floatX\")\n else:\n avg_dist = T.mean(dist, dtype=\"floatX\")\n obj_ball = T.cast(floatX(0.5) * l2_penalty + avg_dist + sparsity_penalty,\n dtype='floatX')\n updates_ball = get_updates(nnet, obj_ball, trainable_params,\n solver=nnet.solver)\n nnet.backprop_ball = theano.function([inputs, targets], [obj_ball, acc],\n updates=updates_ball)\n\n # Backpropagation (without training R)\n obj = T.cast(floatX(0.5) * l2_penalty + nnet.Rvar + loss + sparsity_penalty,\n dtype='floatX')\n updates = get_updates(nnet, obj, trainable_params, solver=nnet.solver)\n nnet.backprop_without_R = theano.function([inputs, targets], [obj, acc],\n updates=updates)\n\n # Backpropagation (with training R)\n trainable_params.append(nnet.Rvar) # add radius R to trainable parameters\n updates = get_updates(nnet, obj, trainable_params, solver=nnet.solver)\n nnet.backprop = theano.function([inputs, targets], [obj, acc],\n updates=updates)\n\n\n # Forwardpropagation\n test_rep = lasagne.layers.get_output(feature_layer, inputs=inputs,\n deterministic=True)\n test_rep_norm = test_rep.norm(L=2, axis=1)\n\n test_dist = T.sum(((test_rep - nnet.cvar.dimshuffle('x', 0)) ** 2),\n axis=1, dtype='floatX')\n\n test_scores = test_dist - nnet.Rvar\n test_stack = T.stack([T.zeros_like(test_scores), test_scores], axis=1)\n test_loss = T.cast(T.sum(T.max(test_stack, axis=1)) / (inputs.shape[0]*nu),\n dtype='floatX')\n\n test_y_pred = T.argmax(test_stack, axis=1)\n test_acc = T.cast((T.sum(T.eq(test_y_pred.flatten(), targets),\n dtype='int32')\n * 1. 
/ targets.shape[0]), dtype='floatX')\n\n # Network activation sparsity regularization (with determinisitc=True)\n if Cfg.sparsity_penalty:\n test_sparsity_penalty = ((1 / B) *\n get_sparsity_penalty(nnet, inputs,\n Cfg.sparsity,\n mode=Cfg.sparsity_mode,\n deterministic=True))\n else:\n test_sparsity_penalty = T.cast(0, dtype='floatX')\n\n test_obj = T.cast(floatX(0.5) * l2_penalty + nnet.Rvar + test_loss\n + test_sparsity_penalty, dtype='floatX')\n nnet.forward = theano.function([inputs, targets],\n [test_obj, test_acc, test_scores,\n floatX(0.5) * l2_penalty,\n test_sparsity_penalty, test_rep,\n test_rep_norm, test_loss, nnet.Rvar])", "def update(self, initial, follows):", "def sync(net, net_tar):\n for var, var_tar in zip(net.trainable_weights,\n net_tar.trainable_weights):\n var_tar.assign(var)", "def _validate_update_network(self, context, net_id, original_net,\n net_data):\n extern_net = self._network_is_external(context, net_id)\n with_qos = validators.is_attr_set(\n net_data.get(qos_consts.QOS_POLICY_ID))\n\n # Do not allow QoS on external networks\n if with_qos:\n if extern_net:\n raise nsx_exc.QoSOnExternalNet()\n self._validate_qos_policy_id(\n context, net_data.get(qos_consts.QOS_POLICY_ID))\n\n # Do not support changing external/non-external networks\n if (extnet_apidef.EXTERNAL in net_data and\n net_data[extnet_apidef.EXTERNAL] != extern_net):\n err_msg = _(\"Cannot change the router:external flag of a network\")\n raise n_exc.InvalidInput(error_message=err_msg)\n\n is_ens_net = self._is_ens_tz_net(context, net_id)\n if is_ens_net:\n self._assert_on_ens_with_qos(net_data)", "def test_update_network_no_policy_change(self):\n for qos_policy_id in (self.qos_policies[0].id, None):\n self.networks[0].qos_policy_id = qos_policy_id\n self.networks[0].update()\n original_network = {'qos_policy_id': qos_policy_id}\n port_ids, fip_ids, router_ids = self.qos_driver.update_network(\n mock.ANY, self.networks[0], original_network)\n self.assertEqual(set([]), port_ids)\n self.assertEqual(set([]), fip_ids)\n self.assertEqual(set([]), router_ids)\n self.mock_rules.assert_not_called()", "def post_network_ipam_update(self, resource_id, resource_dict):\n pass", "def update_weights(net, input_values, desired_output, neuron_outputs, r=1):\n raise NotImplementedError", "def init_network(session: \"Session\", new_network_name: str) -> None:\n url_tail = f\"/{CoordConstsV2.RSC_NETWORKS}\"\n _post(session, url_tail, None, params={CoordConstsV2.QP_NAME: new_network_name})", "def Update(self,n,l):\n\t\tself.n = n\n\t\tself.l = l", "def update_virtual_network(self, hVirtNet, nFlags = 0):\n\t\treturn Job(SDK.PrlSrv_UpdateVirtualNetwork(self.handle, conv_handle_arg(hVirtNet), nFlags)[0])", "def update_weights(architecture,grad_weights,grad_bias,m,v,t,lr,optimizer=\"adam\"):\n \n for layer in range(len(architecture)):\n if not (grad_weights['layer{}'.format(layer+1)] is None) and grad_bias['layer{}'.format(layer+1)] is not None:\n grad_weightsi = grad_weights['layer{}'.format(layer+1)]\n grad_weightsi /= bs\n grad_biasi = grad_bias['layer{}'.format(layer+1)]\n grad_biasi /= bs\n\n \n if optimizer.lower()==\"sgd\":\n # Mini-Batch SGD\n qw = lr*grad_weightsi\n qb = lr*grad_biasi\n else:\n # Mini-Batch Adam\n mw,mb = m['layer{}'.format(layer+1)]\n vw,vb = v['layer{}'.format(layer+1)]\n qw,mw,vw = adam(grad_weightsi,beta_1,beta_2,mw,vw,t,lr) # Have obtained dw\n qb,mb,vb = adam(grad_biasi,beta_1,beta_2,mb,vb,t,lr) # Have obtained db\n\n architecture['layer{}'.format(layer+1)][2].requires_grad = False\n 
architecture['layer{}'.format(layer+1)][3].requires_grad = False\n # Updating weights and biases now\n try:\n architecture['layer{}'.format(layer+1)][2] -= torch.Tensor(qw)\n except:\n architecture['layer{}'.format(layer+1)][2] -= torch.t(torch.Tensor(qw))\n try:\n architecture['layer{}'.format(layer+1)][3] -= torch.Tensor(qb)\n except:\n architecture['layer{}'.format(layer+1)][3] -= torch.t(torch.Tensor(qb))\n\n m['layer{}'.format(layer+1)][0] = torch.Tensor(mw)\n m['layer{}'.format(layer+1)][1] = torch.Tensor(mb)\n v['layer{}'.format(layer+1)][0] = torch.Tensor(vw)\n v['layer{}'.format(layer+1)][1] = torch.Tensor(vb)\n grad_weights['layer{}'.format(layer+1)] = torch.zeros(grad_weightsi.shape)\n grad_bias['layer{}'.format(layer+1)] = torch.zeros(grad_biasi.shape)\n return grad_weights,grad_bias,m,v", "def test_net_weight_update(self):\n nn = NeuralNet(0, 0, '', '', blank=True)\n nn.create_net(2, 1, 2, 2)\n nn.eta = 0.1\n\n # Override weights to static value for reproducibility\n for node in nn.layers[2].nodes:\n node.weights = [0.6, 0.6]\n\n for node in nn.layers[3].nodes:\n node.weights = [1.0, 1.0]\n\n nn.update_weights([2, 3], [0], test=True)\n\n test_weight = nn.layers[-1].nodes[0].weights[0]\n self.assertEqual(round(test_weight, 4), 0.9901)", "def sync_target_network(self):\n for t, e in zip(\n self.target_network.trainable_variables, self.online_network.trainable_variables\n ):\n t.assign(e)", "def update_node(self, node):\n return node.update()", "def update_net_type(self, nn_id, input_data):\n try:\n obj = models.NN_DEF_LIST_INFO.objects.get(nn_id=nn_id)\n data_set = getattr(obj, \"dir\")\n data_set.update(input_data)\n setattr(obj, \"dir\", data_set)\n obj.save()\n return input_data\n except Exception as e:\n raise Exception(e)", "def momentum_update(self, online_net, target_net, momentum):\n for param_ol, param_tgt in zip(online_net.parameters(), target_net.parameters()):\n param_tgt.data = param_tgt.data * momentum + param_ol.data * (1. - momentum)", "def update(self, *args, **kwargs):\n pass" ]
[ "0.7085726", "0.6553442", "0.6488986", "0.62651724", "0.6232808", "0.6232666", "0.61859524", "0.61575675", "0.61455834", "0.6119935", "0.6094648", "0.6091395", "0.6076523", "0.6069876", "0.60480976", "0.60480976", "0.60449016", "0.6043163", "0.60051495", "0.5985916", "0.5977186", "0.5975749", "0.59378105", "0.5916919", "0.58966684", "0.58616114", "0.5842801", "0.5827641", "0.5795396", "0.5781297", "0.57394624", "0.5724246", "0.5723179", "0.57206225", "0.57157505", "0.57017964", "0.5698869", "0.56611043", "0.5647383", "0.56401134", "0.5621022", "0.5619316", "0.56139904", "0.56065816", "0.56047916", "0.55979997", "0.5597541", "0.55933046", "0.55685043", "0.5554675", "0.55484504", "0.554392", "0.55364406", "0.5532904", "0.55183524", "0.5507534", "0.5504708", "0.5498487", "0.54794765", "0.54617643", "0.54571396", "0.544758", "0.544758", "0.544758", "0.544758", "0.544758", "0.544758", "0.54425806", "0.54425585", "0.54382795", "0.54349524", "0.54349524", "0.54313576", "0.54276204", "0.5410212", "0.5409056", "0.5403238", "0.53974056", "0.5391912", "0.5391612", "0.53862387", "0.5377967", "0.53680426", "0.53609544", "0.53567106", "0.53524286", "0.53507376", "0.53475416", "0.53465974", "0.53369117", "0.5325876", "0.53225225", "0.53185165", "0.53056234", "0.53037107", "0.5281921", "0.52789694", "0.52767086", "0.5274804", "0.5267106" ]
0.6348753
3
create an SVM loss for the network given in the argument
def compile_update_default(nnet, inputs, targets): floatX = Cfg.floatX C = Cfg.C if len(nnet.all_layers) > 1: feature_layer = nnet.all_layers[-2] else: feature_layer = nnet.input_layer final_layer = nnet.svm_layer trainable_params = lasagne.layers.get_all_params(final_layer, trainable=True) # Regularization if Cfg.weight_decay: l2_penalty = (floatX(0.5) / C) * get_l2_penalty(nnet, Cfg.include_bias) else: l2_penalty = T.cast(0, dtype='floatX') # Backpropagation prediction = lasagne.layers.get_output(final_layer, inputs=inputs, deterministic=False) objective, train_acc = final_layer.objective(prediction, targets) train_loss = T.cast((objective) / targets.shape[0], dtype='floatX') train_acc = T.cast(train_acc * 1. / targets.shape[0], dtype='floatX') train_obj = l2_penalty + train_loss updates = get_updates(nnet, train_obj, trainable_params, solver=nnet.solver) nnet.backprop = theano.function([inputs, targets], [train_obj, train_acc], updates=updates) # Hinge loss nnet.hinge_loss = theano.function([inputs, targets], [train_loss, train_acc]) # Forwardpropagation test_prediction = lasagne.layers.get_output(final_layer, inputs=inputs, deterministic=True) if nnet.data.n_classes == 2: scores = test_prediction[:, 1] - test_prediction[:, 0] else: scores = T.zeros_like(targets) objective, test_acc = final_layer.objective(test_prediction, targets) test_loss = T.cast(objective / targets.shape[0], dtype='floatX') test_acc = T.cast(test_acc * 1. / targets.shape[0], dtype='floatX') test_obj = l2_penalty + test_loss # get network feature representation test_rep = lasagne.layers.get_output(feature_layer, inputs=inputs, deterministic=True) test_rep_norm = test_rep.norm(L=2, axis=1) nnet.forward = theano.function([inputs, targets], [test_obj, test_acc, scores, l2_penalty, test_rep_norm, test_loss])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def svm_loss(W, X, y, classes, reg):\n # compute the loss and the gradient\n # num_classes = W.shape[1]\n num_train = X.shape[0]\n loss = 0.0\n dW = np.zeros(W.shape) # initialize the gradient as zero\n\n #############################################################################\n # Implementation of a SVM loss, storing the result in loss. #\n #############################################################################\n scores = X.dot(W)\n correct_class_scores = scores[np.arange(num_train), y-1]\n margin = np.transpose(scores) - correct_class_scores + 1 # delta = 1\n margin[y-1, np.arange(num_train)] = 0 \n\n # values greater than zeros in margin - calculating max(0, margin)\n gt_zero = np.maximum(np.zeros((margin.shape)), margin)\n\n loss = np.sum(gt_zero)\n\n # Right now the loss is a sum over all training examples, but we want it\n # to be an average instead so we divide by num_train. \n loss /= num_train\n # And regularization to the loss.\n loss += 0.5 * reg * np.sum(W * W)\n\n #############################################################################\n # Implementation the gradient for the SVM loss, storing the result in dW. #\n # #\n #############################################################################\n\n # classifiers having loss > 0\n gt_zero[gt_zero > 0] = 1\n\n # Calculating indexes for the necessary subtractions\n images_sum = np.sum(gt_zero, axis = 0)\n\n # Subtracting the derivative\n gt_zero[y-1, range(num_train)] = -images_sum[range(num_train)]\n\n # updating the gradients\n dW = np.transpose(gt_zero.dot(X))\n\n # Normalizing the gradient\n dW /= num_train\n\n # Adding regularization to the gradieant.\n dW += reg * W\n\n return loss, dW", "def svm_loss(x, y):\n loss, dx = None, None\n ###########################################################################\n # TODO: Implement loss and gradient for multiclass SVM classification. #\n # This will be similar to the svm loss vectorized implementation in #\n # cs231n/classifiers/linear_svm.py. 
#\n ###########################################################################\n # *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n y_temp = np.ones((x.shape[0], x.shape[1])) # 1로 구성된 x와 같은 쉐입의 매트릭스를 만든다\n #print(y_temp)\n y_score = x[np.arange(x.shape[0]), y] # 정답레이블의 스코어로만 구성된 하나의 컬럼 벡터를 만든다\n y_score = np.reshape(y_score, (x.shape[0], 1)) # 브로드캐스팅을 위해 리쉐입 해준다\n y_temp[np.arange(x.shape[0]), y] = 0 # 1로 구성된 템프매트릭스의 정답 레이블에 해당되는 인덱스에 0을 할당한다\n #print(y_temp)\n loss_temp = (x - y_score) - 1\n loss_temp = (-loss_temp * y_temp) / x.shape[0]\n loss = (np.sum(loss_temp))\n #print(loss_temp)\n\n #print(np.sum(loss_temp, axis = 1))\n \n temp = loss_temp * x.shape[0]\n temp[loss_temp > 0] = 1\n row_sum = np.sum(temp, axis = 1)\n temp[np.arange(x.shape[0]), y] = -row_sum.T\n dx = -temp\n\n dx /= x.shape[0]\n\n\n #print(dx)\n\n\n\n\n pass\n\n # *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n ###########################################################################\n # END OF YOUR CODE #\n ###########################################################################\n return loss, dx", "def svm_loss(x, y):\n x = np.squeeze(x)\n N = x.shape[0]\n yt = y\n yt[y==0]=-1\n tmp = 1-yt*x\n mask = np.ones_like(tmp)\n mask[tmp<=0] = 0\n tmp = tmp*mask\n loss = np.sum(tmp)/N\n \n dx = -yt*mask/N\n # dx = np.reshape(dx,[dx.shape[0],1])\n return loss, dx", "def svm_loss_naive(W, X, y, reg):\n dW = np.zeros(W.shape) # initialize the gradient as zero\n\n # compute the loss and the gradient\n num_classes = W.shape[1]\n num_train = X.shape[0]\n loss = 0.0\n c = np.array([0, 1])\n pred_class = []\n for i in range(num_train):\n scores = X[i].dot(W)\n pred_class.append(c[np.argmax(scores)])\n #print('scores size:',scores.shape)\n correct_class_score = scores[y[i]]\n for j in range(num_classes):\n if j == y[i]:\n continue\n margin = scores[j] - correct_class_score + 1 # note delta = 1\n if margin > 0:\n loss += margin\n\n # Right now the loss is a sum over all training examples, but we want it\n # to be an average instead so we divide by num_train.\n loss /= num_train\n\n # Add regularization to the loss.\n loss += reg * np.sum(W * W)\n print(pred_class)\n\n return loss, dW, pred_class", "def svm_loss(x, y):\n\n x = x.reshape((-1,1))\n y = y.reshape((-1,1))\n N,_ = x.shape\n \n y_p = np.where(y == 1,1,-1)\n \n losses = np.maximum(0,1-(x*y_p))\n loss = np.sum(losses)/N\n dx = np.where(losses > 0, 1, 0)*(-y_p)/N\n dx = dx.reshape((-1,))\n\n return loss, dx", "def svm_loss_naive(W, X, y, reg):\n dW = np.zeros(W.shape) # initialize the gradient as zero\n\n # compute the loss and the gradient\n num_classes = W.shape[1]\n num_train = X.shape[0]\n loss = 0.0\n for i in xrange(num_train):\n scores = X[i].dot(W)\n correct_class_score = scores[y[i]]\n for j in xrange(num_classes):\n if j == y[i]:\n continue\n margin = scores[j] - correct_class_score + 1 # note delta = 1\n if margin > 0:\n loss += margin\n dW[:,j] += X[i,:].T\n dW[:,y[i]] -= X[i,:].T\n \n\n # Right now the loss is a sum over all training examples, but we want it\n # to be an average instead so we divide by num_train.\n loss /= num_train\n dW/= num_train\n\n # Add regularization to the loss.\n loss += 0.5 * reg * np.sum(W * W)\n dW += reg*W\n\n #############################################################################\n # TODO: #\n # Compute the gradient of the loss function and store it dW. 
#\n # Rather that first computing the loss and then computing the derivative, #\n # it may be simpler to compute the derivative at the same time that the #\n # loss is being computed. As a result you may need to modify some of the #\n # code above to compute the gradient. #\n #############################################################################\n\n\n return loss, dW", "def svm_loss_naive(W, X, y, reg):\n dW = np.zeros(W.shape) # initialize the gradient as zero\n\n # compute the loss and the gradient\n num_classes = W.shape[1]\n num_train = X.shape[0]\n loss = 0.0\n for i in range(num_train):\n scores = X[i].dot(W)\n correct_class_score = scores[y[i]]\n for j in range(num_classes):\n if j == y[i]:\n continue\n margin = scores[j] - correct_class_score + 1 # note delta = 1\n if margin > 0:\n loss += margin\n dW[:, y[i]] -= X[i].transpose()\n dW[:, j] += X[i].transpose() # chain rule\n\n # Right now the loss is a sum over all training examples, but we want it\n # to be an average instead so we divide by num_train.\n loss /= num_train\n dW /= num_train\n\n # Add regularization to the loss.\n loss += 0.5 * reg * np.sum(W * W)\n dW += reg * W\n\n #############################################################################\n # TODO: #\n # Compute the gradient of the loss function and store it dW. #\n # Rather that first computing the loss and then computing the derivative, #\n # it may be simpler to compute the derivative at the same time that the #\n # loss is being computed. As a result you may need to modify some of the #\n # code above to compute the gradient. #\n #############################################################################\n\n return loss, dW", "def svm_loss_naive(W, X, y, reg):\n dW = np.zeros(W.shape) # initialize the gradient as zero\n\n # compute the loss and the gradient\n num_classes = W.shape[0]\n num_train = X.shape[1]\n loss = 0.0\n for i in xrange(num_train):\n scores = W.dot(X[:, i])\n correct_class_score = scores[y[i]]\n for j in xrange(num_classes):\n if j == y[i]:\n continue\n margin = scores[j] - correct_class_score + 1 # note delta = 1\n if margin > 0:\n loss += margin\n dW[j] += X[:, i]\n dW[y[i]] -= X[:, i]\n\n # Right now the loss is a sum over all training examples, but we want it\n # to be an average instead so we divide by num_train.\n loss /= num_train\n\n # Add regularization to the loss.\n loss += 0.5 * reg * np.sum(W * W)\n\n #############################################################################\n # TODO: #\n # Compute the gradient of the loss function and store it dW. #\n # Rather that first computing the loss and then computing the derivative, #\n # it may be simpler to compute the derivative at the same time that the #\n # loss is being computed. As a result you may need to modify some of the #\n # code above to compute the gradient. 
#\n #############################################################################\n\n\n return loss, (dW / num_train)", "def svm_loss(x, y):\n N = x.shape[0]\n x = np.squeeze(x)\n loss = np.sum(((1-x*y)>0)*(1-x*y))/N\n dx = ((1-x*y)>0)*(-y)/N\n return loss, dx", "def svm_loss(x, y):\n N = x.shape[0]\n correct_class_scores = x[np.arange(N), y]\n margins = np.maximum(0, x - correct_class_scores[:, np.newaxis] + 1.0)\n margins[np.arange(N), y] = 0\n loss = np.sum(margins) / N\n num_pos = np.sum(margins > 0, axis=1)\n dx = np.zeros_like(x)\n dx[margins > 0] = 1\n dx[np.arange(N), y] -= num_pos\n dx /= N\n return loss, dx", "def compute_loss(self):", "def loss_fn(self, targets, outputs, model):", "def tv_loss(x, name='tv_loss'):\n raise NotImplementedError(\"Please use tensorflow total_variation loss.\")", "def compute_loss(self, features, mode, params, precomputed):\n raise NotImplementedError(\"Model does not implement loss.\")", "def svm_loss(x, y):\n N = x.shape[0]\n correct_class_scores = x[np.arange(N), y]\n margins = np.maximum(0, x - correct_class_scores[:, np.newaxis] + 1.0)\n margins[np.arange(N), y] = 0\n loss = np.sum(margins) / N\n num_pos = np.sum(margins > 0, axis=1)\n dx = np.zeros_like(x)\n dx[margins > 0] = 1\n dx[np.arange(N), y] -= num_pos\n dx /= N\n return loss, dx", "def get_loss(self, xs, y):\n \"*** YOUR CODE HERE ***\"\n y_pred = self.run(xs)\n return nn.SoftmaxLoss(y_pred,y)", "def svm_loss_vectorized(W, X, y, reg):\n num_classes = W.shape[1]\n num_train = X.shape[0]\n #loss = 0.0 \n loss = 0.0\n scores = np.zeros((1,num_classes))\n dW = np.zeros(W.shape) # initialize the gradient as zero\n\n #############################################################################\n # TODO: #\n # Implement a vectorized version of the structured SVM loss, storing the #\n # result in loss. 
#\n #############################################################################\n \n # lines begin with double \"#\" are the last version of code!!!!!\n \n ##for i in xrange(num_train):\n #XX = np.tile(X[i,:],(num_classes,1)) # try to use broadcasting\n #scores = np.sum(np.multiply(XX,W.T), axis = 1)\n ## scores = np.sum(np.multiply(X[i,:],W.T), axis = 1)\n \n ## if i ==1: print scores\n \n #loss += np.sum(scores - scores[y[i]]) + num_classes -1\n #http://stackoverflow.com/questions/2900084/counting-positive-elements-in-a-list-with-python-list-comprehensions\n ## scores+=1\n ## scores[y[i]]-=1 \n #however, this is sum over index, not values, glaube ich \n #loss+= sum(x < 0 for x in (scores-scores[y[i]]))\n ## loss+= (scores-scores[y[i]])[scores-scores[y[i]]>0].sum()\n #pass\n ############################################\n # construct a zero loop version\n ############################################\n scores2D = np.zeros((num_train, num_classes)) #used to store dotted scores\n scores1D = np.zeros((num_train,1)) #used to store corrected scores\n #index1D = np.zeros((1,num_classes))\n #index1D = range(num_classes) \n #scores1D = y[index1D]\n \n scores2D = np.dot(X,W) \n ##for i in xrange(num_train):\n ## scores1D[i,0]=scores2D[i,y[i]]-1 #find the correct scores and fill them into scores1D, the value -1 is because: si-sj+1\n ## scores2D[i,y[i]]-=1 # we want at corrected score voxel, the value should be 0, correct score -1 - \n #(correct score -1) = 0\n #####################################\n #for loop replacement###\n indexInsert = np.arange(num_train)\n scores1D[indexInsert,0] = scores2D[indexInsert,y[indexInsert]] -1 #using array indexing\n scores2D[indexInsert,y[indexInsert]] -=1\n \n ##################################### \n \n #scores2D = X.dot(W)\n #http://stackoverflow.com/questions/9497290/how-would-i-sum-a-multi-dimensional-array-in-the-most-succinct-python\n #rewrite summation\n #loss += (scores2D-scores1D)[scores2D-scores1D >0].sum()\n #temp = scores2D-np.tile (scores1D, (1,num_classes)) # for each score minus the corrected score\n temp = scores2D-scores1D #broadcasting!!\n #print temp[1,:]\n temp= temp.clip(min=0) \n #loss += sum(map(sum, (temp)[temp>0]))\n #loss += sum(map(sum, (temp)))\n #loss += (temp)[temp >0].sum()\n loss += sum(sum(x) for x in temp) #sum them up\n #loss -= num_train # minus 1 is because in each train, due to the plus 1 above , correct score - correct \n # score +1 = 1, but it should be 0, therefore, i deduce them at the last minute \n # ( then I made this also in the for loop to meet intuitive)\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n\n loss /= num_train\n loss += 0.5 * reg * np.sum(W * W)\n #############################################################################\n # TODO: #\n # Implement a vectorized version of the gradient for the structured SVM #\n # loss, storing the result in dW. #\n # #\n # Hint: Instead of computing the gradient from scratch, it may be easier #\n # to reuse some of the intermediate values that you used to compute the #\n # loss. 
#\n #############################################################################\n #tempBool = np.divide(temp, temp)\n #tempBool = tempBool.clip(max=1,min=0)\n #http://stackoverflow.com/questions/19666626/replace-all-elements-of-python-numpy-array-that-are-greater-than-some-value\n tempBool = np.copy(temp) # temp = scores2D-scores1D , temp= temp.clip(min=0)\n # temp is already the every score minus the correct labeled score\n tempBool[tempBool>0] = 1 # for every element, when it is positive, set it to one (for weighting)\n for j in xrange(num_train):\n tempBool[j,y[j]] =-1*sum(tempBool[j,:]) # calculate how many final scores, max(~~,0) are more than 0, add the number to the correct\n # label element, because it is the times that the corrected scores be used\n dW += np.reshape (X[j,:],(X.shape[1],1))*tempBool[j,:] # broadcasting, out-product\n #pass\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n dW/= num_train\n dW += reg*W\n \n return loss, dW", "def get_loss(self, xs, y):\n \"*** YOUR CODE HERE question 4 ***\"\n return nn.SoftmaxLoss(self.run(xs), y)", "def get_loss(self, x, y):\n \"*** YOUR CODE HERE ***\"\n y_pred = self.run(x)\n return nn.SoftmaxLoss(y_pred,y)", "def compute_loss(self, obs, returns):", "def get_loss(self, xs, y):\n \"*** YOUR CODE HERE ***\"\n predictedY = self.run(xs)\n return nn.SoftmaxLoss(predictedY, y)\n # return nn.SquareLoss(predictedY, y)", "def get_loss(self, x, y):\n \"*** YOUR CODE HERE question 3 ***\"\n return nn.SoftmaxLoss(self.run(x), y)", "def _compute_loss(self, predictions, targets, **params):\n pass", "def loss_(self, batch):\n raise NotImplementedError", "def svm_loss_vectorized(W, X, y, reg):\n loss = 0.0\n dW = np.zeros(W.shape) # initialize the gradient as zero\n delta = 1 # margin of the SVM\n #############################################################################\n # TODO: #\n # Implement a vectorized version of the structured SVM loss, storing the #\n # result in loss. #\n # 1) Dot product of weight and data matrix\n XW = np.dot(W,X)\n # 2) get correct class scores using y \n correct_class=XW[y,np.arange(X.shape[1])]\n # 3) find margins by using element wise maximum function\n #print np.matrix(correct_class).shape\n mar=np.maximum(0,XW-np.matrix(correct_class) + delta)\n #print mar.shape\n # Make correct classes 0\n mar[y,np.arange(X.shape[1])]=0\n #print mar.shape\n # get loss by summing and dividing by n\n loss = np.sum(mar)\n loss /= X.shape[1]\n # adjust by regularization strength\n loss += 0.5 * reg * np.sum(np.square(W))\n \n \n \n #############################################################################\n pass\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n \n #############################################################################\n # TODO: #\n # Implement a vectorized version of the gradient for the structured SVM #\n # loss, storing the result in dW. #\n # #\n # Hint: Instead of computing the gradient from scratch, it may be easier #\n # to reuse some of the intermediate values that you used to compute the #\n # loss. 
#\n #############################################################################\n # create a binary matrix \n binary_mat=mar\n binary_mat[mar>0]=1\n \n # sum of all incorrect classes \n #print binary_mat.shape\n sum=np.sum(binary_mat,axis=0)\n \n # y coordinate decreases and hence negative \n binary_mat[y,np.arange(X.shape[1])]= -sum\n \n dW = (np.dot(binary_mat,X.T))\n dW = dW / X.shape[1]\n dW = dW + reg*W \n pass\n\n \n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n\n return loss, dW", "def get_loss(self, x, y):\n \"*** YOUR CODE HERE ***\"\n predictedY = self.run(x)\n return nn.SoftmaxLoss(predictedY, y)", "def svm_loss(scores, y):\r\n\r\n N = scores.shape[0]\r\n\r\n # Compute svm data loss\r\n correct_class_scores = scores[range(N), y]\r\n margins = np.maximum(0.0, scores - correct_class_scores[:, None] + 1.0)\r\n margins[range(N), y] = 0.0\r\n loss = np.sum(margins) / N\r\n\r\n # Compute gradient off loss function w.r.t. scores\r\n num_pos = np.sum(margins > 0, axis=1)\r\n dscores = np.zeros(scores.shape)\r\n dscores[margins > 0] = 1\r\n dscores[range(N), y] -= num_pos\r\n dscores /= N\r\n\r\n return loss, dscores", "def loss(self, X, labels):\n features = self.get_conv_features(X)\n loss = blah\n return loss", "def svm_loss_forloop(W, X, y, reg, delta=1):\n\n ################################################################################\n # You implementation #\n # Use the ahove svm_loss_bias_forloop implementation as reference #\n ################################################################################\n\n # initialize the returned results\n loss = 0.0\n d_W = np.zeros(W.shape)\n\n # compute the loss and the gradient\n num_classes = W.shape[1]\n num_train = X.shape[0]\n\n for i in xrange(num_train):\n # compute the classification scores for a single image\n scores = X[i].dot(W)\n correct_class_score = scores[y[i]]\n for j in xrange(num_classes):\n # compute the loss for this image\n if j == y[i]:\n continue\n margin = scores[j] - correct_class_score + delta\n if margin > 0:\n loss += margin\n # compute the gradient for this image\n d_W[:, j] += X[i, :].T\n d_W[:, y[i]] -= X[i, :].T\n\n # Right now the loss is a sum over all training examples\n # We need it to be an average instead so we divide by num_train.\n loss /= num_train\n # Add regularization to the loss.\n #no reg on bias\n loss += 0.5 * reg * np.sum(W[:-1,:] * W[:-1,:])\n\n # Do the same for d_W and d_b\n d_W /= num_train\n d_W[:-1,:] += reg * W[:-1,:]\n\n\n return loss, d_W", "def loss(self, X, labels):\n features = self.get_conv_feats(X)\n loss = blah\n return loss", "def loss_fn(model):\n with flax.nn.stateful(state) as new_state:\n with flax.nn.stochastic(prng_key):\n logits = model(batch['image'])\n loss = cross_entropy_loss(logits, batch['label'])\n # TODO(britefury): check if applying L2 regularization to weights but\n # *not* biases improves results\n weight_penalty_params = jax.tree_leaves(model.params)\n weight_l2 = sum([jnp.sum(x ** 2)\n for x in weight_penalty_params\n if x.ndim > 1])\n weight_penalty = l2_reg * 0.5 * weight_l2\n loss = loss + weight_penalty\n return loss, (new_state, logits)", "def compute_loss(self, **kwargs):\n raise NotImplementedError", "def loss(returns, predicted_output):\n ################################\n # YOUR IMPLEMENTATION HERE #\n ################################\n raise NotImplementedError", "def svm_loss_vectorized(W, X, y, reg, 
delta=1):\n loss = 0.0\n dW = np.zeros(W.shape) # initialize the gradient as zero\n\n #############################################################################\n # Understand this implementation #\n #############################################################################\n # Hint: check how numpy broadcasting and advanced indexing are used\n # https://docs.scipy.org/doc/numpy/user/basics.broadcasting.html\n # https://docs.scipy.org/doc/numpy/reference/arrays.indexing.html\n # This allows selection of arbitrary items in the array based on their N-dimensional index. Each integer array represents a number of indexes into that dimension.\n\n # Get dims\n D = X.shape[1]\n num_classes = W.shape[1]\n num_train = X.shape[0]\n scores = X.dot(W)\n\n correct_scores = scores[np.arange(num_train), y].reshape(-1, 1) # using the fact that all elements in y are < C == num_classes\n mat = scores - correct_scores + delta \n mat[np.arange(num_train), y] = 0 # accounting for the j=y_i term we shouldn't count (subtracting 1 makes up for it since w_j = w_{y_j} in this case)\n \n # Compute max\n thresh = np.maximum(np.zeros((num_train, num_classes)), mat)\n # Compute loss as double sum\n loss = np.sum(thresh)\n loss /= num_train\n \n # Add regularization\n loss += 0.5 * reg * np.sum(W * W)\n\n # Binarize into integers\n binary = thresh\n binary[thresh > 0] = 1\n\n row_sum = np.sum(binary, axis=1)\n binary[range(num_train), y] = -row_sum[range(num_train)]\n dW = np.dot(X.T, binary)\n\n # Divide\n dW /= num_train\n\n # Regularize\n dW += reg*W\n \n return loss, dW", "def svm_loss_naive(theta, X, y, reg):\n\n delta = 1.0\n dtheta = np.zeros(theta.shape) # initialize the gradient as zero\n\n # compute the loss function\n\n K = theta.shape[1]\n m = X.shape[0]\n J = 0.0\n for i in xrange(m):\n\tscores = X[i,:].dot(theta)\n\tcorrect_class_score = scores[y[i]]\n\tfor j in xrange(K):\n\t\tif j == y[i]:\n\t\t\tcontinue\n\t\tmargin = max(0,scores[j] - correct_class_score + delta)\n\t\tJ += margin\n\t\tif margin > 0 and j!=y[i]:\t\t\n\t\t\tdtheta[:,j] = dtheta[:,j]+X[i,:]\n\t\t\tdtheta[:,y[i]] = dtheta[:,y[i]]-X[i,:]\n\n\n # Right now the loss is a sum over all training examples, but we want it\n # To be an average instead so we divide by num_train.\n J /= m\n dtheta = dtheta/m\n # Add regularization to the loss.\n J += 0.5 * reg * np.sum(theta * theta)\n dtheta =dtheta + reg*theta\n\n #############################################################################\n # TODO: #\n # Compute the gradient of the loss function and store it dtheta. #\n # Rather that first computing the loss and then computing the derivative, #\n # it may be simpler to compute the derivative at the same time that the #\n # loss is being computed. As a result you may need to modify some of the #\n # code above to compute the gradient. 
#\n #############################################################################\n\n\n return J, dtheta", "def svm():", "def loss(self, X, y):\n pass", "def loss(A, Y):\n return A - Y", "def loss(self, **kwargs):\n pass", "def loss_fn(params):\n logits = models.ProgramTransformer(config).apply(\n {'params': params},\n inputs,\n outputs,\n programs,\n rngs={'dropout': train_rng})\n loss, weight_sum = compute_weighted_cross_entropy(logits, programs, weights)\n mean_loss = loss / weight_sum\n return mean_loss, logits", "def make_loss(self, logit=None, labels=None):\r\n return nn.functional.mse_loss(logit, labels, reduction='mean') # The MSE Loss\r", "def tv_loss(img, tv_weight):\n # Your implementation should be vectorized and not require any loops!\n # *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n\n pass\n\n # *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****", "def loss(self, X, y):\n\n # Initialize the loss to zero.\n loss = 0.0\n num_classes = self.W.shape[0] # C = num_classes\n num_train = X.shape[0]\n \n exp_a = np.zeros((num_classes,num_train))\n # ================================================================ #\n # YOUR CODE HERE:\n # Calculate the normalized softmax loss. Store it as the variable loss.\n # (That is, calculate the sum of the losses of all the training \n # set margins, and then normalize the loss by the number of \n # training examples.)\n # ================================================================ #\n \n \n for i in np.arange(num_train):\n \n Loss = 0.0\n\n class_scores = np.dot(self.W,X[i,:].T) # calculating class scores (C x 1 vector)\n class_scores -= np.max(class_scores) # considering the possible issue for numerical instability and account for it\n\n exp_a[:,i] = np.exp(class_scores) # turning class scores to probabilities (C x 1 vector), without normalization\n\n Loss -= np.log(exp_a[y[i],i]/np.sum(exp_a[:,i]))\n \n\n #p[:,i] = exp_a[:,i]/np.sum(exp_a[:,i]) # p now is a valid probability matrix\n #print(p[:,i])\n\n loss += Loss \n #print(Loss,i) \n \n pass\n loss /= num_train\n # ================================================================ #\n # END YOUR CODE HERE\n # ================================================================ #\n\n return loss", "def loss_fun(para):\n\n return -data_processing(my_cir(para))", "def svm_loss_vectorized(W, X, y, reg):\n loss = 0.0\n dW = np.zeros(W.shape) # initialize the gradient as zero\n delta = 1 # margin of the SVM\n #############################################################################\n # TODO: #\n # Implement a vectorized version of the structured SVM loss, storing the #\n # result in loss. 
#\n #############################################################################\n pass\n dim, num_train = X.shape\n # print X.dtype\n num_classes = np.max(y) + 1 # assume y takes values 0...K-1 where K is number of classes\n # print \"Num Classes: \", num_classes\n if W is None:\n # lazily initialize W\n W = np.random.randn(num_classes, dim) * 0.0001\n\n loss_image_arr = np.empty([num_train, num_classes])\n\n # Information about various dimensions\n # print \"Num Dimensions: \", dim, \"Num Samples: \", num_train, \"Num Classes: \", num_classes\n\n score_matrix = np.matmul(W, X)\n for i in range(num_train):\n\n # Sanity Check for the sizes of he matrices after multiplication\n # All rows in score_matrix represents the score of an image in a class\n # print \"Weight Matrix Shape: \", self.W.shape, \"Score Matrix Shape: \", score_matrix.shape\n\n for j in range(num_classes):\n if (j!=y[i]):\n loss_image_arr[i, j] = (max(0, score_matrix[j, i] - score_matrix[y[i], i] + delta))\n\n reg_loss = reg * np.sum(np.square(W))\n loss = np.sum(loss_image_arr)/num_train + reg_loss\n\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n \n #############################################################################\n # TODO: #\n # Implement a vectorized version of the gradient for the structured SVM #\n # loss, storing the result in dW. #\n # #\n # Hint: Instead of computing the gradient from scratch, it may be easier #\n # to reuse some of the intermediate values that you used to compute the #\n # loss. #\n #############################################################################\n pass\n binary_matrix = loss_image_arr>0\n # print \"Binary Matrix: \", binary_matrix.shape, \"X_batch: \", X_batch.shape\n\n dW = -np.transpose(np.matmul(X, binary_matrix))\n dW = dW/num_train\n # print \"Iteration -- \", \"Loss: \", loss_iter , \"Gradient Shape: \", dW.shape, \"Weight Shape: \", self.W.shape\n\n \n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n\n return loss, dW", "def ss_loss_(self, batch):\n raise NotImplementedError", "def svm_loss_vectorized(W, X, y, reg):\n loss = 0.0\n dW = np.zeros(W.shape) # initialize the gradient as zero\n delta = 1.0\n num_train = X.shape[1]\n #############################################################################\n # TODO: #\n # Implement a vectorized version of the structured SVM loss, storing the #\n # result in loss. #\n #############################################################################\n scores = W.dot(X)\n idx = range(X.shape[1])\n correct_score = scores[y, idx]\n\n # print scores[y[0], 0], correct_score[0]\n \n correct_score = np.tile(correct_score, (10,1))\n loss = np.sum(np.maximum(np.zeros((W.shape[0], X.shape[1])), scores - correct_score + delta))\n loss -= X.shape[1] * delta\n loss /= X.shape[1]\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n\n\n #############################################################################\n # TODO: #\n # Implement a vectorized version of the gradient for the structured SVM #\n # loss, storing the result in dW. 
#\n # #\n # Hint: Instead of computing the gradient from scratch, it may be easier #\n # to reuse some of the intermediate values that you used to compute the #\n # loss. #\n #############################################################################\n # Calculate 0, loss maximum\n # take out all the non-zero entries\n # multiply with the training examples matrix transpose.\n # Add the resulting ans to dW\n maximum_mask = np.maximum(np.zeros((W.shape[0], X.shape[1])), scores - correct_score + delta)\n maximum_mask[y, idx] = 0\n\n maximum_mask[maximum_mask != 0] = 1\n \n sum_columnwise = np.sum(maximum_mask, axis=0)\n # replace correct entry with sum of columns\n maximum_mask[y, idx] = -sum_columnwise[range(num_train)]\n\n # Here we are doing two things at once, first we are calculating sum of all 1 entries in row\n # and then subtract that many number of times as sum of ones across column.\n dW = maximum_mask.dot(X.T)\n dW /= num_train\n dW += reg * W\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n\n return loss, dW", "def loss(self, x, y):\n raise NotImplementedError", "def compute_loss(self, *args, **kwargs):\n raise NotImplementedError", "def loss(self, labels, input_data):\n\n pred, out = self.inference(input_data)\n loss = tf.reduce_mean(tf.losses.sparse_softmax_cross_entropy(labels, out), name=\"loss\") + \\\n tf.losses.get_regularization_loss()\n return loss, pred", "def add_loss_op(self, pred):\n ### YOUR CODE HERE\n loss = cross_entropy_loss(self.labels_placeholder,pred)\n ### END YOUR CODE\n return loss", "def compute_loss(self):\n self.prototypes = self.compute_prototypes()\n self.test_logits = self.compute_logits()\n loss = tf.nn.sparse_softmax_cross_entropy_with_logits(\n labels=self.episode.test_labels, logits=self.test_logits)\n cross_entropy_loss = tf.reduce_mean(loss)\n regularization = tf.reduce_sum(\n tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES))\n loss = cross_entropy_loss + self.weight_decay * regularization\n return loss", "def svm_loss_vectorized(W, X, y, reg):\n loss = 0.0\n dW = np.zeros(W.shape) # initialize the gradient as zero\n\n #############################################################################\n # TODO: #\n # Implement a vectorized version of the structured SVM loss, storing the #\n # result in loss. #\n #############################################################################\n num_classes = W.shape[1]\n num_train = X.shape[0]\n scores = np.dot(X, W)\n correct_class_score = np.choose(y, scores.T).reshape(-1, 1)\n thresh = np.maximum(np.zeros(scores.shape), scores - correct_class_score + 1)\n thresh[np.arange(num_train), y] = 0\n loss = np.sum(thresh)\n\n loss /= num_train\n loss += 0.5 * reg * np.sum(W * W)\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n\n\n #############################################################################\n # TODO: #\n # Implement a vectorized version of the gradient for the structured SVM #\n # loss, storing the result in dW. #\n # #\n # Hint: Instead of computing the gradient from scratch, it may be easier #\n # to reuse some of the intermediate values that you used to compute the #\n # loss. 
#\n #############################################################################\n binary = thresh\n binary[binary > 0] = 1\n\n row_sum = np.sum(binary, axis=1)\n binary[np.arange(num_train), y] -= row_sum\n dW = np.dot(X.T, binary)\n\n dW /= num_train\n dW += reg * W\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n\n return loss, dW", "def loss_fn(self, recons, inputs, mu, log_var, **kwargs):\n# kld_weight = kwargs['M_N'] # Account for the minibatch samples from the dataset\n recons_loss = F.mse_loss(recons, inputs)\n# recons_loss = F.binary_cross_entropy(recons, inputs)\n KLD = torch.mean(-0.5 * torch.sum(1 + log_var - mu**2 - log_var.exp(), dim=1), dim=0)\n loss = recons_loss - KLD\n return loss, recons_loss, KLD", "def loss(self):\n return 'mse'", "def eval_loss(self, input_dataset, target_dataset):\n\t\t#######################################################################\n\t\t# ** START OF YOUR CODE **\n\t\t#######################################################################\n\t\tprediction = self.network.forward(input_dataset)\n\t\tloss = self._loss_layer.forward(prediction, target_dataset)\n\t\t\n\t\treturn loss\n\t\t#######################################################################\n\t\t# ** END OF YOUR CODE **\n\t\t#######################################################################", "def get_loss(self, xs, y):\n return nn.SoftmaxLoss(self.run(xs), y)", "def tv_loss(input: th.Tensor):\n input = tf.pad(input, (0, 1, 0, 1), \"replicate\")\n x_diff = input[..., :-1, 1:] - input[..., :-1, :-1]\n y_diff = input[..., 1:, :-1] - input[..., :-1, :-1]\n return (x_diff ** 2 + y_diff ** 2).mean([1, 2, 3])", "def loss_fn(outputs, labels):\n #print('this is outputs', outputs.shape) # 2,3,128,128\n #print('this is labels', labels.shape) # 2,3,128,128\n N, C, H, W = outputs.shape\n \n# outputs = unnormalize(outputs, mean=[0.51371954, 0.40949144, 0.35572536], std= [0.2926419, 0.26180502, 0.25512055])\n # check if we normalize label images #labels = unnormalize(labels, mean=[0.53459634,0.39673596,0.33788489], std= [0.29101071,0.26140346,0.25485687])\n \n mse_loss = torch.sum((outputs - labels) ** 2) / N / C # each photo, each channel\n mse_loss *= 255 * 255\n mse_loss /= H * W \n # average loss on each pixel(0-255)\n return mse_loss", "def _calc_loss(self, fvs, labels, w, b):\n\n loss = 0.5 * self.lda * (np.linalg.norm(w) ** 2)\n tmp = sum(map(lambda x, y: (x - y) ** 2, fvs.dot(w) + b, labels))\n loss += tmp / fvs.shape[0]\n\n return loss", "def predictSVM(w, x):\n \n # compute activation for test example and threshold the result\n a = np.dot(w, x);\n label = 1 if a > 0 else -1;\n \n return label;", "def _create_loss(self):\n\n with tf.name_scope(\"loss\"):\n \n # gini=(tf.nn.l2_loss( self.score))/100000\n gini = tf.losses.softmax_cross_entropy(self.score, 0*self.score)\n \n promo_prob=tf.reduce_sum(tf.multiply(self.score, self.cohort_weight),\n axis=1)\n inc_value = tf.reduce_mean(tf.multiply(promo_prob, self.value))- self.control_value\n inc_cost = tf.reduce_mean( tf.multiply(promo_prob, self.cost)) - self.control_cost\n \n\n\n # determine loss function based on self.obj_rule\n if self.obj_rule == 'cpiv':\n self.objective = inc_cost / inc_value\n\n elif self.obj_rule == 'ivc':\n # maximize ivc\n self.objective = - inc_value / inc_cost\n\n elif self.obj_rule == 'lagrangian':\n assert self.shadow is not None, 'Need to pass in shadow value if use 
lagrangian as obj_rule.'\n self.objective = inc_cost - self.shadow * inc_value\n\n elif self.obj_rule == 'value':\n # maximize delta values\n self.objective = - inc_value\n\n # use only cost as objective\n elif self.obj_rule == 'cost':\n # maximize delta cost\n self.objective = - inc_cost\n\n else:\n raise Exception('Invalid obj_rule!')\n\n # regularization\n reg_loss = tf.reduce_sum(tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES))\n # weights = tf.trainable_variables() # all vars of your graph\n # reg_loss = tf.norm( weights,ord=1)\n\n # final loss\n self.loss = self.objective +reg_loss+.1*gini", "def vae_loss(x, t_decoded):\r\n return K.mean(reconstruction_loss(x, t_decoded))", "def get_loss(self, x, y):\n \"*** YOUR CODE HERE ***\"\n #make your predictions using run\n #compute loss nn.squareloss\n y_pred = self.run(x)\n return nn.SquareLoss(y_pred,y)", "def model_loss(inp, fake, real_label, fake_label):\n \n \n Dreal,realcls,R1 = gradpen(inp)\n [Dfake,fakecls] = D(fake)\n # 1. Adversarial loss\n \n glabel = tf.ones_like(Dfake)#tf.random.uniform((Dfake.shape), 1-LN, 1)\n dlabelr = tf.ones_like(Dreal)#tf.random.uniform((Dreal.shape), 1-LN, 1)\n dlabelf = tf.zeros_like(Dfake)#tf.random.uniform((Dfake.shape), 0, LN)\n \n \n \n # D has no sigmoid activation: \"from_logits=True\"\n real_loss = tf.keras.losses.binary_crossentropy(\n dlabelr, Dreal, from_logits=True)\n real_loss = tf.reduce_mean(real_loss)\n \n fake_loss = tf.keras.losses.binary_crossentropy(\n dlabelf, Dfake, from_logits=True)\n fake_loss = tf.reduce_mean(fake_loss)\n \n Dadv = 0.5*(real_loss+fake_loss)\n \n Gadv = tf.keras.losses.binary_crossentropy(\n glabel, Dfake, from_logits=True)\n Gadv = tf.reduce_mean(Gadv)\n \n # 2. Classification loss\n \n Dcls = tf.keras.losses.binary_crossentropy(real_label, realcls, from_logits=True)\n Dcls = tf.reduce_mean(Dcls)\n \n Gcls = tf.keras.losses.binary_crossentropy(fake_label, fakecls, from_logits=True)\n Gcls = tf.reduce_mean(Gcls)\n \n # 3. 
Total loss\n \n Dloss = Dadv + (GAMMA/2)*R1 + LAMBDA_CLS*Dcls\n \n Gloss = Gadv + LAMBDA_CLS*Gcls\n \n return (Dloss, Dadv, Dcls, R1), (Gloss, Gadv, Gcls)", "def loss(W_vect, X, T):\n # log_prior = - 0.5 * L2_reg * jnp.dot(W_vect, W_vect)\n return jnp.mean((predictions(W_vect, X) - T)**2) + 0.5*jnp.log(2*jnp.pi)", "def demo():\n def load_data():\n train = open(\"csv/svd_train.csv\", \"r\")\n r = csv.reader(train)\n next(r)\n\n data = []\n target = []\n\n print \"Prepping data...\"\n for row in r:\n aux = [0 for x in xrange(10)]\n aux[int(row[0])] = 1\n target.append(aux)\n data.append([float(x) for x in row[1:]])\n\n train.close()\n\n data = np.array(data)\n\n target = np.array(target)\n\n #train = [target[:35000],data[:35000]]\n #test = [target[35000:],data[35000:]]\n\n return [target, data]\n\n NN = MLP_NeuralNetwork(101, 75, 35, 10,\n iterations = 200,\n learning_rate = 0.5,\n momentum = 0.05,\n rate_decay = 0.005)\n\n train = load_data()\n\n NN.train(train)\n #NN.test_cross(test)\n #NN.test()\n NN.test_against()", "def loss_fun(model: GPModel, params: dict) -> float:\n py = model.module.call(params, train_ds['index_points'])\n return -py.log_prob(train_ds['y'])", "def get_loss_fn():\n return reconstruction", "def train(self, in_v, out_v): \n current_loss = self._update(in_v, out_v,\n self._context_init_zeros, self._out_init_zeros)\n\n return current_loss", "def _svm_loss_penalty_dual(name):\n return hp.choice(name, [\n ('hinge', 'l2', True),\n ('squared_hinge', 'l2', True),\n ('squared_hinge', 'l1', False),\n ('squared_hinge', 'l2', False)\n ])", "def compute_loss(y, tx, w):\n e = y - tx.dot(w)\n return calculate_mse(e)", "def loss_fn(self, lbl, y):\n\n binlbl = self._to_device(lbl[:,0]>.5)\n # center = self._to_device(lbl[:,3]) \n offset = 5. * self._to_device(lbl[:,1:]) \n\n loss = self.criterion(y[:,:2], offset) \n loss2 = self.criterion2(y[:,2], binlbl)\n\n # loss3 = self.criterion(y[:,3], center)\n\n loss = loss + loss2\n return loss", "def get_loss(self, x, y):\n \"*** YOUR CODE HERE ***\"\n predictedY = self.run(x)\n return nn.SquareLoss(predictedY, y)", "def compute_loss(self):\n self.test_logits = self.compute_logits()\n loss = tf.nn.sparse_softmax_cross_entropy_with_logits(\n labels=self.data.test_labels, logits=self.test_logits)\n cross_entropy_loss = tf.reduce_mean(loss)\n regularization = tf.reduce_sum(\n tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES))\n loss = cross_entropy_loss + self.weight_decay * regularization\n return loss", "def get_loss(self, x, y):\n \"*** YOUR CODE HERE question 2 ***\"\n return nn.SquareLoss(self.run(x), y)", "def build_loss(self):\n\n opt = tf.train.AdamOptimizer(self.learning_rate)\n mse = tf.losses.mean_squared_error(self.label[-1], self.outputs[-1])\n loss = tf.losses.get_total_loss()\n\n return mse, loss", "def loss(loss_name):\n \n def contrastive_loss(y_true, y_pred, margin = 1):\n \"\"\"Implementation of the triplet loss function\n\n\n Contrastive loss = 0.5 * mean( (1-true_value) * square(distance) + true_value * square( max(margin-distance, 0) ))\n\n Args:\n y_true (int): true label, positive pair (same class) -> 0, \n negative pair (different class) -> 1\n \n y_pred (list): python list containing two objects in a pair of tensors:\n left : the encodings for one image data in a pair\n right : the encodings for the other image data in a pair\n margin (float, optional): m > 0 determines how far the embeddings of \n a negative pair should be pushed apart. 
Defaults to 1.\n\n\n Returns:\n loss (float): real number, value of the loss\n \"\"\"\n\n left = y_pred[0]\n right = y_pred[1]\n\n distance = tf.math.sqrt(tf.math.reduce_sum(tf.math.square(left - right), axis=-1))\n\n loss_positive = tf.math.square(distance)\n loss_negative = tf.math.square(tf.maximum(0., margin - distance))\n \n loss = y_true * loss_negative + (1 - y_true) * loss_positive\n loss = 0.5 * tf.math.reduce_mean(loss)\n\n return loss\n\n def triplet_loss(y_true, y_pred, margin = 1):\n \"\"\"Implementation of the triplet loss function\n\n Arguments:\n y_true : true labels, required when you define a loss in Keras, \n not applied in this function.\n\n y_pred (list): python list containing three objects:\n anchor : the encodings for the anchor data\n positive : the encodings for the positive data (similar to anchor)\n negative : the encodings for the negative data (different from anchor)\n \n margin (float, optional): m > 0 determines how far the embeddings of \n a negative data should be pushed apart. Defaults to 1.\n\n Returns:\n loss (float): real number, value of the loss\n \"\"\"\n\n anchor = y_pred[0]\n positive = y_pred[1]\n negative = y_pred[2]\n\n # squared distance between the anchor and the positive\n pos_dist = tf.math.reduce_sum(tf.math.square(anchor - positive), axis=-1)\n\n # squared distance between the anchor and the negative\n neg_dist = tf.math.reduce_sum(tf.math.square(anchor - negative), axis=-1)\n\n # compute loss\n basic_loss = margin + pos_dist - neg_dist\n loss = tf.math.maximum(basic_loss,0.0)\n loss = tf.math.reduce_mean(loss)\n return loss\n\n \n if loss_name == 'contrastive_loss':\n return contrastive_loss\n \n if loss_name == 'triplet_loss':\n return triplet_loss", "def __init__(self, generator, tgt_vocab,\n normalization=\"sents\",\n label_smoothing=0.0,\n use_kl_annealing=False,\n use_kl_freebits=False,\n kl_freebits_margin=0.0,\n kl_annealing_current=0.0,\n kl_annealing_increment=0.0001,\n kl_annealing_warmup_steps=1000,\n image_loss_type='logprob',\n use_local_image_features=False,\n two_step_image_prediction=False\n ):\n self.multimodal_model_type = 'vi-model1'\n\n super(NMTVIModel1LossCompute, self).__init__(generator, tgt_vocab,\n normalization, label_smoothing)\n\n # kl annealing parameters\n self.n_model_updates = 0\n self.use_kl_annealing = use_kl_annealing\n if use_kl_annealing:\n self.kl_annealing_current = kl_annealing_current\n self.kl_annealing_increment = kl_annealing_increment\n self.kl_annealing_warmup_steps = kl_annealing_warmup_steps\n else:\n self.kl_annealing_current = 1.0\n self.kl_annealing_increment = 0.0\n self.kl_annealing_warmup_steps = 0\n\n self.use_kl_freebits = use_kl_freebits\n if use_kl_freebits:\n self.kl_freebits_margin = kl_freebits_margin\n else:\n self.kl_freebits_margin = 0.0\n\n self.image_loss_type = image_loss_type\n self.use_local_image_features = use_local_image_features\n self.two_step_image_prediction = two_step_image_prediction\n self._statistics = onmt.VIStatistics\n\n if image_loss_type == 'categorical':\n self.image_loss_criterion = nn.NLLLoss2d()", "def genLoss(self, *data):\r\n _, (x_unlab, _) = data\r\n z = self.getInputNoise(self.hypers['ul_BS'])\r\n fake_logits = self.D(self.G(z))\r\n g_losses = -1*logOneMinusSoftmax(fake_logits)[:,self.D.numClasses-1]\r\n return torch.mean(g_losses)", "def _initLoss(self):\n\n return torch.nn.MSELoss()", "def cal_loss(loss,M,H_x,Segmentation=False):\n\tdef dyn_loss(y_true,y_pred):\n\t\t# L = K.mean(K.categorical_crossentropy(y_true, y_pred) - 
K.mean(K.log(M-loss(y_pred,H_x)),axis=-1),axis=0)\n\t\tL = K.mean(K.categorical_crossentropy(y_true, y_pred) - K.log(M-loss(y_pred,H_x)),axis=0)\n\t\tif Segmentation == True:\n\t\t\tL = K.mean(K.categorical_crossentropy(y_true, y_pred) - K.log(M-loss(y_pred,H_x)),axis=-1)\n\n\t\treturn L\n\treturn dyn_loss", "def _create_loss(self):\n with tf.device('/cpu:0'):\n with tf.name_scope('loss'):\n self.loss = tf.reduce_mean(\n tf.nn.softmax_cross_entropy_with_logits(\n labels=self.labels_placeholder, \n logits=self.logits, name='loss'))", "def loss(self, x: np.ndarray, y: np.ndarray, **kwargs) -> np.ndarray:\n raise NotImplementedError", "def compute_loss(self, X, y):\r\n # INSERT YOUR CODE HERE\r\n #raise Exception('Function not yet implemented!')\r\n\r\n # Computing the loss using the below formula\r\n # Loss = -(1/m)*sum( (y_i)*log(σ(wTx_i)) + (1-y_i)*log(1 - σ(wTx_i)))\r\n # m = number of examples and i for ith example\r\n\r\n loss = 0\r\n X = np.append(X, np.array([[1]]*X.shape[0]), axis=1)\r\n # for idx,example in enumerate(X):\r\n # loss = loss + y[idx] * np.log(self.sigmoid(np.dot(example, self.w))) + (1 - y[idx]) * np.log(1 - self.sigmoid(np.dot(example, self.w)))\r\n # loss = -loss/ X.shape[0]\r\n\r\n loss = -np.mean(y * np.log(self.sigmoid(np.dot(X, self.w))) + (1 - y) * np.log(1 - self.sigmoid(np.dot(X, self.w))))\r\n return loss", "def get_loss(self, Loss, results, inputs, device):\n return", "def loss_creator(config):\n return torch.nn.BCELoss()", "def evaluate_loss(\n model,\n ds,\n loss_func_name = 'CE'\n):\n loss = 0\n if loss_func_name == 'CE':\n loss_func = tf.keras.losses.SparseCategoricalCrossentropy(\n reduction=tf.keras.losses.Reduction.SUM\n )\n else:\n raise ValueError(f'Not supported loss function {loss_func_name}!')\n n = 0\n for batch_x, batch_y in ds:\n batch_output = get_model_output(model, batch_x)\n loss += loss_func(batch_y, batch_output)\n n += batch_y.shape[0]\n return loss / n", "def compute_loss_linear(y, tx, w, method=\"mse\"):\n error = y - np.dot(tx,w)\n if method == \"mae\":\n return np.sum(np.abs(error)) / np.shape(y)[0] / 2\n elif method == \"mse\":\n return np.inner(error,error) / np.shape(y)[0] / 2 #for MSE\n else:\n raise Exception(\"Specified method unknown\")", "def loss(self, X_batch, y_batch, reg):\n pass", "def cross_entropoy_loss_naive(W, X, y, reg):\n # pylint: disable=too-many-locals\n # Initialize the loss and gradient to zero.\n loss = 0.0\n dW = np.zeros_like(W)\n\n ############################################################################\n # TODO: Compute the cross-entropy loss and its gradient using explicit #\n # loops. Store the loss in loss and the gradient in dW. If you are not #\n # careful here, it is easy to run into numeric instability. Don't forget #\n # the regularization! 
#\n ############################################################################\n num_train_sample = X.shape[0] #row of train data\n num_class = W.shape[1] #column of weight, plane,horse..\n for i in range(num_train_sample):\n p_score = X[i].dot(W) #a row of score corresponding to each class\n p_score -= np.max(p_score) #normalize, highest is 1\n\n ###compute softmax loss\n # sum of scores corresponding to different classes of a sample \n sum_score = np.sum(np.exp(p_score)) \n # each class's score over sum_score of a sample \n score_i = lambda k: np.exp(p_score[k]) / sum_score\n # for the correct label in each sample, find softmax loss over sum\n # iteration make loss sum up all samples\n loss = loss - np.log(score_i(y[i]))\n\n for k in range(num_class):\n p_k = score_i(k)\n # gradient of softmax\n dW[:, k] += (p_k - (k == y[i])) * X[i]\n\n loss /= num_train_sample\n loss += 0.5 * reg * np.sum(W * W)\n dW /= num_train_sample\n dW += reg*W\n ############################################################################\n # END OF YOUR CODE #\n ############################################################################\n\n return loss, dW", "def _vae_loss(self, x, x_generated):\n x = K.flatten(x)\n x_generated = K.flatten(x_generated)\n reconstruction_loss = self.input_shape[0] * self.input_shape[1] * \\\n binary_crossentropy(x, x_generated)\n kl_normal_loss = kl_normal(self.z_mean, self.z_log_var)\n kl_disc_loss = 0\n return reconstruction_loss + kl_normal_loss + kl_disc_loss", "def _vae_loss(self, x, x_generated):\n x = K.flatten(x)\n x_generated = K.flatten(x_generated)\n reconstruction_loss = self.input_shape[0] * self.input_shape[1] * \\\n binary_crossentropy(x, x_generated)\n kl_normal_loss = kl_normal(self.z_mean, self.z_log_var)\n kl_disc_loss = 0\n return reconstruction_loss + kl_normal_loss + kl_disc_loss", "def _vae_loss(self, x, x_generated):\n x = K.flatten(x)\n x_generated = K.flatten(x_generated)\n reconstruction_loss = self.input_shape[0] * self.input_shape[1] * \\\n binary_crossentropy(x, x_generated)\n kl_normal_loss = kl_normal(self.z_mean, self.z_log_var)\n kl_disc_loss = 0\n return reconstruction_loss + kl_normal_loss + kl_disc_loss", "def _vae_loss(self, x, x_generated):\n x = K.flatten(x)\n x_generated = K.flatten(x_generated)\n reconstruction_loss = self.input_shape[0] * self.input_shape[1] * \\\n binary_crossentropy(x, x_generated)\n kl_normal_loss = kl_normal(self.z_mean, self.z_log_var)\n kl_disc_loss = 0\n return reconstruction_loss + kl_normal_loss + kl_disc_loss", "def loss_op(pred, y, index, loss_func):\n loss = 0\n for node_type in pred:\n idx = index[node_type]\n loss += loss_func(pred[node_type][idx], y[node_type])\n return loss", "def loss(self, X, y=None):\n\n # In dev testing, the loss fnc stops at \"scores\" , unfollowed by \"softmax\" probability prediction.\n # In real testing, \"self.predict()\" needs to be implemented in Solver() class.\n \n if y is None:\n for bn_param in self.bn_params:\n bn_param[\"mode\"] = \"test\"\n\n\n W1, b1 = self.params['W1'], self.params['b1']\n gamma1, beta1 = self.params[\"sbnGamma1\"], self.params[\"sbnBeta1\"]\n bn_param1 = self.bn_params[0]\n\n W2, b2 = self.params['W2'], self.params['b2']\n gamma2, beta2 = self.params[\"sbnGamma2\"], self.params[\"sbnBeta2\"]\n bn_param2 = self.bn_params[1]\n\n W3, b3 = self.params['W3'], self.params['b3']\n gamma3, beta3 = self.params[\"bnGamma3\"], self.params[\"bnBeta3\"]\n bn_param3 = self.bn_params[2]\n\n W4, b4 = self.params['W4'], self.params['b4']\n \n # pass conv_param to 
the forward pass for the convolutional layer\n conv_param = self.conv_param\n\n # pass pool_param to the forward pass for the max-pooling layer\n pool_param = self.maxpool_params\n\n ############################################################################\n # TODO: Implement the forward pass for the three-layer convolutional net, #\n # computing the class scores for X and storing them in the scores #\n # variable. #\n ############################################################################\n \n scores = None \n cache = {}\n # def conv_sbn_relu_forward(x, w, b, gamma, beta, conv_param, bn_param): return out, cache;\n out, cache[\"layer1\"] = layer_utils.conv_sbn_relu_forward(X, W1, b1, gamma1, beta1, conv_param, bn_param1) \n out, cache[\"layer2\"] = layer_utils.conv_sbn_relu_forward(out, W2, b2, gamma2, beta2, conv_param, bn_param2)\n\n # def max_pool_forward_fast(x, pool_param): return out, cache;\n out, cache[\"maxpool\"] = fast_layers.max_pool_forward_fast(out, pool_param)\n\n # def affine_bn_relu_forward(x, w, b, gamma, beta, bn_param): return out, cache;\n \n out, cache[\"layer3\"] = layer_utils.affine_bn_relu_forward(out, W3, b3, gamma3, beta3, bn_param3)\n\n # def affine_forward(x, w, b): return out, cache;\n scores, cache[\"layer4\"] = layers.affine_forward(out, W4, b4)\n\n\n ############################################################################\n # END OF YOUR CODE #\n ############################################################################\n \n if y is None:\n return scores\n \n ############################################################################\n # TODO: Implement the backward pass for the three-layer convolutional net, #\n # storing the loss and gradients in the loss and grads variables. Compute #\n # data loss using softmax, and make sure that grads[k] holds the gradients #\n # for self.params[k]. Don't forget to add L2 regularization! 
#\n ############################################################################\n \n loss, grads = 0, {}\n\n # def softmax_loss(x, y): return loss, dscore;\n loss, dscores = layers.softmax_loss(scores, y)\n loss += 0.5 * self.reg * (np.sum(W1 * W1) + np.sum(W2 * W2) + np.sum(W3 * W3) + np.sum(W4 * W4))\n\n # def affine_backward(dout, cache): return dx, dw, db;\n dout, dW4, db4 = layers.affine_backward(dscores, cache[\"layer4\"]) \n\n # def affine_bn_relu_backward(dout, cache): return dx, dw, db, dgamma, dbeta;\n dout, dW3, db3, dgamma3, dbeta3 = layer_utils.affine_bn_relu_backward(dout, cache[\"layer3\"])\n\n # print cache[\"layer3\"]\n\n # def max_pool_backward_fast(dout, cache): return max_pool_backward_im2col(dout, real_cache);\n # def max_pool_backward_im2col(dout, cache): return dx;\n dout = fast_layers.max_pool_backward_fast(dout, cache[\"maxpool\"])\n\n # def conv_sbn_relu_backward(dout, cache): return dx, dw, db, dgamma, dbeta;\n dout, dW2, db2, dgamma2, dbeta2 = layer_utils.conv_sbn_relu_backward(dout, cache[\"layer2\"])\n _, dW1, db1, dgamma1, dbeta1 = layer_utils.conv_sbn_relu_backward(dout, cache[\"layer1\"])\n\n # reg\n grads['W4'], grads['b4'] = dW4 + self.reg * W4, db4\n \n grads['W3'], grads['b3'] = dW3 + self.reg * W3, db3\n grads[\"bnGamma3\"], grads[\"bnBeta3\"] = dgamma3, dbeta3\n\n grads['W2'], grads['b2'] = dW2 + self.reg * W2, db2\n grads[\"sbnGamma2\"], grads[\"sbnBeta2\"] = dgamma2, dbeta2\n\n grads['W1'], grads['b1'] = dW1 + self.reg * W1, db1\n grads[\"sbnGamma1\"], grads[\"sbnBeta1\"] = dgamma1, dbeta1\n\n ############################################################################\n # END OF YOUR CODE #\n ############################################################################\n \n return loss, grads", "def detector_loss(self, input, target, mask=None, loss_type=\"softmax\"):\n if loss_type == \"l2\":\n loss_func = nn.MSELoss(reduction=\"mean\")\n loss = loss_func(input, target)\n elif loss_type == \"softmax\":\n loss_func_BCE = nn.BCELoss(reduction='none').cuda()\n loss = loss_func_BCE(nn.functional.softmax(input, dim=1), target)\n loss = (loss.sum(dim=1) * mask).sum()\n loss = loss / (mask.sum() + 1e-10)\n return loss", "def loss_dataset(model, train_data, loss_f):\n loss=0\n \n for idx,(features,labels) in enumerate(train_data):\n \n predictions= model(features)\n loss+=loss_f(predictions,labels)\n \n loss/=idx+1\n return loss", "def svm_model_fn(features, labels, mode, params):\n\n feature_columns = [layers.real_valued_column(i) for i in features.keys()]\n example_id_column(features)\n\n weight_column_name = params.get(\"weight_column_name\")\n\n head = head_lib.binary_svm_head(\n weight_column_name=weight_column_name,\n enable_centered_bias=False)\n\n optimizer = sdca_optimizer.SDCAOptimizer(\n example_id_column=\"index\",\n num_loss_partitions=params[\"num_loss_partitions\"],\n symmetric_l1_regularization=params[\"l1_regularization\"],\n symmetric_l2_regularization=params[\"l2_regularization\"])\n\n chief_hook = linear._SdcaUpdateWeightsHook()\n update_weights_hook = chief_hook\n\n if not isinstance(optimizer, sdca_optimizer.SDCAOptimizer):\n raise ValueError(\"Optimizer must be of type SDCAOptimizer\")\n\n if isinstance(head,\n head_lib._BinarySvmHead): # pylint: disable=protected-access\n loss_type = \"hinge_loss\"\n elif isinstance(head,\n head_lib._BinaryLogisticHead): # pylint:\n # disable=protected-access\n loss_type = \"logistic_loss\"\n elif isinstance(head,\n head_lib._RegressionHead): # pylint:\n # disable=protected-access\n assert 
head.logits_dimension == 1, (\"SDCA only applies for \"\n \"logits_dimension=1.\")\n loss_type = \"squared_loss\"\n else:\n raise ValueError(\"Unsupported head type: {}\".format(head))\n\n parent_scope = \"linear\"\n\n with variable_scope.variable_op_scope(\n features.values(), parent_scope) as scope:\n features = features.copy()\n features.update(layers.transform_features(features, feature_columns))\n logits, columns_to_variables, bias = (\n layers.weighted_sum_from_feature_columns(\n columns_to_tensors=features,\n feature_columns=feature_columns,\n num_outputs=1,\n scope=scope))\n\n linear._add_bias_column(feature_columns, features, bias,\n columns_to_variables)\n\n def _train_op_fn(unused_loss):\n global_step = contrib_variables.get_global_step()\n sdca_model, train_op = optimizer.get_train_step(columns_to_variables,\n weight_column_name,\n loss_type, features,\n labels, global_step)\n if update_weights_hook is not None:\n update_weights_hook.set_parameters(sdca_model, train_op)\n return train_op\n\n model_fn_ops = head.create_model_fn_ops(\n features=features,\n labels=labels,\n mode=mode,\n train_op_fn=_train_op_fn,\n logits=logits)\n if update_weights_hook is not None:\n return model_fn_ops._replace(\n training_chief_hooks=(model_fn_ops.training_chief_hooks +\n [update_weights_hook]))\n return model_fn_ops", "def loss(data, y_pred):\n # TODO: Try using points other than the training data points for the divergence calculation.\n y_true = data[:,:2]\n p1 = data[:,2:5]\n p2 = data[:,5:8]\n p3 = data[:,8:11]\n p4 = data[:,11:14]\n\n ### Calculate divergence using model predictions:\n\n # Step 1: Use the model to calculate predicted wind field in the surrounding points p1, p2, p3 and p4.\n y_pred_p1 = model(p1)\n y_pred_p2 = model(p2)\n y_pred_p3 = model(p3)\n y_pred_p4 = model(p4)\n\n # Step 2: Calculate the partial derivatives with a three-point centered difference.\n scale_x = self.scaler_data.scale_[0] #scale-factor for x\n scale_y = self.scaler_data.scale_[1] #scale-factor for y\n\n dudx = (y_pred_p1[:, 0] - y_pred_p3[:, 0]) / (p1[:,0] - p3[:,0]) # <- pj = transformed data\n dvdy = (y_pred_p2[:, 1] - y_pred_p4[:, 1]) / (p2[:,1] - p4[:,1]) # <- pj = transformed data\n\n # Step 3: Calculate the divergence.\n divergence = ( dudx / scale_x + dvdy / scale_y ) * np.mean([scale_x, scale_y])\n #tf.print(K.mean(K.abs(divergence)))\n\n # Step 4: Calculate and return total loss.\n return K.mean(K.square(y_true - y_pred)) + gamma*K.mean(K.square(divergence))" ]
[ "0.6997603", "0.69757587", "0.6954347", "0.69388586", "0.6791079", "0.6756301", "0.6728832", "0.67196614", "0.66558504", "0.6613795", "0.6579743", "0.6554059", "0.6525655", "0.65236473", "0.64979446", "0.6477414", "0.6435054", "0.64005077", "0.6381835", "0.63708454", "0.6361705", "0.6336853", "0.6320483", "0.6306843", "0.6295718", "0.6293777", "0.62927514", "0.62799764", "0.6230095", "0.62285686", "0.6204874", "0.6178764", "0.61684227", "0.6167166", "0.61668235", "0.61382437", "0.6134128", "0.6126177", "0.6123747", "0.6096822", "0.6091271", "0.60677296", "0.60651624", "0.606148", "0.60512745", "0.6050735", "0.60501134", "0.60480076", "0.60428035", "0.6037901", "0.6032506", "0.601192", "0.5997492", "0.59930193", "0.59886813", "0.5986432", "0.59721255", "0.59716576", "0.59700733", "0.5969907", "0.59660375", "0.5959973", "0.5949701", "0.5939034", "0.5937922", "0.59376293", "0.5935106", "0.5924024", "0.59114105", "0.589989", "0.58878404", "0.58754045", "0.58751196", "0.5872902", "0.58713764", "0.5851484", "0.5848564", "0.58485126", "0.5845113", "0.58415145", "0.58412826", "0.58190703", "0.58163935", "0.580929", "0.5804573", "0.57932776", "0.5791572", "0.57910085", "0.57812285", "0.57799304", "0.57785535", "0.57711303", "0.57711303", "0.57711303", "0.57711303", "0.5771082", "0.5769985", "0.57606095", "0.5754136", "0.5752275", "0.57519335" ]
0.0
-1
create a softmax loss for the network given in the argument
def compile_update_softmax(nnet, inputs, targets): floatX = Cfg.floatX C = Cfg.C final_layer = nnet.all_layers[-1] trainable_params = lasagne.layers.get_all_params(final_layer, trainable=True) # Regularization if Cfg.weight_decay: l2_penalty = (floatX(0.5) / C) * get_l2_penalty(nnet, Cfg.include_bias) else: l2_penalty = T.cast(0, dtype='floatX') # Backpropagation prediction = lasagne.layers.get_output(final_layer, inputs=inputs, deterministic=False) if Cfg.ad_experiment: train_loss = T.mean(l_objectives.binary_crossentropy( prediction.flatten(), targets), dtype='floatX') train_acc = T.mean(l_objectives.binary_accuracy(prediction.flatten(), targets), dtype='floatX') else: train_loss = T.mean(l_objectives.categorical_crossentropy(prediction, targets), dtype='floatX') train_acc = T.mean(T.eq(T.argmax(prediction, axis=1), targets), dtype='floatX') train_obj = T.cast(train_loss + l2_penalty, dtype='floatX') updates = get_updates(nnet, train_obj, trainable_params, solver=nnet.solver) nnet.backprop = theano.function([inputs, targets], [train_obj, train_acc], updates=updates) # Forwardpropagation test_prediction = lasagne.layers.get_output(final_layer, inputs=inputs, deterministic=True) if Cfg.ad_experiment: test_loss = T.mean(l_objectives.binary_crossentropy( test_prediction.flatten(), targets), dtype='floatX') test_acc = T.mean(l_objectives.binary_accuracy( test_prediction.flatten(), targets), dtype='floatX') else: test_loss = T.mean(l_objectives.categorical_crossentropy( test_prediction, targets), dtype='floatX') test_acc = T.mean(T.eq(T.argmax(test_prediction, axis=1), targets), dtype='floatX') test_obj = T.cast(test_loss + l2_penalty, dtype='floatX') nnet.forward = theano.function([inputs, targets], [test_obj, test_acc, test_prediction, l2_penalty, test_loss])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_loss(self, xs, y):\n \"*** YOUR CODE HERE question 4 ***\"\n return nn.SoftmaxLoss(self.run(xs), y)", "def get_loss(self, xs, y):\n \"*** YOUR CODE HERE ***\"\n y_pred = self.run(xs)\n return nn.SoftmaxLoss(y_pred,y)", "def softmax_loss_naive(W, X, y, reg):\n # Initialize the loss and gradient to zero.\n loss = 0.0\n dW = np.zeros_like(W)\n\n #############################################################################\n # TODO: Compute the softmax loss and its gradient using explicit loops. #\n # Store the loss in loss and the gradient in dW. If you are not careful #\n # here, it is easy to run into numeric instability. Don't forget the #\n # regularization! #\n #############################################################################\n num_train = X.shape[0]\n num_classe = W.shape[1]\n loss = 0.0\n\n for i in range(num_train): #pour chaque image de l'ensemble d'entrainement\n scores = X[i].dot(W)\n scores -= max(scores)\n\n correct_class_score = scores[y[i]] #y[i]=c\n e_syi = np.exp(correct_class_score)\n e_sj = np.sum(np.exp(scores))\n\n loss -= np.log(e_syi/e_sj)\n\n for k in range(num_classe): #pour chaque classe\n dW[:, k] += ((np.exp(scores[k])/e_sj) - (k == y[i])) * X[i].T\n\n # Right now the loss is a sum over all training examples, but we want it\n # to be an average instead so we divide by num_train.\n loss /= num_train\n dW/= num_train\n\n # Add regularization to the loss.\n loss += reg * np.sum(W * W)\n dW += 2 * reg * W\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n\n return loss, dW", "def get_loss(self, x, y):\n \"*** YOUR CODE HERE question 3 ***\"\n return nn.SoftmaxLoss(self.run(x), y)", "def get_loss(self, xs, y):\n return nn.SoftmaxLoss(self.run(xs), y)", "def softmax_loss(x, y):\n #raise NotImplementedError\n #######################################################################\n # #\n # #\n # TODO: YOUR CODE HERE #\n # #\n # #\n #######################################################################\n N=x.shape[0]\n\n \n x-=np.max(x,axis=1,keepdims=True)\n temp=np.exp(x)\n dr_vec=np.sum(temp,axis=1,keepdims=True)\n\n nr=(x[np.arange(N),y]).reshape([N,1])\n loss=np.sum(-(nr)+np.log(dr_vec))\n \n loss=(loss/N)\n temp/=dr_vec\n temp[np.arange(N),y] -= 1\n \n dx = temp/N\n \n return loss, dx", "def softmax_loss_naive(W, X, y, reg):\n # Initialize the loss and gradient to zero.\n loss = 0.0\n dW = np.zeros_like(W)\n\n # needed for calculations\n num_train = X.shape[1]\n\n for i in xrange(num_train):\n # calculate the scores for the current training example with the current weights\n scores = W.dot(X[:, i])\n # scale by the max for numerical stability\n scores -= np.max(scores)\n # calculate the loss\n loss += -scores[y[i]] + np.log(np.sum(np.exp(scores)))\n\n ## L' = -1_y + 1/(\\sum_{}^{} e^f) * e^f\n # e^f\n scores = np.exp(scores)\n # 1/(\\sum_{}^{} e^f)\n scores /= np.sum(scores)\n # -1_y\n scores[y[i]] -= 1\n\n # now scale it by the data\n # we need to use [:, np.newaxis] because when you make a X by 1 dimension slices in numpy the 1 dimension is null\n dW += scores[:, np.newaxis].dot(X[:, i][:, np.newaxis].T)\n\n\n # get the average loss\n loss /= num_train\n # get the average gradient\n dW /= num_train\n\n # regularize the loss function\n loss += 0.5 * reg * np.sum(W * W)\n\n return loss, dW", "def softmax_loss_naive(W, X, y, reg):\n # Initialize the loss and gradient to zero.\n loss = 0.0\n dW = 
np.zeros_like(W)\n\n #############################################################################\n # TODO: Compute the softmax loss and its gradient using explicit loops. #\n # Store the loss in loss and the gradient in dW. If you are not careful #\n # here, it is easy to run into numeric instability. Don't forget the #\n # regularization! #\n #############################################################################\n # *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n # softmax P(Y=k|X=x_i) = e^{s_k}/∑e^{s_j} softmax loss = -log(softmax)\n num_train = X.shape[0]\n num_class = W.shape[1]\n for i in range(num_train):\n scores = X[i].dot(W) # get scores\n max_score = np.max(scores)\n scores -= max_score # 考虑数值计算稳定性 softmax = (e^s_c - max)/∑(e^s_j - max)\n correct_score = scores[y[i]] # score_correct\n P_ic = np.exp(correct_score)/np.sum(np.exp(scores))\n loss += -np.log(P_ic)\n for j in range(num_class):\n if j == y[i]:\n dW[:, j] += (P_ic - 1) * X[i].T\n else:\n P_ij = np.exp(scores[j])/np.sum(np.exp(scores))\n dW[:, j] += P_ij * X[i].T\n \n \n loss /= num_train\n loss += reg*np.sum(W*W)\n dW /= num_train\n dW += 2 * reg * W\n # *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n\n return loss, dW", "def get_loss(self, x, y):\n \"*** YOUR CODE HERE ***\"\n y_pred = self.run(x)\n return nn.SoftmaxLoss(y_pred,y)", "def softmax_loss_naive(W, X, y, reg):\n # Initialize the loss and gradient to zero.\n # print \"dW's shape\", dW.shape\n # compute the loss and the gradient\n num_classes = W.shape[1]\n num_train = X.shape[0]\n loss = 0.0\n dW = np.zeros_like(W)\n\n #############################################################################\n # TODO: Compute the softmax.ipynb loss and its gradient using explicit loops. #\n # Store the loss in loss and the gradient in dW. If you are not careful #\n # here, it is easy to run into numeric instability. Don't forget the #\n # regularization! 
#\n #############################################################################\n # For every training image\n for train_image in xrange(num_train):\n # Multiply the weights by the image to get the scores\n scores = X[train_image].dot(W)\n # print(scores)\n # And then get the correct score\n correct_label = y[train_image]\n correct_score = scores[correct_label]\n # TODO: Right up to here\n # And then get the score of every other classifier\n all_scores = np.sum(scores)\n # Add a normalizing factor for numeric stability\n normalizing_constant = np.max(scores)\n scores -= normalizing_constant\n correct_score -= normalizing_constant\n #Calculating the softmax values\n softmax = np.exp(correct_score)/np.sum(np.exp(scores))\n\n # print(\"Correct score softmax\",softmax)\n\n # And calculating the loss\n loss += -1*np.log(softmax)\n # print loss\n #TODO: Loss computation is also correct\n\n # And calculating the gradient\n\n # First, update the Weight matrix with the correct example's derivative\n dW[:,correct_label] += (softmax-1)*np.transpose(X[train_image])\n\n # Then do the same for the wrong cases\n incorrect_labels = [x for x in xrange(num_classes) if x != correct_label]\n # Now, update the weights\n for label_index in incorrect_labels:\n #Calculating the softmax for a wrong label\n incorrect_label_softmax = np.exp(scores[label_index])/(np.sum(np.exp(scores)))\n # Calculating the derivative\n necessary_weight = incorrect_label_softmax*np.transpose(X[train_image])\n # Updating the weights\n dW[:,label_index] += necessary_weight\n\n\n # Divide the loss\n loss /= num_train\n dW /= num_train\n\n # Now, do regularization\n loss += 0.5*reg*np.sum(W*W)# Penalize big weights\n dW += reg*W\n\n\n\n\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n\n return loss, dW", "def softmax_loss_naive(W, X, y, reg):\r\n # Initialize the loss and gradient to zero.\r\n loss = 0.0\r\n dW = np.zeros_like(W)\r\n num_train = X.shape[1] # d*n\r\n num_class = W.shape[0]\r\n\r\n #############################################################################\r\n # Compute the softmax loss and its gradient using explicit loops. #\r\n # Store the loss in loss and the gradient in dW. If you are not careful #\r\n # here, it is easy to run into numeric instability. Don't forget the #\r\n # regularization! 
#\r\n #############################################################################\r\n loss = 0.0\r\n for i in range(num_train):\r\n X_i = X[:,i] # D*1\r\n score_i = W.dot(X_i)\r\n score_i -= np.max(score_i) #C*1 but keepdims = false so it becomes 1*C\r\n exp_score_i = np.exp(score_i)\r\n probs_i = exp_score_i/np.sum(exp_score_i) #1*C\r\n correct_logprobs_i = -np.log(probs_i[y[i]])\r\n loss += correct_logprobs_i\r\n \r\n dscore_i = probs_i.reshape(num_class,-1)#c*1\r\n dscore_i[y[i]] -= 1 #C*1\r\n X_i = X_i.reshape(1,-1)# 1*D\r\n dW += dscore_i.dot(X_i)\r\n \r\n loss /= num_train\r\n loss += 0.5*reg*np.sum(W*W)\r\n\r\n dW /= num_train\r\n dW += reg*W\r\n \r\n return loss, dW", "def softmax(x, name):\n with tf.name_scope(name):\n outputs = tf.nn.softmax (x)\n # Return layer's output\n return outputs", "def softmax_loss_naive(W, X, y, reg):\n # Initialize the loss and gradient to zero.\n loss = 0.0\n dW = np.zeros_like(W)\n num_train = X.shape[1]\n num_classes = W.shape[0]\n #############################################################################\n # Compute the softmax loss and its gradient using explicit loops. #\n # Store the loss in loss and the gradient in dW. If you are not careful #\n # here, it is easy to run into numeric instability. Don't forget the #\n # regularization! #\n #############################################################################\n for i in range(num_train): # for each image\n # compute the score\n scores = W.dot(X[:, i])\n\n # shift the values of f so that the highest number is 0:\n scores -= np.max(scores)\n\n # compute the loss\n loss += -np.log(np.exp(scores[y[i]]) / np.sum(np.exp(scores)))\n\n # gradient(https://github.com/seyedamo/cs231n/blob/master/assignment1/cs231n/classifiers/softmax.py)\n scores = np.exp(scores)\n scores /= np.sum(scores)\n for j in range(num_classes): # for each class\n dW[j, :] += scores[j] * X[:, i].T\n\n # dW wrt correct class scores w_yi\n dW[y[i], :] += -X[:, i].T\n\n # Average the loss \n loss /= num_train\n\n # Add regularization to the loss.\n loss += 0.5 * reg * np.sum(W * W)\n\n # average of the gradient\n dW /= num_train\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n return loss, dW", "def get_loss(self, xs, y):\n \"*** YOUR CODE HERE ***\"\n predictedY = self.run(xs)\n return nn.SoftmaxLoss(predictedY, y)\n # return nn.SquareLoss(predictedY, y)", "def softmax_loss_naive(W, X, y, reg):\n # Initialize the loss and gradient to zero.\n loss = 0.0\n dW = np.zeros_like(W)\n num_train=X.shape[0]\n num_class=W.shape[1]\n num_feature=X.shape[1]\n #############################################################################\n # TODO: Compute the softmax loss and its gradient using explicit loops. #\n # Store the loss in loss and the gradient in dW. If you are not careful #\n # here, it is easy to run into numeric instability. Don't forget the #\n # regularization! 
#\n #############################################################################\n for i in range(num_train):\n #W*Xi C*1\n x=np.exp(np.dot(W.T,X[i,:]))\n denominator=np.sum(x)\n numerator=x[y[i]]\n loss-=np.log(numerator/denominator)\n #numerator and denominator\n #for j in range(num_class):\n normalize_score=x/denominator\n nm=np.reshape(normalize_score, (num_class, 1))\n \n #CxD\n dscore=nm.dot(np.reshape(X[i,:],(1,num_feature)))\n #print(dscore.shape)\n\n dscore[y[i],:]-=X[i,:]\n dW+=dscore.T\n\n loss/=num_train\n dW = dW/num_train + reg*W\n #\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n\n return loss, dW", "def softmax_loss(x, y):\n loss, dx = None, None\n ###########################################################################\n # TODO: Implement the loss and gradient for softmax classification. This #\n # will be similar to the softmax loss vectorized implementation in #\n # cs231n/classifiers/softmax.py. #\n ###########################################################################\n # *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n num_train = x.shape[0]\n\n x = np.exp(x)\n temp_sum = np.sum(x, axis = 1, keepdims = True)\n x = x / temp_sum\n softmax_result = x\n trans_y = np.zeros((x.shape[0],x.shape[1]))\n trans_y[np.arange(x.shape[0]), y] += 1\n x = - np.log(x)\n x = x * trans_y\n x_sum = np.sum(x)\n loss = x_sum / num_train\n loss = loss + \n\n dx = softmax_result - trans_y\n dx = dx / num_train\n\n\n pass\n\n # *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n ###########################################################################\n # END OF YOUR CODE #\n ###########################################################################\n return loss, dx", "def softmax_loss_naive(W, X, y, reg):\n # Initialize the loss and gradient to zero.\n loss = 0.0\n dW = np.zeros_like(W)\n num_train = X.shape[0]\n\n #############################################################################\n # TODO: Compute the softmax loss and its gradient using explicit loops. #\n # Store the loss in loss and the gradient in dW. If you are not careful #\n # here, it is easy to run into numeric instability. Don't forget the #\n # regularization! #\n #############################################################################\n scores = X.dot(W)\n scores_exp = np.exp(scores-np.max(scores, axis=1, keepdims=True))\n\n sum = np.sum(scores_exp, axis=1, keepdims=True)\n probability = scores_exp/sum\n #list containing the correct classification\n indices = [range(num_train), y]\n correct_class_score = probability[indices]\n\n #calculate -log(prob_y) and take the sum across all training examples\n loss = np.sum(-np.log(correct_class_score))\n loss /= num_train\n loss += 0.5 * reg * np.sum(W * W)\n\n #Compute Gradient\n probability[indices] -=1\n dW = X.T.dot(probability)\n dW /= num_train\n dW += .5 * reg * W\n\n\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n\n return loss, dW", "def softmax_loss_naive(W, X, y, reg):\n # Initialize the loss and gradient to zero.\n loss = 0.0\n dW = np.zeros_like(W)\n\n #############################################################################\n # TODO: Compute the softmax loss and its gradient using explicit loops. #\n # Store the loss in loss and the gradient in dW. 
If you are not careful #\n # here, it is easy to run into numeric instability. Don't forget the #\n # regularization! #\n #############################################################################\n # *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n\n for i in range(X.shape[0]):\n# c = np.matmul(X[i],W)\n# c -= np.amax(c)\n# e_c = np.exp(c)\n# denom = np.sum(e_c)\n# #Nice fact: we know that the largest element in c will also be the largest softmax value, so we only\n# # need to transform that one value. \n# sm_c = e_c/denom\n# \n# loss1 += -np.log(sm_c[y[i]])\n\n # Need to make this whole dang thing more numerically stable. \n c = np.matmul(X[i],W)\n c -= np.amax(c)\n e_c = np.exp(c)\n denom = np.sum(e_c)\n sm_c = e_c/denom\n\n loss += np.log(denom) - c[y[i]]\n# print(-np.log(sm_c[y[i]]) - (np.log(denom)-c[y[i]]))\n\n \"\"\"They are basically the same value\"\"\"\n\n # now computing some gradients\n dL_ds = sm_c\n dL_ds[y[i]] -= 1\n #note that sm_c is modified now!\n \"\"\" #ah, something fundamentally different is happening with numpy. When an array element\n is changed, it's really changed for good. And it changes for all pointers pointing to same object.\n yikes. Actually it's the same with python lists. Anything pointing to And underlying object can\n change that underlying object for all things that point to it. Alas.\"\"\"\n# import pdb; pdb.set_trace()\n \"\"\"Okay I just coudln't bear the for loops...\"\"\"\n dW_update = np.matmul(X[i].reshape(1,X.shape[1]).T,dL_ds[np.newaxis,:])\n dW+=dW_update\n # for n in range(W.shape[0]):\n# for m in range(W.shape[1]):\n# if m == y[i]:\n# dW[n,m] += X[i,n]*(sm_c[m]-e_c[m])\n# else:\n# dW[n,m] += X[i,n]*sm_c[m]\n\n # should be numerically unstable I think.\n\n loss /= X.shape[0]\n loss += reg*np.sum(W*W)\n\n dW /= X.shape[0]\n dW += reg*2*W\n # *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n\n return loss, dW", "def softmax_loss_naive(W, X, y, reg):\n # Initialize the loss and gradient to zero.\n loss = 0.0\n dW = np.zeros_like(W)\n num_train = X.shape[0]\n num_class = W.shape[1]\n #scores = np.zeros(num_train,num_class)\n scores = X.dot(W)\n #############################################################################\n # TODO: Compute the softmax loss and its gradient using explicit loops. #\n # Store the loss in loss and the gradient in dW. If you are not careful #\n # here, it is easy to run into numeric instability. Don't forget the #\n # regularization! 
#\n #############################################################################\n for i in range(num_train):\n # compute Li\n fmax= np.max(scores[i])\n scores[i] -= fmax\n correct_class_score = scores[i,y[i]]\n M = np.exp(correct_class_score)/np.sum(np.exp(scores[i]))\n loss += -np.log(M)\n for j in range(num_class):\n N = np.exp(scores[i,j])/np.sum(np.exp(scores[i]))\n if j ==y[i]:\n dW[:,y[i]]+= (M-1)*X[i].T\n else:\n dW[:,j] += N*X[i].T \n loss /= num_train\n loss += reg*np.sum(W*W)\n dW /= num_train \n dW += 2*reg*W \n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n\n return loss, dW", "def get_loss(self, x, y):\n \"*** YOUR CODE HERE ***\"\n predictedY = self.run(x)\n return nn.SoftmaxLoss(predictedY, y)", "def softmax_weighted_loss(pred, gr_truth):\n gr_truth = tf.cast(gr_truth, dtype='int32')\n gr_truth = one_hot(gr_truth)\n softmax_pred = tf.nn.softmax(pred)\n loss = 0\n\n for i in range(8):\n gti = gr_truth[:, :, :, :, i]\n predi = softmax_pred[:, :, :, :, i]\n weighted = 1 - (tf.reduce_sum(gti) / tf.reduce_sum(gr_truth))\n loss += -tf.reduce_mean(weighted * gti * tf.math.log(tf.clip_by_value(predi, 0.005, 1)))\n\n return loss", "def softmax_loss_naive(W, X, y, reg):\n # Initialize the loss and gradient to zero.\n loss = 0.0\n dW = np.zeros_like(W)\n\n #############################################################################\n # TODO: Compute the softmax loss and its gradient using explicit loops. #\n # Store the loss in loss and the gradient in dW. If you are not careful #\n # here, it is easy to run into numeric instability. Don't forget the #\n # regularization! #\n #############################################################################\n num_classes = W.shape[1]\n #print('num_classes = ', num_classes)\n num_train = X.shape[0]\n #print('num_train = ', num_train)\n \n min_score = 0.0\n shifted_scores = np.zeros(W.shape[1])\n #max_score = np.zeros(W.shape[1])\n max_score = 0.0\n \n loss_array = np.zeros(y.shape[0])\n for i in range(num_train):\n scores = X[i].dot(W)\n #print('scores dimensions = ', scores.shape)\n #print('scores = ', scores)\n #print('i =', i, 'y = ', y[i])\n min_score = np.min(scores)\n max_score = np.max(scores)\n #print(min_score,max_score)\n shifted_scores = np.multiply(-1,scores + abs(min_score))\n #print(scores)\n #print(shifted_scores)\n exp_scores = np.exp(shifted_scores)\n norm = np.amax(exp_scores)\n norm_scores = np.divide(exp_scores,norm)\n loss_array[i] = np.multiply(-1,np.log(norm_scores[y[i]]/(np.sum(norm_scores)-norm_scores[y[i]])))\n #print(loss_array)\n for j in range(num_classes): \n\t\n if j == y[i]: \n dW[:,j] = np.multiply(norm_scores[y[i]],1-norm_scores[y[i]])\n else:\n dW[:,j] = np.multiply(-1,np.multiply(norm_scores[y[i]],norm_scores[y[j]]))\n\t\t\t\n\t\t\t\n loss = np.amax(loss_array)\n\n # Add regularization to the loss.\n loss = 0.5 * reg * np.sum(W * W) + loss\n \n \n pass\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n\n return loss, dW", "def softmax_loss_naive(W, X, y, reg):\n # Initialize the loss and gradient to zero.\n loss = 0.0\n dW = np.zeros_like(W)\n\n #############################################################################\n # TODO: Compute the softmax loss and its gradient using explicit loops. 
#\n # Store the loss in loss and the gradient in dW. If you are not careful #\n # here, it is easy to run into numeric instability. Don't forget the #\n # regularization! #\n #############################################################################\n pass\n num_tran = X.shape[0]\n num_classes = W.shape[1]\n loss_par =np.zeros(num_tran)\n\n Score = np.dot(X,W)\n expS = np.exp(Score)\n # for i in num_tran:\n sumS = np.sum(expS,axis=1)\n sumS = sumS.reshape(sumS.shape[0],1)\n normalize = np.divide(expS,sumS)\n softmax = -np.log(normalize)\n\n for i in np.arange(num_tran):\n loss_par[i]=softmax[i, y[i]]\n for j in np.arange(num_classes) :\n if j!=y[i]:\n # dW[:,j]+=1/normalize[i,y[i]]*expS[i,y[i]]*expS[i,j]/np.power(sumS[i],2) *X[i,:]\n dW[:,j]+=expS[i,j]/sumS[i] *X[i,:]\n else:\n # dW[:,y[i]]+=-1/normalize[i,y[i]]*expS[i,y[i]]*(sumS[i]-expS[i,y[i]])/np.power(sumS[i],2) *X[i,:]\n dW[:,y[i]]+=-(sumS[i]-expS[i,y[i]])/sumS[i] *X[i,:]\n\n dW /=num_tran\n\n loss = np.sum(loss_par) / num_tran\n # print num_tran,loss\n\n dW+=reg*W\n loss+=0.5*reg*np.sum(W*W)\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n\n return loss, dW", "def softmax_loss_naive(W, X, y, reg):\n # Initialize the loss and gradient to zero.\n loss = 0.0\n dW = np.zeros_like(W)\n\n #############################################################################\n # TODO: Compute the softmax loss and its gradient using explicit loops. #\n # Store the loss in loss and the gradient in dW. If you are not careful #\n # here, it is easy to run into numeric instability. Don't forget the #\n # regularization! #\n #############################################################################\n #############################################################################\n # START OF YOUR CODE #\n #############################################################################\n # construct a one-hot vector for y\n onehot_y = np.zeros((y.size, W.shape[1]))\n onehot_y[np.arange(y.size), y] = 1\n dW = dW.T\n for i in range(y.shape[0]):\n f = np.dot(X[i], W)\n \n for j in range(W.shape[1]):\n e_f = np.exp(f - np.max(f))\n softmax = e_f / e_f.sum()\n loss -= onehot_y[i][j] * np.log(softmax[j])\n dW[j] -= X[i] * (onehot_y[i][j] - softmax[j])\n \n loss = loss / y.shape[0] + reg * np.linalg.norm(W)\n dW = dW.T / y.shape[0] + 2 * reg * W\n\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n \n\n return loss, dW", "def softmax_loss_naive(W, X, y, reg):\n # Initialize the loss and gradient to zero.\n loss = 0.0\n dW = np.zeros_like(W)\n\n #############################################################################\n # TODO: Compute the softmax loss and its gradient using explicit loops. #\n # Store the loss in loss and the gradient in dW. If you are not careful #\n # here, it is easy to run into numeric instability. Don't forget the #\n # regularization! 
#\n #############################################################################\n # *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n\n num_train = X.shape[0]\n # print(\"num_train:\", num_train)\n num_classes = W.shape[1]\n # print(\"num_classes:\", num_classes)\n \n for i in range(num_train):\n scores = X[i].dot(W) # scores is 1 * C\n correct_class = y[i]\n \n # LOSS DUE TO TRAINING SAMPLE = -log(exp^correct_score / sum(exp^all_other_scores))\n log_c = np.max(scores)\n scores -= log_c\n correct_class_score = scores[correct_class]\n exp_scores = np.exp(scores)\n sum_exp_scores = np.sum(np.exp(scores))\n proportion = np.exp(correct_class_score) / sum_exp_scores\n loss -= np.log(proportion)\n # print(proportion)\n \n # ALTERNATIVELY: (we split the log)\n# loss -= scores[y[i]]\n# loss += np.log(np.sum(np.exp(X[i].dot(W))))\n \n # UPDATE GRADIENT\n for j in range(num_classes):\n p = np.exp(scores[j]) / sum_exp_scores # \"probability\" of class j\n dW[:,j] += (p - (j == y[i])) * X[i,:]\n # dW is D by C\n\n loss /= num_train\n loss += reg * np.sum(W * W) \n dW /= num_train\n dW += reg * 2 * W\n\n # *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n \n return loss, dW", "def softmax_loss_naive(W, X, y, reg):\n # Initialize the loss and gradient to zero.\n loss = 0.0\n dW = np.zeros_like(W)\n\n #############################################################################\n # TODO: Compute the softmax loss and its gradient using explicit loops. #\n # Store the loss in loss and the gradient in dW. If you are not careful #\n # here, it is easy to run into numeric instability. Don't forget the #\n # regularization! #\n #############################################################################\n num_classes = W.shape[1]\n num_train = X.shape[0]\n for i in range(num_train):\n score = X[i].dot(W)\n exp_score = np.exp(score)\n probability = exp_score[y[i]] / exp_score.sum()\n loss += -np.log(probability)\n dp = -1 / probability\n for j in range(num_classes):\n ds = np.exp(score[j])\n if j == y[i]:\n des = (exp_score.sum() - exp_score[y[i]]) / np.square(exp_score.sum())\n else:\n des = -(exp_score[y[i]]) / np.square(exp_score.sum())\n dW[:, j] += X[i].T * ds * des * dp # chain rule\n\n loss /= num_train\n dW /= num_train\n\n loss += 0.5 * reg * np.sum(W * W)\n dW += reg * W\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n\n return loss, dW", "def softmax_loss_naive(W, X, y, reg):\n # Initialize the loss and gradient to zero.\n loss = 0.0\n dW = np.zeros_like(W)\n\n #############################################################################\n # TODO: Compute the softmax loss and its gradient using explicit loops. #\n # Store the loss in loss and the gradient in dW. If you are not careful #\n # here, it is easy to run into numeric instability. Don't forget the #\n # regularization! 
#\n #############################################################################\n num_classes = W.shape[1]\n num_train = X.shape[0]\n\n for i in xrange(num_train):\n scores = X[i].dot(W)\n\n # Normalization trick to resolve numerical instability\n # when dealing with the large exponential terms.\n scores -= np.max(scores)\n\n # Cache some terms that are used repeatedly.\n exp_scores = np.exp(scores)\n sum_exp_scores = np.sum(exp_scores)\n correct_class_score = scores[y[i]]\n \n # Update the loss \n loss -= correct_class_score\n loss += np.log(sum_exp_scores)\n\n # Update the gradient\n dW[:,y[i]] -= X[i,:].T\n for j in xrange(num_classes):\n dW[:,j] += ((X[i,:].T * exp_scores[j]) / sum_exp_scores)\n\n \n # Right now the loss is a sum over all training examples, but we want it\n # to be an average instead so we divide by num_train.\n loss /= num_train\n\n dW /= num_train\n\n # Add regularization to the loss.\n loss += 0.5 * reg * np.sum(W * W)\n\n dW += reg*W\n \n pass\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n\n return loss, dW", "def softmax_loss_naive(W, X, y, reg):\n # Initialize the loss and gradient to zero.\n loss = 0.0\n dW = np.zeros_like(W)\n\n #############################################################################\n # TODO: Compute the softmax loss and its gradient using explicit loops. #\n # Store the loss in loss and the gradient in dW. If you are not careful #\n # here, it is easy to run into numeric instability. Don't forget the #\n # regularization! #\n #############################################################################\n # *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n num_train = X.shape[0]\n num_class = W.shape[1]\n l = np.zeros([num_train,1])\n for i in range(num_train):\n scores = np.dot(X[i], W)\n f_yi = scores[y[i]]\n exp_num = np.exp(f_yi)\n exp = np.exp(scores)\n exp_deno = np.sum(exp)\n for j in range(num_class):\n if (j == y[i]):\n dW[:,j] -= X[i,:].transpose()\n dW[:,j] += (np.exp(scores[j]) / exp_deno) * X[i,:].transpose()\n l[i] = -np.log(exp_num/exp_deno)\n\n loss = np.sum(l)/num_train\n loss += reg * np.sum(W*W)\n dW /= num_train \n dW += 2 * reg * W\n # *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n\n return loss, dW", "def softmax_loss(x, y):\n # softmax\n num = np.exp(x)\n den = np.sum(num, axis=1)\n softmax = num/den[:, None]\n N = x.shape[0]\n\n # compute the los per class\n loss = softmax[np.arange(N), y]\n loss = -np.log(loss)\n\n # sum all the losses and divide by number of class\n # Also add the regularization loss term\n loss = np.sum(loss)/N \n \n dscores = softmax\n dscores[np.arange(N), y] -= 1\n dscores /= N\n\n return loss, dscores", "def softmax_loss_naive(W, X, y, reg):\n # Initialize the loss and gradient to zero.\n loss = 0.0\n dW = np.zeros_like(W)\n\n #############################################################################\n # TODO: Compute the softmax loss and its gradient using explicit loops. #\n # Store the loss in loss and the gradient in dW. If you are not careful #\n # here, it is easy to run into numeric instability. Don't forget the #\n # regularization! 
#\n #############################################################################\n num_train = X.shape[0]\n num_classes = W.shape[1]\n for i in xrange(num_train):\n scores = X[i, :].dot(W)\n scores -= np.max(scores)\n correct_scores = scores[y[i]]\n score_sum = np.sum(np.exp(scores))\n h = np.exp(correct_scores) / score_sum\n loss += -np.log(h)\n for j in xrange(num_classes):\n if j == y[i]:\n dW[:, y[i]] += (np.exp(scores[j]) / score_sum - 1) * X[i, :]\n else:\n dW[:, j] += (np.exp(scores[j]) / score_sum) * X[i, :]\n \n \n loss /= num_train + ( reg * np.sum(W * W))\n dW /= num_train\n\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n\n return loss, dW", "def softmaxCostAndGradient(predicted, target, outputVectors, dataset):\n\n ### YOUR CODE HERE\n y_hat = softmax(np.dot(outputVectors,predicted))\n y = np.zeros(outputVectors.shape[0])\n y[target] = 1.0\n\n cost = -np.log(y_hat[target])\n gradPred = np.dot(outputVectors.T,y_hat - y)\n grad = np.outer(y_hat - y,predicted)\n ### END YOUR CODE\n\n return cost, gradPred, grad", "def softmax_loss_naive(W, X, y, reg):\n # Initialize the loss and gradient to zero.\n loss = 0.0\n dW = np.zeros_like(W)\n num_train = X.shape[0]\n num_classes = W.shape[1]\n #############################################################################\n # TODO: Compute the softmax loss and its gradient using explicit loops. #\n # Store the loss in loss and the gradient in dW. If you are not careful #\n # here, it is easy to run into numeric instability. Don't forget the #\n # regularization! #\n #############################################################################\n scores = np.dot(X,W)\n scores = (scores.T - np.max(scores,1)).T\n for i in xrange(num_train):\n nominator = np.exp(scores[i,:])\n denominator = np.sum(np.exp(scores[i,:]))\n loss -= np.log(nominator[y[i]]/denominator)\n for j in xrange(num_classes):\n dW[:,j] += (nominator[j]/denominator)*X[i,:]\n dW[:,y[i]] -= X[i,:]\n\n loss /= num_train\n dW /= num_train\n loss += 0.5*reg*np.sum(W*W)\n dW += reg*W\n\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n\n return loss, dW", "def softmax_loss(x, y):\n def softmax(x):\n exps = np.exp(x)\n return exps / np.sum(exps, axis=1)[:,None]\n\n N = y.shape[0]\n p = softmax(x)\n log_likelihood = -np.log(p[range(N),y])\n loss = np.sum(log_likelihood) / N\n\n dx = p.copy()\n dx[range(N),y] -= 1\n dx = dx/N\n\n return loss, dx", "def softmax_classifier(W, input, label, lamda):\n\n ############################################################################\n # TODO: Put your code here\n\n loss = 0.0\n num_train = input.shape[0]\n num_classes = W.shape[1]\n\n score = np.dot(input, W) # (N,C)\n prediction = np.argmax(score, axis=1)\n score -= np.max(score, axis=1, keepdims=True)\n\n # # cross entropy loss\n # # take exponent of the score and normalized with sum of all exponents.\n probs = np.exp(score) # (N,C)\n e_y = np.sum(np.multiply(probs,label), axis=1) # (N,) probability for correct class\n e_sum = np.sum(probs, axis=1) # (N,) sum of probability over all classes\n\n # implementation of loss equivalent l_i = -f_y_i + log sum_j(e^(f_j))\n # loss = np.sum(-np.log(e_y/e_sum)) # sum of -log across all samples.\n # loss /= num_train # average loss\n loss = np.sum(-1 * e_y) + np.sum(np.log(e_sum))\n loss 
/= num_train\n\n loss += lamda * np.sum(W * W) # regularization \n\n # Gradient\n delta_score = probs / e_sum.reshape(num_train,1) # (N,C)\n delta_score -= label # (NxC)\n gradient = np.dot(input.T, delta_score)\n gradient /= num_train\n gradient += lamda * 2 * W\n\n ############################################################################\n\n return loss, gradient, prediction", "def softmax_loss(x, y):\n N, C = x.shape\n loss, dx = 0, np.zeros(x.shape) \n for i in range(N):\n loss += -np.log(np.exp(x[i,y[i]])/np.sum(np.exp(x[i,:])))\n dx[i,:] = np.exp(x[i,:])/np.sum(np.exp(x[i,:]))\n dx[i,y[i]] += (-1)\n \n loss /= N\n dx /= N\n return loss, dx", "def softmax_loss(x, y):\n ############################################################################\n # TODO: You can use the previous softmax loss function here. # \n # Hint: Be careful on overflow problem #\n ############################################################################\n ############################################################################\n # START OF YOUR CODE #\n ############################################################################\n N = len(x)\n # We want to get the real y\n log_C = -np.max(x,axis=1,keepdims = True)\n # Get numerator\n e_all = np.exp(x+log_C)\n # Get the final prob\n prob = e_all/e_all.sum(axis=1,keepdims=True)\n # Find final loss\n loss = np.sum(-np.log(prob)[np.arange(N),y])/N\n # Get dx\n dx = prob\n dx[np.arange(N),y] -= 1\n dx /= N\n \n ############################################################################\n # END OF YOUR CODE #\n ############################################################################\n return loss, dx", "def softmax_loss_naive(W, X, y, reg):\n # Initialize the loss and gradient to zero.\n loss = 0.0\n dW = np.zeros_like(W)\n\n #############################################################################\n # TODO: Compute the softmax loss and its gradient using explicit loops. #\n # Store the loss in loss and the gradient in dW. If you are not careful #\n # here, it is easy to run into numeric instability. Don't forget the #\n # regularization! 
#\n #############################################################################\n (num_class, D), (D, num_train) = (W.shape, X.shape)\n class_scores = np.dot(W, X)\n \n # Subtract maximum unnormalized score from each set of class scores\n for i in range(num_train):\n max_class_score = np.max(class_scores[:, i])\n for j in range(num_class):\n class_scores[j, i] -= max_class_score\n \n # Compute softmax and update gradient\n for i in range(num_train):\n normalization_term = sum(np.exp(class_score) for class_score in class_scores[:, i])\n for j in range(num_class):\n class_scores[j, i] = np.exp(class_scores[j, i]) / normalization_term\n # Thanks again to MyHumbleSelf for making me examine this further and discover a bug in my derivation of the softmax gradient!\n dW[j] += (class_scores[j, i] - (j==y[i])) * X[:, i]\n \n # Compute cross-entropy errors and total loss from that\n losses = [np.log(class_scores[y[i], i]) for i in range(num_train)]\n loss = -sum(losses) / num_train\n\n # Add regularization to loss and normalize dW\n loss += 0.5 * reg * np.sum(W * W)\n dW /= num_train\n dW += reg * W\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n\n return loss, dW", "def softmax_loss(x, y):\n probs = np.exp(x - np.max(x, axis=1, keepdims=True))\n probs /= np.sum(probs, axis=1, keepdims=True)\n N = x.shape[0]\n loss = -np.sum(np.log(probs[np.arange(N), y])) / N\n dx = probs.copy()\n dx[np.arange(N), y] -= 1\n dx /= N\n return loss, dx", "def softmax_loss_naive(W, X, y, reg):\n # Initialize the loss and gradient to zero.\n loss = 0.0\n dW = np.zeros_like(W)\n\n #############################################################################\n # TODO: Compute the softmax loss and its gradient using explicit loops. #\n # Store the loss in loss and the gradient in dW. If you are not careful #\n # here, it is easy to run into numeric instability. Don't forget the #\n # regularization! #\n #############################################################################\n num_cases = X.shape[0]\n num_class = W.shape[1]\n y_label = np.zeros((num_cases,num_class))\n for i in range(num_cases):\n h1 = np.exp(X[i].dot(W))\n h = h1/np.sum(h1)\n y_label[i] = (np.arange(h.shape[0]) == y[i]) + 0\n loss -= (np.sum(y_label[i] * np.log(h) + (1 - y_label[i]) * np.log(1 - h)))\n delta = np.zeros(W.shape)\n for j in range(num_class):\n delta[:,j] += X[i]\n delta[:,j] *= h1[j]\n delta[:,j] *= (np.sum(h1) - h1[j])/(np.sum(h1) ** 2)\n delta[:,j] = y_label[i][j] / h[j] * delta[:,j] - (1 - y_label[i][j]) / (1 - h[j]) * delta[:,j]\n dW -= delta\n loss /= num_cases\n loss += reg * np.sum(W * W)\n dW /= num_cases\n dW += 2 * reg * W\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n\n return loss, dW", "def loss(self, X, y):\n\n # Initialize the loss to zero.\n loss = 0.0\n num_classes = self.W.shape[0] # C = num_classes\n num_train = X.shape[0]\n \n exp_a = np.zeros((num_classes,num_train))\n # ================================================================ #\n # YOUR CODE HERE:\n # Calculate the normalized softmax loss. 
Store it as the variable loss.\n # (That is, calculate the sum of the losses of all the training \n # set margins, and then normalize the loss by the number of \n # training examples.)\n # ================================================================ #\n \n \n for i in np.arange(num_train):\n \n Loss = 0.0\n\n class_scores = np.dot(self.W,X[i,:].T) # calculating class scores (C x 1 vector)\n class_scores -= np.max(class_scores) # considering the possible issue for numerical instability and account for it\n\n exp_a[:,i] = np.exp(class_scores) # turning class scores to probabilities (C x 1 vector), without normalization\n\n Loss -= np.log(exp_a[y[i],i]/np.sum(exp_a[:,i]))\n \n\n #p[:,i] = exp_a[:,i]/np.sum(exp_a[:,i]) # p now is a valid probability matrix\n #print(p[:,i])\n\n loss += Loss \n #print(Loss,i) \n \n pass\n loss /= num_train\n # ================================================================ #\n # END YOUR CODE HERE\n # ================================================================ #\n\n return loss", "def softmaxCostAndGradient(predicted, target, outputVectors, dataset):\n\n ### YOUR CODE HERE\n scores = outputVectors.dot(predicted.T) # shape = (V, 1)\n y_hat = softmax(scores)\n cost = -scores[target] + np.log(np.sum(np.exp(scores)))\n one_hot_target = np.zeros_like(y_hat)\n one_hot_target[target] = 1\n grad = np.outer((y_hat - one_hot_target), predicted)\n gradPred = outputVectors.T.dot(y_hat - one_hot_target)\n \n '''\n final_predicted = predicted.dot(outputVectors.T)\n probability = softmax(final_predicted)\n cost = -np.log(probability[target])\n \n one_hot_target = np.zeros_like(probability)\n one_hot_target[target] += 1\n dlogits = probability - one_hot_target\n grad = np.outer(predicted, dlogits).T\n gradPred = outputVectors.T.dot(dlogits)\n '''\n ### END YOUR CODE\n\n return cost, gradPred, grad", "def softmax_loss_naive(W, X, y, reg):\n # Initialize the loss and gradient to zero.\n loss = 0.0\n dW = np.zeros_like(W)\n\n #############################################################################\n # TODO: Compute the softmax loss and its gradient using explicit loops. #\n # Store the loss in loss and the gradient in dW. If you are not careful #\n # here, it is easy to run into numeric instability. Don't forget the #\n # regularization! 
#\n #############################################################################\n for i in range(X.shape[0]):\n scores = X[i].dot(W)\n \n idx_max = np.argmax(scores)\n s_max = scores[idx_max]\n scores -= s_max # shift for numerical stability\n \n temp = np.exp(scores)\n summation = np.sum(temp)\n loss += (- scores[y[i]] + np.log(summation))\n \n # computing gradients\n # (1) an explicit version:\n# for j in range(W.shape[1]):\n# if j == y[i]:\n# dW[:, j] -= X[i]\n# dW[:, idx_max] -= (-X[i])\n \n# dW[:, j] += (1 / summation) * temp[j] * X[i]\n# dW[:, idx_max] += (1 / summation) * temp[j] * (-X[i])\n# elif j == idx_max:\n# dW[:, j] += 0 # X[i] + (-X[i]) = 0\n# else:\n# dW[:, j] += (1 / summation) * temp[j] * X[i]\n# dW[:, idx_max] += (1 / summation) * temp[j] * (-X[i])\n \n # (2) a more concise version:\n softmax_scores = temp / summation\n for j in range(W.shape[1]):\n if j == y[i]:\n dW[:, j] += (-1 + softmax_scores[j]) * X[i]\n else:\n dW[:, j] += softmax_scores[j] * X[i]\n \n loss /= X.shape[0]\n dW /= X.shape[0]\n \n loss += reg * np.sum(W * W)\n dW += 2 * reg * W\n\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n\n return loss, dW", "def loss(params: hk.Params, batch, label) -> jnp.ndarray:\r\n logits = net.apply(params, batch)\r\n labels = jax.nn.one_hot(label, n_classes)\r\n\r\n # Cross Entropy Loss\r\n softmax_xent = -jnp.sum(labels * jax.nn.log_softmax(logits))\r\n softmax_xent /= labels.shape[0]\r\n return softmax_xent", "def softmax_loss(x, y):\n\n eps = 1e-5\n \n N,C = x.shape\n p = softmax(x)\n llikelihood = -np.log(p[range(N),y] + eps)\n# print(llikelihood)\n loss = np.sum(llikelihood) / N\n\n dx = p\n dx[range(N),y] -= 1\n dx = dx/N\n \n return loss, dx", "def apply_network(inputs):\n return apply_layer(tf.sigmoid(apply_layer(inputs, 64)), 1)", "def softmax_loss1(x, y):\n # tmp = np.max(x, axis=1, keepdims=True)\n shifted_logits = x - np.max(x, axis=1, keepdims=True)\n Z = np.sum(np.exp(shifted_logits), axis=1, keepdims=True)\n log_probs = shifted_logits - np.log(Z)\n probs = np.exp(log_probs)\n N = x.shape[0]\n # tmp2 = np.arange(N)\n tmp3 = log_probs[np.arange(N), y]\n # tmp4 = log_probs[[0,1,2],[2,5,0]]\n loss = -np.sum(log_probs[np.arange(N), y]) / N\n dx = probs.copy()\n dx[np.arange(N), y] -= 1\n dx /= N\n return loss, dx", "def _create_loss(self):\n with tf.device('/cpu:0'):\n with tf.name_scope('loss'):\n self.loss = tf.reduce_mean(\n tf.nn.softmax_cross_entropy_with_logits(\n labels=self.labels_placeholder, \n logits=self.logits, name='loss'))", "def loss_fn(self, targets, outputs, model):", "def loss_fn(y_true,y_pred): \n loss = tf.nn.softmax_cross_entropy_with_logits_v2(y_true,\n y_pred,\n axis=-1,\n )\n loss = tf.reduce_mean(loss,name=\"loss\")\n return loss", "def softmax_loss(x, y):\n shifted_logits = x - np.max(x, axis=1, keepdims=True)\n Z = np.sum(np.exp(shifted_logits), axis=1, keepdims=True)\n log_probs = shifted_logits - np.log(Z)\n probs = np.exp(log_probs)\n N = x.shape[0]\n loss = -np.sum(log_probs[np.arange(N), y]) / N\n dx = probs.copy()\n dx[np.arange(N), y] -= 1\n dx /= N\n return loss, dx", "def loss(output, y):\n #Computes softmax cross entropy between logits and labels.\n xentropy = tf.nn.softmax_cross_entropy_with_logits(logits=output, labels=y)\n loss = tf.reduce_mean(xentropy)\n\n return loss", "def make_loss(self, logit=None, labels=None):\r\n return nn.functional.mse_loss(logit, labels, reduction='mean') # 
The MSE Loss\r", "def my_loss(y_pred,y_true,n_outputs):\n y_true = tf.one_hot(tf.cast(y_true,tf.int64), n_outputs, dtype=tf.float32)\n return tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(y_true,y_pred))", "def loss_fn(gr_truth, pred):\n return 100 * dice_loss(pred, gr_truth) + softmax_weighted_loss(pred, gr_truth)", "def temporal_softmax_loss(x, y, mask, verbose=False):\n\n N, T, V = x.shape\n\n x_flat = x.reshape(N * T, V)\n y_flat = y.reshape(N * T)\n mask_flat = mask.reshape(N * T)\n\n # dividing by max doesn't hurt.. rather it makes the operand with in the exponential\n # more well behaved -- no very huge numbers.. Low numbers will be in -... which will also\n # become more well behaved.. low in magnitude say close to 0 will loose precision I guess or\n # say 0.000001 and 0..000005 after divisionn by a huge nummber willl be almost 0.\n # in any case, since the numerator and denominator will be divided by the same number\n # it doesn't make a diffference.. so softmax is np.exp(p)/sum(np.exp(p))\n # np.exp(p-p_max)/sum(np.exp(p-p_max)) is the same thing -- but perhaps numerical\n probs = np.exp(x_flat - np.max(x_flat, axis=1, keepdims=True))\n probs /= np.sum(probs, axis=1, keepdims=True)\n loss = -np.sum(mask_flat * np.log(probs[np.arange(N * T), y_flat])) / N\n\n # this gives the derivative\n dx_flat = probs.copy()\n dx_flat[np.arange(N * T), y_flat] -= 1\n dx_flat /= N\n dx_flat *= mask_flat[:, None]\n\n if verbose: print('dx_flat: ', dx_flat.shape)\n\n dx = dx_flat.reshape(N, T, V)\n\n return loss, dx", "def softmax(y):\n# y = y.squeeze()\n epsilon = 0.001\n y = y.detach().numpy()\n y[y > 400] = 400 # For stability to prevent overflow\n denominator = epsilon + sum(np.exp(y)) # Further stability to prevent overflow\n numerator = np.exp(y)\n softmax = numerator / denominator\n return torch.Tensor(softmax)", "def softmax_loss_naive(W, X, y, reg):\n # Initialize the loss and gradient to zero.\n loss = 0.0\n dW = np.zeros_like(W)\n\n #############################################################################\n # TODO: Compute the softmax loss and its gradient using explicit loops. #\n # Store the loss in loss and the gradient in dW. If you are not careful #\n # here, it is easy to run into numeric instability. Don't forget the #\n # regularization! 
#\n #############################################################################\n num_train = X.shape[0]\n num_classes = W.shape[1]\n\n # Calculate loss for each example\n f = np.zeros((num_train, num_classes))\n f_max = np.zeros((num_train, 1))\n for i in xrange(num_train):\n for j in xrange(num_classes):\n f[i, j] = np.dot(X[i, :], W[:, j])\n if f[i, j] > f_max[i]:\n f_max[i] = f[i, j]\n\n exp_f = np.zeros_like(f)\n sum_exp_f = np.zeros((num_train, 1))\n for i in xrange(num_train):\n for j in xrange(num_classes):\n f[i, j] -= f_max[i]\n exp_f[i, j] = math.exp(f[i, j])\n sum_exp_f[i] += exp_f[i, j]\n\n for i in xrange(num_train):\n loss += -math.log(exp_f[i, y[i]] / sum_exp_f[i])\n\n loss /= num_train\n\n # Calculate regularization term\n reg_term = 0.0\n for i in xrange(W.shape[0]):\n for j in xrange(W.shape[1]):\n reg_term += W[i, j]**2\n\n loss += reg * reg_term\n\n # Calculate gradient\n P = np.zeros((num_train, num_classes))\n for i in xrange(num_train):\n for j in xrange(num_classes):\n P[i, j] = exp_f[i, j] / sum_exp_f[i]\n P[i, y[i]] -= 1\n\n for i in xrange(dW.shape[0]):\n for j in xrange(dW.shape[1]):\n dW[i, j] = 1 / num_train * np.dot(X[:, i].T, P[:, j])\n \n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n\n return loss, dW", "def softmax_loss_vectorized(W, X, y, reg):\n\n num_train = X.shape[1]\n\n # calculate the scores for the current training example with the current weights\n scores = W.dot(X)\n # scale by the max for numerical stability\n scores -= np.max(scores, axis = 0)\n # calculate the loss\n loss = np.sum(-scores[y, range(num_train)] + np.log(np.sum(np.exp(scores), axis = 0)))\n\n ## L' = -1_y + 1/(\\sum_{}^{} e^f) * e^f\n # e^f\n scores = np.exp(scores)\n # 1/(\\sum_{}^{} e^f)\n scores /= np.sum(scores,axis = 0)\n # -1_y\n scores[y, range(num_train)] -= 1\n # now we scale it by the data\n dW = scores.dot(X.T)\n\n # get the average loss\n loss /= num_train\n # get the average gradient\n dW /= num_train\n\n # regularize the loss function\n loss += 0.5 * reg * np.sum(W * W)\n\n return loss, dW", "def softmax_loss_vectorized(W, X, y, reg):\n # Initialize the loss and gradient to zero.\n loss = 0.0\n dW = np.zeros_like(W)\n num_train = X.shape[0]\n dim = dW.shape[0]\n num_classe = W.shape[1]\n\n #############################################################################\n # TODO: Compute the softmax loss and its gradient using no explicit loops. #\n # Store the loss in loss and the gradient in dW. If you are not careful #\n # here, it is easy to run into numeric instability. Don't forget the #\n # regularization! 
#\n #############################################################################\n S = X.dot(W)\n # ajouter le - max a la fin\n indexes=np.arange(num_train)\n #c = correct class score\n c = S[indexes, y]\n\n e_syi = np.exp(c)\n e_sj = np.sum(np.exp(S), axis=1)\n Li = - np.log(e_syi/e_sj)\n loss = np.sum(Li) / num_train + reg * np.sum(W * W)\n\n\n M = np.exp(S)/(np.repeat(e_sj, num_classe).reshape(num_train, num_classe)) #(500,10)\n M[indexes, y] -= 1 #bonnes classes\n dW = X.T.dot(M)\n\n dW = dW/num_train + 2 * reg * W\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n\n return loss, dW", "def softmax(inputs):\n return np.exp(inputs) / float(sum(np.exp(inputs)))", "def softmax(inputs):\n return np.exp(inputs) / float(sum(np.exp(inputs)))", "def _softmax_loss(self, y_true, y_predicted):\n y_predicted = tf.maximum(tf.minimum(y_predicted, 1- 1e-15), 1e-15)\n softmax_loss = -tf.reduce_sum(y_true * tf.log(y_predicted),\n reduction_indices=-1)\n return softmax_loss", "def heteroscedastic_loss(network, params, x):\n\n pred_mean, pred_var = network(x)\n logvar = tf.reduce_sum(0.5 * tf.math.log(pred_var), axis=-1)\n squared_error = tf.reduce_sum(0.5 * tf.math.square(params - pred_mean) / pred_var, axis=-1)\n loss = tf.reduce_mean(squared_error + logvar)\n return loss", "def loss_and_grad(self, X, y):\n\n # Initialize the loss and gradient to zero.\n loss = 0.0\n grad = np.zeros_like(self.W)\n grad_tmp = np.zeros_like(self.W)\n num_classes = self.W.shape[0] # C = num_classes\n num_train = X.shape[0]\n \n # ================================================================ #\n # YOUR CODE HERE:\n # Calculate the softmax loss and the gradient. 
Store the gradient\n # as the variable grad.\n # ================================================================ #\n \n exp_a = np.zeros((num_classes,num_train))\n for i in np.arange(num_train):\n \n Loss = 0.0\n\n class_scores = np.dot(self.W,X[i,:].T) # calculating class scores (C x 1 vector)\n class_scores -= np.max(class_scores) # considering the possible issue for numerical instability and account for it\n\n exp_a[:,i] = np.exp(class_scores) # turning class scores to probabilities (C x 1 vector), without normalization\n\n Loss -= np.log(exp_a[y[i],i]/np.sum(exp_a[:,i]))\n \n \n #if i==0:\n grada = np.zeros(X.shape[1])\n \n for j in range(num_classes):\n if j != y[i]:\n grad_tmp[j,:] = X[i,:].T * (exp_a[j,i] / np.sum(exp_a[:,i])) \n else: \n grad_tmp[j,:] = X[i,:].T * (exp_a[j,i] / np.sum(exp_a[:,i])) - X[i,:].T \n\n grad += grad_tmp\n loss += Loss \n \n pass\n\n\n loss /= num_train\n grad /= num_train\n # ================================================================ #\n # END YOUR CODE HERE\n # ================================================================ #\n\n return loss, grad", "def softmax(x):\r\n e_x = np.exp(x - np.expand_dims(np.max(x, axis=-1), axis=-1))\r\n return e_x / np.expand_dims(e_x.sum(axis=-1), axis=-1) # only difference\r", "def loss(self, X, y=None):\n W1, b1 = self.params['W1'], self.params['b1']\n W2, b2 = self.params['W2'], self.params['b2']\n W3, b3 = self.params['W3'], self.params['b3']\n\n # conv - relu - 2x2 max pool - affine - relu - affine - softmax\n\n\n # pass conv_param to the forward pass for the convolutional layer\n # Padding and stride chosen to preserve the input spatial size\n filter_size = W1.shape[2]\n conv_param = {'stride': 1, 'pad': (filter_size - 1) // 2}\n\n # pass pool_param to the forward pass for the max-pooling layer\n pool_param = {'pool_height': 2, 'pool_width': 2, 'stride': 2}\n\n\n h1, c1 = conv_forward_im2col(X, W1, b1, conv_param) #\n h1, r1 = relu_forward(h1)\n h1, p1 = max_pool_forward_fast(h1, pool_param) #\n max_pool_shape = h1.shape\n h1 = h1.reshape(X.shape[0], -1)\n h2, c2 = affine_relu_forward(h1, W2, b2)\n scores, c3 = affine_forward(h2, W3, b3)\n\n if y is None:\n return scores\n\n loss, dx = softmax_loss(scores, y)\n\n loss += self.reg / 2 * (self.params['W1']**2).sum()\n loss += self.reg / 2 * (self.params['W2']**2).sum()\n loss += self.reg / 2 * (self.params['W3']**2).sum()\n\n ############################################################################\n # TODO: Implement the backward pass for the three-layer convolutional net, #\n # storing the loss and gradients in the loss and grads variables. Compute #\n # data loss using softmax, and make sure that grads[k] holds the gradients #\n # for self.params[k]. Don't forget to add L2 regularization! #\n # #\n # NOTE: To ensure that your implementation matches ours and you pass the #\n # automated tests, make sure that your L2 regularization includes a factor #\n # of 0.5 to simplify the expression for the gradient. 
#\n ############################################################################\n \n grads = {}\n dx, grads['W3'], grads['b3'] = affine_backward(dx, c3)\n grads['W3'] += self.reg * self.params['W3']\n dx, grads['W2'], grads['b2'] = affine_relu_backward(dx, c2)\n dx = dx.reshape(max_pool_shape)\n dx = max_pool_backward_fast(dx, p1)\n dx = relu_backward(dx, r1)\n dx, grads['W1'], grads['b1'] = conv_backward_im2col(dx, c1)\n\n ############################################################################\n # END OF YOUR CODE #\n ############################################################################\n\n return loss, grads", "def softmax_dice_loss(y, t, normalize=True, class_weight=None,\n ignore_label=-1, reduce='mean', eps=1e-08):\n return 1.0 - softmax_dice(y, t, normalize, class_weight,\n ignore_label, reduce, eps)", "def _create_softmax(cls, onnx_node, inputs, opset_version):\n factor = onnx_node.getattr('axis', 1)\n if factor < 0:\n # in order to support the negative axis\n factor = len(inputs[0].shape) + factor\n _, forward = cls._common_onnx_node_to_singa_op(onnx_node, inputs,\n opset_version)\n return None, forward(axis=factor)", "def loss_fn(params):\n logits = models.ProgramTransformer(config).apply(\n {'params': params},\n inputs,\n outputs,\n programs,\n rngs={'dropout': train_rng})\n loss, weight_sum = compute_weighted_cross_entropy(logits, programs, weights)\n mean_loss = loss / weight_sum\n return mean_loss, logits", "def _define_loss(self):\n\n cost = []\n unit_cost = []\n for nn in range(len(self.ffnet_out)):\n data_out = self.data_out_batch[nn]\n if self.filter_data:\n # this will zero out predictions where there is no data,\n # matching Robs here\n pred = tf.multiply(\n self.networks[self.ffnet_out[nn]].layers[-1].outputs,\n self.data_filter_batch[nn])\n else:\n pred = self.networks[self.ffnet_out[nn]].layers[-1].outputs\n\n nt = tf.cast(tf.shape(pred)[0], tf.float32)\n # define cost function\n if self.noise_dist == 'gaussian':\n with tf.name_scope('gaussian_loss'):\n cost.append(tf.nn.l2_loss(data_out - pred) / nt)\n unit_cost.append(tf.reduce_mean(tf.square(data_out-pred), axis=0))\n\n elif self.noise_dist == 'poisson':\n with tf.name_scope('poisson_loss'):\n\n if self.poisson_unit_norm is not None:\n # normalize based on rate * time (number of spikes)\n cost_norm = tf.multiply(self.poisson_unit_norm[nn], nt)\n else:\n cost_norm = nt\n\n cost.append(-tf.reduce_sum(tf.divide(\n tf.multiply(data_out, tf.log(self._log_min + pred)) - pred,\n cost_norm)))\n\n unit_cost.append(-tf.divide(\n tf.reduce_sum(\n tf.multiply(\n data_out, tf.log(self._log_min + pred)) - pred, axis=0),\n cost_norm))\n\n elif self.noise_dist == 'bernoulli':\n with tf.name_scope('bernoulli_loss'):\n # Check per-cell normalization with cross-entropy\n # cost_norm = tf.maximum(\n # tf.reduce_sum(data_out, axis=0), 1)\n cost.append(tf.reduce_mean(\n tf.nn.sigmoid_cross_entropy_with_logits(\n labels=data_out, logits=pred)))\n unit_cost.append(tf.reduce_mean(\n tf.nn.sigmoid_cross_entropy_with_logits(\n labels=data_out, logits=pred), axis=0))\n else:\n TypeError('Cost function not supported.')\n\n self.cost = tf.add_n(cost)\n self.unit_cost = unit_cost\n\n # Add regularization penalties\n reg_costs = []\n with tf.name_scope('regularization'):\n for nn in range(self.num_networks):\n reg_costs.append(self.networks[nn].define_regularization_loss())\n self.cost_reg = tf.add_n(reg_costs)\n\n self.cost_penalized = tf.add(self.cost, self.cost_reg)\n\n # save summary of cost\n # with 
tf.variable_scope('summaries'):\n tf.summary.scalar('cost', self.cost)\n tf.summary.scalar('cost_penalized', self.cost_penalized)\n tf.summary.scalar('reg_pen', self.cost_reg)", "def _softmax(x):\n e = K.exp(x - K.max(x, axis=-1, keepdims=True))\n s = K.sum(e, axis=-1, keepdims=True)\n return e / s", "def loss_grad_softmax_vectorized(W, X, y):\n loss = 0 \n grad = np.zeros_like(W)\n dim, num_train = X.shape\n\n scores = W.dot(X) # [K, N]\n # Shift scores so that the highest value is 0\n scores -= np.max(scores)\n scores_exp = np.exp(scores)\n correct_scores_exp = scores_exp[y, range(num_train)] # [N, ]\n scores_exp_sum = np.sum(scores_exp, axis=0) # [N, ]\n loss = -np.sum(np.log(correct_scores_exp / scores_exp_sum))\n loss /= num_train\n #loss += 0.5 * reg * np.sum(W * W)\n\n scores_exp_normalized = scores_exp / scores_exp_sum\n # deal with the correct class\n scores_exp_normalized[y, range(num_train)] -= 1 # [K, N]\n grad = scores_exp_normalized.dot(X.T)\n grad /= num_train\n grad += W\n\n return grad", "def loss_fn(model):\n with flax.nn.stateful(state) as new_state:\n with flax.nn.stochastic(prng_key):\n logits = model(batch['image'])\n loss = cross_entropy_loss(logits, batch['label'])\n # TODO(britefury): check if applying L2 regularization to weights but\n # *not* biases improves results\n weight_penalty_params = jax.tree_leaves(model.params)\n weight_l2 = sum([jnp.sum(x ** 2)\n for x in weight_penalty_params\n if x.ndim > 1])\n weight_penalty = l2_reg * 0.5 * weight_l2\n loss = loss + weight_penalty\n return loss, (new_state, logits)", "def __removeSoftMax(self,layer):\n newLayer = layer.__class__.from_config(layer.get_config())\n if hasattr(newLayer,\"activation\") and newLayer.activation == tf.keras.activations.softmax:\n newLayer.activation = tf.keras.activations.linear #No computa nada, deja pasar los valores --> f(x) = x\n return newLayer", "def softmax_loss_vectorized(W, X, y, reg):\n\n #############################################################################\n # TODO: Compute the softmax.ipynb loss and its gradient using no explicit loops. #\n # Store the loss in loss and the gradient in dW. If you are not careful #\n # here, it is easy to run into numeric instability. Don't forget the #\n # regularization! 
#\n #############################################################################\n\n # Initialize the loss and gradient to zero.\n loss = 0.0\n dW = np.zeros_like(W)\n train_images = X.shape[0]\n # Store all the scores in a matrix\n all_scores = np.dot(X,W)\n #First, calculate the normalizing constant for numeric stability\n constant = np.max(all_scores,axis=1)\n normalized_scores = np.transpose(np.subtract(np.transpose(all_scores),constant))\n\n #Then, calculate softmax for the correct scores\n exp_scores = np.exp(all_scores)\n # First, keep track of the sum of values per row\n exp_sum = np.sum(exp_scores,axis=1)\n\n # Finally, calculate the softmax score for every entry\n softmax_scores = np.transpose(exp_scores)/exp_sum # useful when computing gradient\n softmax_scores = np.transpose(softmax_scores)\n # And then, compute the loss\n loss_score = softmax_scores[range(train_images),y]\n loss_score = -1 * np.log(loss_score) #taking the logarithm\n loss += np.sum(loss_score)\n\n #Normalize and regularize the loss\n loss /= train_images\n loss += 0.5*reg*np.sum(W*W)\n\n #Finally, calculate a vectorized gradient\n\n # Calculate the derivative at the correct label\n softmax_scores[range(train_images),y] -= 1\n # Then, make a matrix containing all the gradient values\n gradient_values = np.dot(np.transpose(X),softmax_scores)\n gradient_values = gradient_values\n\n #FINALLY, update the gradient\n dW+= gradient_values\n #And normalize and regularize it\n dW /= train_images\n dW += reg*W\n\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n\n return loss, dW", "def forwardPropagation(self, inputs, label):\n node_hidden = np.dot(inputs, self.input_W)\n node_hidden = np.add(node_hidden, self.input_B)\n node_hidden = np.maximum(0, node_hidden)\n node_output = np.dot(node_hidden, self.hidden_W)\n node_output = np.add(node_output, self.hidden_B)\n #print(node_output)\n exp_node_output = np.exp(node_output)\n node_output = exp_node_output / np.sum(exp_node_output, axis=1, keepdims=True)\n #print(node_output)\n #node_output = self.softmax(node_output)\n loss = np.sum(-np.log(node_output[range(inputs.shape[0]),label]))/(inputs.shape[0])+0.5 * self.regularizer*np.sum(self.input_W *self.input_W)+0.5 * self.regularizer*np.sum(self.hidden_W *self.hidden_W)\n \"\"\"Loss= Input data loss + Loss correction by penalizing the loss, here we use 0.2 as an experimental value\"\"\"\n #loss = np.sum(-np.log(node_output[range(inputs.shape[0]), label])) / (inputs.shape[0]) + 0.2 * self.regularizer * np.sum(self.input_W ^ 2) + 0.2 * self.regularizer * np.sum(self.hidden_W ^ 2)\n return loss, node_hidden, node_output", "def compute_loss(self, x, y):\n\n self.batch_size = x.shape[0]\n self.x = x\n self.y = y\n self.soft = self.softmax(x) + 10**(-11)\n out = np.zeros(self.batch_size)\n for i in range(self.batch_size):\n out[i] = -(y[i] @ np.log(self.soft[i]))\n\n return out", "def setup_loss(logits, labels):\n predictions = tf.nn.softmax(logits)\n cost = tf.losses.softmax_cross_entropy(onehot_labels=labels,\n logits=logits,\n )\n return predictions, cost", "def softmax_layer(avg_per_word, U, b, y_mask, maxw, training=False):\n #avg_per_word = theano.printing.Print(\"avg_per_word\")(avg_per_word)\n if training:\n srng = RandomStreams(seed=12345)\n dropout_mask = tensor.cast(srng.binomial(size=U.shape, p=0.5), theano.config.floatX)\n #U = theano.printing.Print(\"U\", attrs=[\"shape\"])(U)\n 
#dropout_mask = theano.printing.Print(\"dropout_mask\", attrs=[\"shape\"])(dropout_mask)\n raw_pred, _ = theano.scan(fn=lambda p, free_variable: tensor.nnet.softmax(tensor.dot(p, tensor.mul(U, dropout_mask)) + b),\n outputs_info=None,\n sequences=[avg_per_word, tensor.arange(maxw)]\n )\n else:\n raw_pred, _ = theano.scan(fn=lambda p, free_variable: tensor.nnet.softmax(tensor.dot(p, U) + b),\n\t\t\t\t outputs_info=None,\n\t\t\t\t sequences=[avg_per_word, tensor.arange(maxw)]\n\t\t\t\t )\n return raw_pred", "def ranknet_loss(y, m_):\n conf = 1.0\n ones_ = tf.ones_like(m_, dtype=tf.float32)\n y_m_ = tf.mul(y, ones_)\n y_diff_ = tf.sub(y_m_, tf.transpose(y_m_))\n t_1_ = -tf.mul(conf*ones_, y_diff_)\n t_2_ = tf.log(ones_ + tf.exp(y_diff_))\n sum_ = tf.add(t_1_, t_2_)\n mult_sum_ = tf.mul(m_, sum_)\n loss_ = tf.reduce_sum(mult_sum_) / tf.reduce_sum(m_)\n return loss_, m_", "def convert_softmax(g, op, block):\n\n axis = op.attr(\"axis\")\n input_shape = block.var(op.input(\"X\")[0]).shape\n if axis < 0:\n axis = len(input_shape) + axis\n x = g.get_node(op.input(\"X\")[0])\n m = _op.max(x, axis, keepdims=True)\n e = _op.exp(x - m)\n out = e / _op.sum(e, axis, keepdims=True)\n g.add_node(op.output(\"Out\")[0], out)", "def softmax(x):\r\n output = np.exp(x)\r\n return output / np.sum(output, axis=1, keepdims=True)", "def softmax(x):\n return np.exp(x) / np.sum(np.exp(x), axis=1, keepdims=True)", "def grad_softmax_cross_entropy_loss(logit, labels):\n return softmax(logit) - labels", "def softmax(x):\r\n sum_c = np.sum(np.exp(x), axis=1)\r\n sum_c = np.expand_dims(sum_c, axis=1)\r\n pred_x = np.divide(np.exp(x), sum_c)\r\n return pred_x", "def softmax_loss(self, sim, ratio=1.0):\n prob = tf.nn.softmax(ratio * sim)\n hit_prob = tf.slice(prob, [0, 0], [-1, 1])\n loss = -tf.log(hit_prob)\n\n return tf.reduce_mean(loss, name='softmax_loss')", "def _create_softmax(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n node.attribute.extend([\n helper.make_attribute('axis', op.axis),\n ])\n return node", "def softmaxCostAndGradient(predicted, target, outputVectors, dataset):\n\n ### YOUR CODE HERE\n # identify the target predicted vector and then find the dot product\n # between the vector and the output vectors\n # outputVector structured as V x D \n # v_c structured as 1xD matrix\n # we are assuming here that the output vector and the \n # predicted vector is structured so that each row represent a word / token in {1, V}\n v_c = predicted\n z_w = np.dot(outputVectors, v_c)\n # the output yhat is a 1xV matrix\n yhat = softmax(z_w)\n # create the one hot vector for the predicted word\n # calculate the difference for gradient\n ydiff = yhat.copy()\n ydiff[target] -= 1.0\n\n # find the cross-entropy cost function based on yhat\n # cost = calc_cost_from_prediction(y, yhat)\n cost = - np.log( yhat[target] )\n\n # calculate the gradient wrt to the v_c (the predicted word vector)\n # the gradient is U(yhat - y)\n # the output should be a D x 1 matrix, same as v_c\n # y is a one-hot vector that represents the actual word\n # and we multiply it by output vector, it can also be calculated\n # by using index to find the vector\n gradPred = np.dot( outputVectors.T, ydiff)\n\n\n # calculate the gradient wrt to all other word vectors\n # the gradient is v_c(yhat - y)\n # we multiple yhat by v_c to get a V x D matrix\n grad = np.outer(ydiff, v_c)\n\n ### END YOUR CODE\n return cost, gradPred, grad", "def demo_np_vs_tf_softmax():\n X = sample_images(9, 10)\n W = np.random.randn(9, 4) * 0.0001\n y = 
np.array([0, 1, 1, 1, 3, 2, 0, 2, 0, 3])\n\n print(\"np softmax:\\n{}\".format(\"\\n-----\\n\".join(['{}'.format(el) for el in np_softmax_loss(W, X, y, 0.00001)])))\n print(\"\\n\\ntf softmax:\\n{}\".format(\"\\n-----\\n\".join(['{}'.format(el) for el in tf_softmax_loss(W, X, y, 0.00001)])))", "def _create_loss(self):\n\n with tf.name_scope(\"loss\"):\n \n # gini=(tf.nn.l2_loss( self.score))/100000\n gini = tf.losses.softmax_cross_entropy(self.score, 0*self.score)\n \n promo_prob=tf.reduce_sum(tf.multiply(self.score, self.cohort_weight),\n axis=1)\n inc_value = tf.reduce_mean(tf.multiply(promo_prob, self.value))- self.control_value\n inc_cost = tf.reduce_mean( tf.multiply(promo_prob, self.cost)) - self.control_cost\n \n\n\n # determine loss function based on self.obj_rule\n if self.obj_rule == 'cpiv':\n self.objective = inc_cost / inc_value\n\n elif self.obj_rule == 'ivc':\n # maximize ivc\n self.objective = - inc_value / inc_cost\n\n elif self.obj_rule == 'lagrangian':\n assert self.shadow is not None, 'Need to pass in shadow value if use lagrangian as obj_rule.'\n self.objective = inc_cost - self.shadow * inc_value\n\n elif self.obj_rule == 'value':\n # maximize delta values\n self.objective = - inc_value\n\n # use only cost as objective\n elif self.obj_rule == 'cost':\n # maximize delta cost\n self.objective = - inc_cost\n\n else:\n raise Exception('Invalid obj_rule!')\n\n # regularization\n reg_loss = tf.reduce_sum(tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES))\n # weights = tf.trainable_variables() # all vars of your graph\n # reg_loss = tf.norm( weights,ord=1)\n\n # final loss\n self.loss = self.objective +reg_loss+.1*gini", "def build_nt_loss(self, n_logits, n_target):\n n_loss = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=n_logits, labels=n_target)\n n_loss = tf.reduce_mean(n_loss)\n return n_loss", "def model_loss(inp, fake, real_label, fake_label):\n \n \n Dreal,realcls,R1 = gradpen(inp)\n [Dfake,fakecls] = D(fake)\n # 1. Adversarial loss\n \n glabel = tf.ones_like(Dfake)#tf.random.uniform((Dfake.shape), 1-LN, 1)\n dlabelr = tf.ones_like(Dreal)#tf.random.uniform((Dreal.shape), 1-LN, 1)\n dlabelf = tf.zeros_like(Dfake)#tf.random.uniform((Dfake.shape), 0, LN)\n \n \n \n # D has no sigmoid activation: \"from_logits=True\"\n real_loss = tf.keras.losses.binary_crossentropy(\n dlabelr, Dreal, from_logits=True)\n real_loss = tf.reduce_mean(real_loss)\n \n fake_loss = tf.keras.losses.binary_crossentropy(\n dlabelf, Dfake, from_logits=True)\n fake_loss = tf.reduce_mean(fake_loss)\n \n Dadv = 0.5*(real_loss+fake_loss)\n \n Gadv = tf.keras.losses.binary_crossentropy(\n glabel, Dfake, from_logits=True)\n Gadv = tf.reduce_mean(Gadv)\n \n # 2. Classification loss\n \n Dcls = tf.keras.losses.binary_crossentropy(real_label, realcls, from_logits=True)\n Dcls = tf.reduce_mean(Dcls)\n \n Gcls = tf.keras.losses.binary_crossentropy(fake_label, fakecls, from_logits=True)\n Gcls = tf.reduce_mean(Gcls)\n \n # 3. 
Total loss\n \n Dloss = Dadv + (GAMMA/2)*R1 + LAMBDA_CLS*Dcls\n \n Gloss = Gadv + LAMBDA_CLS*Gcls\n \n return (Dloss, Dadv, Dcls, R1), (Gloss, Gadv, Gcls)", "def softmaxCostAndGradient(predicted, target, outputVectors, dataset):\n\n\t#calculate the predictions\n\t#outputVectors: U: (V,d) ->!!diff from 3(a) dimension\n\tvhat = predicted\t#center word or BOW: (d,1)\n\tz = np.dot(outputVectors, vhat)\t#(V,1)\n\tpreds = softmax(z)\t#yhat: (V,1)\n\n\t#calculate the cost \n\tcost = -np.log(preds[target])\n\n\t#gradients\n\tgrad_pred = preds \n\tgrad_pred[target] -= 1\t#yhat - y\n\n\tgrad = np.outer(grad_pred, vhat)\t#(V, d)\n\tgradPred = np.dot(outputVectors.T, grad_pred)\t#dvc\n\n\treturn cost, gradPred, grad", "def loss_function(\n self, x_p, const, target, reconstructed_original, confidence, min_, max_):\n\n ## get the output of model before softmax\n x_p.requires_grad = True\n logits = self.model.get_logits(x_p).to(self.device)\n\n ## find the largest class except the target class\n targetlabel_mask = (torch.from_numpy(onehot_like(np.zeros(self.classnum), target))).double()\n secondlargest_mask = (torch.from_numpy(np.ones(self.classnum)) - targetlabel_mask).to(self.device)\n\n secondlargest = np.argmax((logits.double() * secondlargest_mask).cpu().detach().numpy(), axis = 1)\n\n is_adv_loss = logits[0][secondlargest] - logits[0][target]\n\n # is_adv is True as soon as the is_adv_loss goes below 0\n # but sometimes we want additional confidence\n is_adv_loss += confidence\n\n if is_adv_loss == 0:\n is_adv_loss_grad = 0\n else:\n is_adv_loss.backward()\n is_adv_loss_grad = x_p.grad\n\n is_adv_loss = max(0, is_adv_loss)\n\n s = max_ - min_\n squared_l2_distance = np.sum( ((x_p - reconstructed_original) ** 2).cpu().detach().numpy() ) / s ** 2\n total_loss = squared_l2_distance + const * is_adv_loss\n\n\n squared_l2_distance_grad = (2 / s ** 2) * (x_p - reconstructed_original)\n\n #print(is_adv_loss_grad)\n total_loss_grad = squared_l2_distance_grad + const * is_adv_loss_grad\n return total_loss, total_loss_grad", "def SoftEntropy(nn_last_layer, correct_label, learning_rate): \n \n loss = tf2.math.reduce_sum( tf2.nn.softmax_cross_entropy_with_logits(tf2.stop_gradient(correct_label), nn_last_layer))\n \n #obtain training operation\n optimizer = tf.train.AdamOptimizer(learning_rate = learning_rate, epsilon = 1e-8) #Note default value of epsilon 1e-8 results in instability after few epochs\n \n #clip the gradients\n gvs = optimizer.compute_gradients(loss)\n #capped_gvs = [(tf.clip_by_value(grad, -1., 1.), var) for grad, var in gvs]\n training_operation = optimizer.apply_gradients(gvs)\n\n return training_operation, loss", "def softmax_loss_vectorized(W, X, y, reg):\n # Initialize the loss and gradient to zero.\n loss = 0.0\n dW = np.zeros_like(W)\n\n #############################################################################\n # TODO: Compute the softmax loss and its gradient using no explicit loops. #\n # Store the loss in loss and the gradient in dW. If you are not careful #\n # here, it is easy to run into numeric instability. Don't forget the #\n # regularization! 
#\n #############################################################################\n # *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n\n num_train = X.shape[0]\n # print(\"num_train:\", num_train)\n num_classes = W.shape[1]\n # print(\"num_classes:\", num_classes)\n \n scores = X.dot(W) # scores is N*D x D*C -> N*C \n log_c = np.max(scores, axis=1).T\n scores -= log_c[:,None]\n correct_class_score = scores[np.arange(num_train),y]\n exp_scores = np.exp(scores)\n sum_exp_scores = np.sum(np.exp(scores), axis=1)\n proportion = np.exp(correct_class_score) / sum_exp_scores\n loss -= np.sum(np.log(proportion))\n \n # calculating dW = (p - (c = correct c ? 1 : 0)) * x\n correct_class_one_hot = np.zeros_like(scores)\n correct_class_one_hot[np.arange(num_train),y] += 1\n p = np.exp(scores) / sum_exp_scores[:,None] - correct_class_one_hot # N*C / N:1 -> N*C\n dW += X.T.dot(p) # D*N x N*C -> D*C\n\n loss /= num_train\n loss += 0.5 * reg * np.sum(W * W) \n dW /= num_train\n dW += reg * W\n\n # *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n\n return loss, dW", "def compute_loss(self):\n self.test_logits = self.compute_logits()\n loss = tf.nn.sparse_softmax_cross_entropy_with_logits(\n labels=self.data.test_labels, logits=self.test_logits)\n cross_entropy_loss = tf.reduce_mean(loss)\n regularization = tf.reduce_sum(\n tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES))\n loss = cross_entropy_loss + self.weight_decay * regularization\n return loss", "def softmax(x):\n return np.exp(x) / np.sum(np.exp(x), axis=1, keepdims=True)", "def loss(self, X, y=None):\n\t\tW1, b1 = self.params['W1'], self.params['b1']\n\t\tW2, b2 = self.params['W2'], self.params['b2']\n\t\tW3, b3 = self.params['W3'], self.params['b3']\n\t\t\n\t\t# pass conv_param to the forward pass for the convolutional layer\n\t\tfilter_size = W1.shape[2]\n\t\tconv_param = {'stride': 1, 'pad': (filter_size - 1) / 2}\n\n\t\t# pass pool_param to the forward pass for the max-pooling layer\n\t\tpool_param = {'pool_height': 2, 'pool_width': 2, 'stride': 2}\n\n\t\tscores = None\n\t\t############################################################################\n\t\t# TODO: Implement the forward pass for the three-layer convolutional net, #\n\t\t# computing the class scores for X and storing them in the scores\t\t\t\t\t #\n\t\t# variable.\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t #\n\t\t############################################################################\n\t\tz1, cache1 = conv_relu_pool_forward(X, W1, b1, conv_param, pool_param)\n\t\tz2, cache2 = affine_relu_forward(z1, W2, b2)\n\t\ty3, cache3 = affine_forward(z2, W3, b3)\n\t\tscores = y3\n\t\t############################################################################\n\t\t#\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tEND OF YOUR CODE\t\t\t\t\t\t\t\t\t\t\t\t\t\t #\n\t\t############################################################################\n\t\t\n\t\tif y is None:\n\t\t\treturn scores\n\t\t\n\t\tloss, grads = 0, {}\n\t\t############################################################################\n\t\t# TODO: Implement the backward pass for the three-layer convolutional net, #\n\t\t# storing the loss and gradients in the loss and grads variables. Compute #\n\t\t# data loss using softmax, and make sure that grads[k] holds the gradients #\n\t\t# for self.params[k]. 
Don't forget to add L2 regularization!\t\t\t\t\t\t\t #\n\t\t############################################################################\n\t\tloss, dout = softmax_loss(scores, y)\n\t\tloss += self.reg * 0.5 * (np.power(self.params['W3'], 2).sum() + \n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tnp.power(self.params['W2'], 2).sum() + \n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tnp.power(self.params['W1'], 2).sum())\n\n\t\tdx3, grads['W3'], grads['b3'] = affine_backward(dout, cache3)\n\t\tdx2, grads['W2'], grads['b2'] = affine_relu_backward(dx3, cache2)\n\t\tdx1, grads['W1'], grads['b1'] = conv_relu_pool_backward(dx2, cache1)\n\t\t############################################################################\n\t\t#\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tEND OF YOUR CODE\t\t\t\t\t\t\t\t\t\t\t\t\t\t #\n\t\t############################################################################\n\t\t\n\t\treturn loss, grads", "def compute_loss(y, tx, w):\n e = y - tx.dot(w)\n return calculate_mse(e)" ]
[ "0.74243444", "0.7283457", "0.72514814", "0.7247183", "0.71797806", "0.71731097", "0.7170593", "0.70910347", "0.7069629", "0.7036034", "0.70345086", "0.7027396", "0.70033365", "0.7002239", "0.700087", "0.69909036", "0.69491434", "0.69352823", "0.69285876", "0.6924547", "0.68893355", "0.68825775", "0.68642175", "0.68631935", "0.68538433", "0.685248", "0.6850914", "0.6840298", "0.68314034", "0.68210995", "0.6791547", "0.67853737", "0.6784484", "0.6780831", "0.6777642", "0.67590415", "0.675557", "0.6753944", "0.67345226", "0.67290235", "0.67093027", "0.67018604", "0.6700123", "0.66966915", "0.66630435", "0.66499746", "0.66345763", "0.66033316", "0.6600189", "0.6576164", "0.6549129", "0.65484357", "0.6537313", "0.6507475", "0.6501025", "0.6501002", "0.6484432", "0.6473912", "0.6423255", "0.6420895", "0.6420895", "0.64166975", "0.64153105", "0.63893646", "0.6385742", "0.63730025", "0.63725317", "0.63652736", "0.6360316", "0.6357389", "0.63503206", "0.6332327", "0.63271815", "0.63235813", "0.63225913", "0.63141936", "0.6311804", "0.63077", "0.629877", "0.62948984", "0.62906265", "0.62864804", "0.6272038", "0.6270935", "0.6263585", "0.6260221", "0.62494403", "0.6249074", "0.62478644", "0.62464684", "0.6233774", "0.623282", "0.6230315", "0.6227338", "0.6221352", "0.6218695", "0.6213567", "0.62076616", "0.62007314", "0.6198238" ]
0.6411446
63
create an OCSVM loss for the network given in argument
def compile_update_ocsvm(nnet, inputs, targets): floatX = Cfg.floatX C = Cfg.C A = Cfg.A nu = Cfg.nu if len(nnet.all_layers) > 1: feature_layer = nnet.all_layers[-2] else: feature_layer = nnet.input_layer final_layer = nnet.ocsvm_layer trainable_params = lasagne.layers.get_all_params(final_layer, trainable=True) # Regularization (up to feature map) if Cfg.weight_decay: if Cfg.prod_penalty: l2_penalty = (1/C) * get_prod_penalty(nnet) elif Cfg.spec_penalty: l2_penalty = (1/C) * get_spectral_penalty(nnet, Cfg.include_bias) else: l2_penalty = ((1/C) * get_l2_penalty(nnet, include_bias=Cfg.include_bias, pow=Cfg.pow)) else: l2_penalty = T.cast(0, dtype='floatX') # Bias offset if Cfg.bias_offset: bias_offset = get_bias_offset(nnet) else: bias_offset = T.cast(0, dtype='floatX') # Backpropagation prediction = lasagne.layers.get_output(final_layer, inputs=inputs, deterministic=False) objective, train_acc = final_layer.objective(prediction, targets) # Normalization rep = lasagne.layers.get_output(feature_layer, inputs=inputs, deterministic=False) rep_norm = rep.norm(L=2, axis=1).dimshuffle((0, 'x')) if Cfg.ball_penalty: ball_penalty, _ = final_layer.objective( T.ones_like(rep_norm) - (rep_norm ** 2), targets) else: ball_penalty = T.cast(0, dtype='floatX') ball_penalty = (1/A) * T.cast(ball_penalty / targets.shape[0], dtype='floatX') # Output regularization if Cfg.output_penalty: l2_output = (1/C) * (T.sum(abs(final_layer.W) ** Cfg.pow) * T.sum(abs(rep) ** 2)) else: l2_output = T.cast(0, dtype='floatX') l2_output = T.cast(l2_output / targets.shape[0], dtype='floatX') # SVM parameter regularization if Cfg.Wsvm_penalty: Wsvm_penalty = T.sum(abs(final_layer.W) ** Cfg.pow) else: Wsvm_penalty = T.cast(0, dtype='floatX') # OC SVM loss has nu parameter and adds margin from origin to objective train_loss = T.cast(objective / (targets.shape[0] * nu), dtype='floatX') train_acc = T.cast(train_acc * 1. / targets.shape[0], dtype='floatX') train_obj = T.cast(floatX(0.5) * l2_penalty + floatX(0.5) * ball_penalty + floatX(0.5) * l2_output + floatX(0.5) * Wsvm_penalty + train_loss + T.sum(final_layer.b) + bias_offset, dtype='floatX') updates = get_updates(nnet, train_obj, trainable_params, solver=nnet.solver) nnet.backprop = theano.function([inputs, targets], [train_obj, train_acc], updates=updates) # Forwardpropagation test_prediction = lasagne.layers.get_output(final_layer, inputs=inputs, deterministic=True) # get network feature representation test_rep = lasagne.layers.get_output(feature_layer, inputs=inputs, deterministic=True) test_rep_norm = test_rep.norm(L=2, axis=1) if Cfg.ball_penalty: test_ball_penalty, _ = final_layer.objective( T.ones_like(test_rep_norm.dimshuffle((0, 'x'))) - (test_rep_norm.dimshuffle((0, 'x')) ** 2), targets) else: test_ball_penalty = T.cast(0, dtype='floatX') test_ball_penalty = ((1/A) * T.cast( test_ball_penalty / targets.shape[0], dtype='floatX')) # Output regularization if Cfg.output_penalty: test_l2_output = (1/C) * (T.sum(abs(final_layer.W) ** Cfg.pow) * T.sum(abs(test_rep) ** 2)) else: test_l2_output = T.cast(0, dtype='floatX') test_l2_output = T.cast(test_l2_output / targets.shape[0], dtype='floatX') objective, test_acc = final_layer.objective(test_prediction, targets) test_loss = T.cast(objective / (targets.shape[0] * nu), dtype='floatX') test_acc = T.cast(test_acc * 1. 
/ targets.shape[0], dtype='floatX') test_obj = T.cast(floatX(0.5) * l2_penalty + floatX(0.5) * test_ball_penalty + floatX(0.5) * test_l2_output + floatX(0.5) * Wsvm_penalty + test_loss + T.sum(final_layer.b), dtype='floatX') nnet.forward = theano.function([inputs, targets], [test_obj, test_acc, test_prediction, floatX(0.5) * l2_penalty, floatX(0.5) * test_l2_output, test_rep, test_rep_norm, test_loss, floatX(0.5) * test_ball_penalty])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def loss_fn(self, targets, outputs, model):", "def loss(self, **kwargs):\n pass", "def compute_loss(self, obs, returns):", "def compute_loss(self):", "def _create_loss(self):\n\n with tf.name_scope(\"loss\"):\n \n # gini=(tf.nn.l2_loss( self.score))/100000\n gini = tf.losses.softmax_cross_entropy(self.score, 0*self.score)\n \n promo_prob=tf.reduce_sum(tf.multiply(self.score, self.cohort_weight),\n axis=1)\n inc_value = tf.reduce_mean(tf.multiply(promo_prob, self.value))- self.control_value\n inc_cost = tf.reduce_mean( tf.multiply(promo_prob, self.cost)) - self.control_cost\n \n\n\n # determine loss function based on self.obj_rule\n if self.obj_rule == 'cpiv':\n self.objective = inc_cost / inc_value\n\n elif self.obj_rule == 'ivc':\n # maximize ivc\n self.objective = - inc_value / inc_cost\n\n elif self.obj_rule == 'lagrangian':\n assert self.shadow is not None, 'Need to pass in shadow value if use lagrangian as obj_rule.'\n self.objective = inc_cost - self.shadow * inc_value\n\n elif self.obj_rule == 'value':\n # maximize delta values\n self.objective = - inc_value\n\n # use only cost as objective\n elif self.obj_rule == 'cost':\n # maximize delta cost\n self.objective = - inc_cost\n\n else:\n raise Exception('Invalid obj_rule!')\n\n # regularization\n reg_loss = tf.reduce_sum(tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES))\n # weights = tf.trainable_variables() # all vars of your graph\n # reg_loss = tf.norm( weights,ord=1)\n\n # final loss\n self.loss = self.objective +reg_loss+.1*gini", "def node_loss_construct(cfg, model_name='node_loss', **kwargs):\n losses = node_loss_dict()\n loss_cfg = cfg[model_name]\n name = loss_cfg.get('name', 'type')\n if not name in losses:\n raise Exception(\"Unknown node loss name provided:\", name)\n\n return losses[name](loss_cfg, **kwargs)", "def loss_fun(para):\n\n return -data_processing(my_cir(para))", "def loss(self):\n return 'mse'", "def loss_fn(params):\n logits = models.ProgramTransformer(config).apply(\n {'params': params},\n inputs,\n outputs,\n programs,\n rngs={'dropout': train_rng})\n loss, weight_sum = compute_weighted_cross_entropy(logits, programs, weights)\n mean_loss = loss / weight_sum\n return mean_loss, logits", "def compute_loss(self, **kwargs):\n raise NotImplementedError", "def loss(self, X, labels):\n features = self.get_conv_features(X)\n loss = blah\n return loss", "def get_loss(self, Loss, results, inputs, device):\n return", "def loss_fn(outputs, labels):\n #print('this is outputs', outputs.shape) # 2,3,128,128\n #print('this is labels', labels.shape) # 2,3,128,128\n N, C, H, W = outputs.shape\n \n# outputs = unnormalize(outputs, mean=[0.51371954, 0.40949144, 0.35572536], std= [0.2926419, 0.26180502, 0.25512055])\n # check if we normalize label images #labels = unnormalize(labels, mean=[0.53459634,0.39673596,0.33788489], std= [0.29101071,0.26140346,0.25485687])\n \n mse_loss = torch.sum((outputs - labels) ** 2) / N / C # each photo, each channel\n mse_loss *= 255 * 255\n mse_loss /= H * W \n # average loss on each pixel(0-255)\n return mse_loss", "def loss_creator(config):\n return torch.nn.BCELoss()", "def compute_loss(self, features, mode, params, precomputed):\n raise NotImplementedError(\"Model does not implement loss.\")", "def _build_loss(self, **kwargs):\n pass", "def _compute_loss(self, predictions, targets, **params):\n pass", "def loss(self, X, y):\n pass", "def compute_loss(self, *args, **kwargs):\n raise NotImplementedError", "def model_loss(inp, fake, real_label, fake_label):\n \n \n Dreal,realcls,R1 = 
gradpen(inp)\n [Dfake,fakecls] = D(fake)\n # 1. Adversarial loss\n \n glabel = tf.ones_like(Dfake)#tf.random.uniform((Dfake.shape), 1-LN, 1)\n dlabelr = tf.ones_like(Dreal)#tf.random.uniform((Dreal.shape), 1-LN, 1)\n dlabelf = tf.zeros_like(Dfake)#tf.random.uniform((Dfake.shape), 0, LN)\n \n \n \n # D has no sigmoid activation: \"from_logits=True\"\n real_loss = tf.keras.losses.binary_crossentropy(\n dlabelr, Dreal, from_logits=True)\n real_loss = tf.reduce_mean(real_loss)\n \n fake_loss = tf.keras.losses.binary_crossentropy(\n dlabelf, Dfake, from_logits=True)\n fake_loss = tf.reduce_mean(fake_loss)\n \n Dadv = 0.5*(real_loss+fake_loss)\n \n Gadv = tf.keras.losses.binary_crossentropy(\n glabel, Dfake, from_logits=True)\n Gadv = tf.reduce_mean(Gadv)\n \n # 2. Classification loss\n \n Dcls = tf.keras.losses.binary_crossentropy(real_label, realcls, from_logits=True)\n Dcls = tf.reduce_mean(Dcls)\n \n Gcls = tf.keras.losses.binary_crossentropy(fake_label, fakecls, from_logits=True)\n Gcls = tf.reduce_mean(Gcls)\n \n # 3. Total loss\n \n Dloss = Dadv + (GAMMA/2)*R1 + LAMBDA_CLS*Dcls\n \n Gloss = Gadv + LAMBDA_CLS*Gcls\n \n return (Dloss, Dadv, Dcls, R1), (Gloss, Gadv, Gcls)", "def loss(self, X, labels):\n features = self.get_conv_feats(X)\n loss = blah\n return loss", "def loss(loss_name):\n \n def contrastive_loss(y_true, y_pred, margin = 1):\n \"\"\"Implementation of the triplet loss function\n\n\n Contrastive loss = 0.5 * mean( (1-true_value) * square(distance) + true_value * square( max(margin-distance, 0) ))\n\n Args:\n y_true (int): true label, positive pair (same class) -> 0, \n negative pair (different class) -> 1\n \n y_pred (list): python list containing two objects in a pair of tensors:\n left : the encodings for one image data in a pair\n right : the encodings for the other image data in a pair\n margin (float, optional): m > 0 determines how far the embeddings of \n a negative pair should be pushed apart. Defaults to 1.\n\n\n Returns:\n loss (float): real number, value of the loss\n \"\"\"\n\n left = y_pred[0]\n right = y_pred[1]\n\n distance = tf.math.sqrt(tf.math.reduce_sum(tf.math.square(left - right), axis=-1))\n\n loss_positive = tf.math.square(distance)\n loss_negative = tf.math.square(tf.maximum(0., margin - distance))\n \n loss = y_true * loss_negative + (1 - y_true) * loss_positive\n loss = 0.5 * tf.math.reduce_mean(loss)\n\n return loss\n\n def triplet_loss(y_true, y_pred, margin = 1):\n \"\"\"Implementation of the triplet loss function\n\n Arguments:\n y_true : true labels, required when you define a loss in Keras, \n not applied in this function.\n\n y_pred (list): python list containing three objects:\n anchor : the encodings for the anchor data\n positive : the encodings for the positive data (similar to anchor)\n negative : the encodings for the negative data (different from anchor)\n \n margin (float, optional): m > 0 determines how far the embeddings of \n a negative data should be pushed apart. 
Defaults to 1.\n\n Returns:\n loss (float): real number, value of the loss\n \"\"\"\n\n anchor = y_pred[0]\n positive = y_pred[1]\n negative = y_pred[2]\n\n # squared distance between the anchor and the positive\n pos_dist = tf.math.reduce_sum(tf.math.square(anchor - positive), axis=-1)\n\n # squared distance between the anchor and the negative\n neg_dist = tf.math.reduce_sum(tf.math.square(anchor - negative), axis=-1)\n\n # compute loss\n basic_loss = margin + pos_dist - neg_dist\n loss = tf.math.maximum(basic_loss,0.0)\n loss = tf.math.reduce_mean(loss)\n return loss\n\n \n if loss_name == 'contrastive_loss':\n return contrastive_loss\n \n if loss_name == 'triplet_loss':\n return triplet_loss", "def loss(A, Y):\n return A - Y", "def _create_loss(self):\n with tf.device('/cpu:0'):\n with tf.name_scope('loss'):\n self.loss = tf.reduce_mean(\n tf.nn.softmax_cross_entropy_with_logits(\n labels=self.labels_placeholder, \n logits=self.logits, name='loss'))", "def _create_loss_op(self):\n # 1.) The reconstruction loss, which forces the NN towards reconstructing more accurately the\n # given input. This function is configurable, but usually it is the Bernoulli negative log-likelihood.\n if self.cost_function == 'abs':\n reconstr_loss = tf.reduce_sum(tf.abs(self.x_decoded - self.x_in), 1)\n elif self.cost_function in ('mse', 'l2', 'square'):\n reconstr_loss = tf.reduce_sum(tf.squared_difference(self.x_in, self.x_decoded), 1)\n elif self.cost_function in ('xentropy', 'log'):\n reconstr_loss = \\\n -tf.reduce_sum(self.x_in * tf.log(1e-10 + self.x_decoded)\n + (1 - self.x_in) * tf.log(1e-10 + 1 - self.x_decoded),\n 1)\n else:\n raise ValueError(self.cost_function, \"Unknown cost function name!\")\n\n # 2.) The latent loss, which is defined as the Kullback Leibler divergence\n ## between the distribution in latent space induced by the encoder on\n # the data and some prior. This acts as a kind of regularizer.\n # This can be interpreted as the number of \"nats\" required\n # for transmitting the the latent space distribution given\n # the prior.\n latent_loss = -0.5 * tf.reduce_sum(1. 
+ self.z_log_sigma_sq\n - tf.square(self.z_mean)\n - tf.exp(self.z_log_sigma_sq), 1)\n\n self.loss_op = tf.reduce_mean(reconstr_loss + latent_loss) # average over batch\n tf.add_to_collection(\"losses\", self.loss_op)\n\n if self.learning_rate is not None:\n global_step = tf.train.get_or_create_global_step()\n self.train_op = tf.train.AdamOptimizer(learning_rate=self.learning_rate).minimize(\n self.loss_op,\n global_step=global_step,\n var_list=tf.get_collection(self.training_scope) if self.training_scope is not None else None)\n\n tf.add_to_collection(\"train_ops\", self.train_op)\n tf_logging.info(\"Added AdamOptimizer with learning rate: %.8f\" % self.learning_rate)\n\n tf.summary.scalar(\"latent_loss\", tf.reduce_mean(latent_loss))\n tf.summary.scalar(\"reconstruction_loss\", tf.reduce_mean(reconstr_loss))\n tf.summary.scalar(\"vae_loss\", self.loss_op)", "def loss_(self, batch):\n raise NotImplementedError", "def build_loss(self):\n if self.mode != \"encode\":\n total_loss = tf.losses.get_total_loss()\n tf.summary.scalar(\"losses/total\", total_loss)\n\n self.total_loss = total_loss", "def build_loss(self):\n\n opt = tf.train.AdamOptimizer(self.learning_rate)\n mse = tf.losses.mean_squared_error(self.label[-1], self.outputs[-1])\n loss = tf.losses.get_total_loss()\n\n return mse, loss", "def define_loss(name_loss):\n call_dict = {\n \"pixel_weighted_cross_entropy\": pixel_weighted_cross_entropy,\n \"MeanSquaredLogarithmicError\": tf.keras.losses.MeanSquaredLogarithmicError(),\n \"MeanAbsolutePercentageError\": tf.keras.losses.MeanAbsolutePercentageError(),\n \"MeanSquaredError\": tf.keras.losses.MeanSquaredError(),\n \"MeanAbsoluteError\": tf.keras.losses.MeanAbsoluteError(), \n }\n loss = call_dict[name_loss]\n return loss", "def loss_fun(model: GPModel, params: dict) -> float:\n py = model.module.call(params, train_ds['index_points'])\n return -py.log_prob(train_ds['y'])", "def get_loss(name: str):\n if name == 'mse' or name is None:\n loss = nn.MSELoss()\n elif name == 'cp':\n loss = CosineProximityLoss()\n elif name == 'mae':\n loss = nn.L1Loss()\n elif name == 'bce':\n loss = nn.BCEWithLogitsLoss()\n else:\n raise ValueError(f'Loss function {name} not supported.')\n return loss", "def but_test(self):\n if not self.path:\n tk.messagebox.showerror(\"Error\", \"Open file first\")\n return\n elif not self.nn_obj:\n tk.messagebox.showerror(\"Error\", \"Create or open NN\")\n return\n elif not self.nn_obj and not self.nn_in:\n tk.messagebox.showerror(\"Error\", \"Open file and create NN\")\n return\n if isinstance(self.nn_obj, dict):\n y_pred=pred(self.nn_obj, self.nn_in).reshape(self.nn_out.shape)\n test_loss=loss(y_pred, self.nn_out, self.nn_obj) \n elif isinstance(self.nn_obj, Net_tr):\n y_pred=pred(self.nn_obj, self.nn_in)\n test_loss=loss(y_pred, self.nn_out, self.nn_obj).item()\n tk.messagebox.showinfo(\"Loss\", \"Loss=\" + str(test_loss)+\" %\")", "def get_loss(self, xs, y):\n \"*** YOUR CODE HERE ***\"\n y_pred = self.run(xs)\n return nn.SoftmaxLoss(y_pred,y)", "def getLoss(self, x_test, t_test):\n x_t = Variable(x_test, requires_grad=False)\n #Feed inputes into neural network\n t_pred = self.model(x_t)\n #Now lets compute out loss\n loss = self.loss_fn(t_pred, t_test)\n return loss", "def get_loss(self, xs, y):\n \"*** YOUR CODE HERE ***\"\n predictedY = self.run(xs)\n return nn.SoftmaxLoss(predictedY, y)\n # return nn.SquareLoss(predictedY, y)", "def loss_function(self, train_head, train_tail, train_relation, train_head_corrupted, train_tail_corrupted):\n\n # train_head = 
tf.nn.l2_normalize(train_head, 1)\n # train_tail = tf.nn.l2_normalize(train_tail, 1)\n # train_head_corrupted = tf.nn.l2_normalize(train_head_corrupted, 1)\n # train_tail_corrupted = tf.nn.l2_normalize(train_tail_corrupted, 1)\n\n # loss = tf.reduce_mean(\n # tf.maximum(self.dict_paras['margin']\n # + self.distance(tf.add(train_head, train_relation), train_tail)\n # - self.distance(tf.add(train_head_corrupted, train_relation), train_tail_corrupted), 0.))\n\n loss = tf.reduce_mean(self.distance(tf.add(train_head, train_relation), train_tail))\n\n return loss", "def loss_fn(self, recons, inputs, mu, log_var, **kwargs):\n# kld_weight = kwargs['M_N'] # Account for the minibatch samples from the dataset\n recons_loss = F.mse_loss(recons, inputs)\n# recons_loss = F.binary_cross_entropy(recons, inputs)\n KLD = torch.mean(-0.5 * torch.sum(1 + log_var - mu**2 - log_var.exp(), dim=1), dim=0)\n loss = recons_loss - KLD\n return loss, recons_loss, KLD", "def _define_loss(self):\n\n cost = []\n unit_cost = []\n for nn in range(len(self.ffnet_out)):\n data_out = self.data_out_batch[nn]\n if self.filter_data:\n # this will zero out predictions where there is no data,\n # matching Robs here\n pred = tf.multiply(\n self.networks[self.ffnet_out[nn]].layers[-1].outputs,\n self.data_filter_batch[nn])\n else:\n pred = self.networks[self.ffnet_out[nn]].layers[-1].outputs\n\n nt = tf.cast(tf.shape(pred)[0], tf.float32)\n # define cost function\n if self.noise_dist == 'gaussian':\n with tf.name_scope('gaussian_loss'):\n cost.append(tf.nn.l2_loss(data_out - pred) / nt)\n unit_cost.append(tf.reduce_mean(tf.square(data_out-pred), axis=0))\n\n elif self.noise_dist == 'poisson':\n with tf.name_scope('poisson_loss'):\n\n if self.poisson_unit_norm is not None:\n # normalize based on rate * time (number of spikes)\n cost_norm = tf.multiply(self.poisson_unit_norm[nn], nt)\n else:\n cost_norm = nt\n\n cost.append(-tf.reduce_sum(tf.divide(\n tf.multiply(data_out, tf.log(self._log_min + pred)) - pred,\n cost_norm)))\n\n unit_cost.append(-tf.divide(\n tf.reduce_sum(\n tf.multiply(\n data_out, tf.log(self._log_min + pred)) - pred, axis=0),\n cost_norm))\n\n elif self.noise_dist == 'bernoulli':\n with tf.name_scope('bernoulli_loss'):\n # Check per-cell normalization with cross-entropy\n # cost_norm = tf.maximum(\n # tf.reduce_sum(data_out, axis=0), 1)\n cost.append(tf.reduce_mean(\n tf.nn.sigmoid_cross_entropy_with_logits(\n labels=data_out, logits=pred)))\n unit_cost.append(tf.reduce_mean(\n tf.nn.sigmoid_cross_entropy_with_logits(\n labels=data_out, logits=pred), axis=0))\n else:\n TypeError('Cost function not supported.')\n\n self.cost = tf.add_n(cost)\n self.unit_cost = unit_cost\n\n # Add regularization penalties\n reg_costs = []\n with tf.name_scope('regularization'):\n for nn in range(self.num_networks):\n reg_costs.append(self.networks[nn].define_regularization_loss())\n self.cost_reg = tf.add_n(reg_costs)\n\n self.cost_penalized = tf.add(self.cost, self.cost_reg)\n\n # save summary of cost\n # with tf.variable_scope('summaries'):\n tf.summary.scalar('cost', self.cost)\n tf.summary.scalar('cost_penalized', self.cost_penalized)\n tf.summary.scalar('reg_pen', self.cost_reg)", "def _compute_loss(self, parameters, inputs, ground_truth):\n predictions = self.network_forward(parameters, inputs)\n loss = np.mean((ground_truth - predictions) ** 2)\n return loss", "def _compute_loss(self, parameters, inputs, ground_truth):\n predictions = self.network_forward(parameters, inputs)\n loss = np.mean((ground_truth - predictions) ** 
2)\n return loss", "def loss_fn(self, lbl, y):\n\n binlbl = self._to_device(lbl[:,0]>.5)\n # center = self._to_device(lbl[:,3]) \n offset = 5. * self._to_device(lbl[:,1:]) \n\n loss = self.criterion(y[:,:2], offset) \n loss2 = self.criterion2(y[:,2], binlbl)\n\n # loss3 = self.criterion(y[:,3], center)\n\n loss = loss + loss2\n return loss", "def loss_fn(model):\n with flax.nn.stateful(state) as new_state:\n with flax.nn.stochastic(prng_key):\n logits = model(batch['image'])\n loss = cross_entropy_loss(logits, batch['label'])\n # TODO(britefury): check if applying L2 regularization to weights but\n # *not* biases improves results\n weight_penalty_params = jax.tree_leaves(model.params)\n weight_l2 = sum([jnp.sum(x ** 2)\n for x in weight_penalty_params\n if x.ndim > 1])\n weight_penalty = l2_reg * 0.5 * weight_l2\n loss = loss + weight_penalty\n return loss, (new_state, logits)", "def get_loss(self, x, y):\n \"*** YOUR CODE HERE question 3 ***\"\n return nn.SoftmaxLoss(self.run(x), y)", "def loss_fn(model):\n with flax.deprecated.nn.stateful() as state:\n with flax.deprecated.nn.stochastic(dropout_rng):\n logits = model(example, train=True)\n loss, weight_sum = compute_weighted_cross_entropy(logits, targets)\n mean_loss = loss / weight_sum\n return mean_loss, (logits, state)", "def loss_perceptual(self, vgg_out, vgg_gt, vgg_comp): \n loss = 0\n for o, c, g in zip(vgg_out, vgg_comp, vgg_gt):\n loss += self.l1(o, g) + self.l1(c, g)\n return loss", "def make_loss(self, label, loss_type='xent'):\n with tf.variable_scope('loss'):\n pred_flat = tf.reshape(self.pred, [-1, self.class_num])\n label = tf.image.resize_nearest_neighbor(label, tf.stack(self.pred.get_shape()[1:3]))\n y_flat = tf.reshape(tf.squeeze(label, axis=[3]), [-1, ])\n indices = tf.squeeze(tf.where(tf.less_equal(y_flat, self.class_num - 1)), 1)\n gt = tf.gather(y_flat, indices)\n prediction = tf.gather(pred_flat, indices)\n\n pred = tf.argmax(prediction, axis=-1, output_type=tf.int32)\n self.loss_iou = self.create_resetable_metric(tf.metrics.mean_iou, var_name='loss_iou',\n scope=tf.get_variable_scope().name,\n labels=gt, predictions=pred, num_classes=self.class_num,\n name='loss_iou')\n\n if loss_type == 'xent':\n loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(logits=prediction, labels=gt))\n l2_losses = [self.weight_decay * tf.nn.l2_loss(v) for v in tf.trainable_variables()\n if 'weight' in v.name]\n self.loss = tf.reduce_mean(loss) + tf.add_n(l2_losses)\n self.loss_xent = self.create_resetable_metric(tf.metrics.mean, var_name='loss_xent',\n scope=tf.get_variable_scope().name,\n values=self.loss, name='loss_xent')", "def evaluate_loss(\n model,\n ds,\n loss_func_name = 'CE'\n):\n loss = 0\n if loss_func_name == 'CE':\n loss_func = tf.keras.losses.SparseCategoricalCrossentropy(\n reduction=tf.keras.losses.Reduction.SUM\n )\n else:\n raise ValueError(f'Not supported loss function {loss_func_name}!')\n n = 0\n for batch_x, batch_y in ds:\n batch_output = get_model_output(model, batch_x)\n loss += loss_func(batch_y, batch_output)\n n += batch_y.shape[0]\n return loss / n", "def get_loss(self, xs, y):\n \"*** YOUR CODE HERE question 4 ***\"\n return nn.SoftmaxLoss(self.run(xs), y)", "def get_loss(self):\r\n\r\n if F.loss_type==\"cosine\":\r\n self.losscos = r2d*tf.acos(1-tf.losses.cosine_distance(tf.nn.l2_normalize(self.labels,1), tf.nn.l2_normalize(self.out, 1), dim=1))\r\n self.loss = tf.losses.cosine_distance(tf.nn.l2_normalize(self.labels,1), tf.nn.l2_normalize(self.out, 1), dim=1)\r\n elif 
F.loss_type==\"mse2d\":\r\n xl, yl, zl = tf.split(self.labels, 3, axis=1)\r\n xo, yo, zo = tf.split(self.out, 3, axis=1)\r\n thetal, thetao = tf.asin(-yl), tf.asin(-yo)\r\n phil, phio = tf.atan2(-zl, -xl), tf.atan2(-zo, -xo)\r\n self.lb = tf.concat([thetal, phil], axis=1)\r\n self.ob = tf.concat([thetao, phio], axis=1)\r\n self.loss = tf.scalar_mul(tf.constant(r2d), tf.losses.mean_squared_error(self.lb, self.ob, 2))\r\n elif F.loss_type==\"mse3d\":\r\n self.loss = tf.losses.mean_squared_error(tf.nn.l2_normalize(self.labels, 0), tf.nn.l2_normalize(self.out, 0))", "def loss_op(self):\n return self.loss", "def loss(returns, predicted_output):\n ################################\n # YOUR IMPLEMENTATION HERE #\n ################################\n raise NotImplementedError", "def get_loss(self, x, y):\n \"*** YOUR CODE HERE ***\"\n y_pred = self.run(x)\n return nn.SoftmaxLoss(y_pred,y)", "def make_loss(self, logit=None, labels=None):\r\n return nn.functional.mse_loss(logit, labels, reduction='mean') # The MSE Loss\r", "def get_loss_fn():\n return reconstruction", "def tv_loss(x, name='tv_loss'):\n raise NotImplementedError(\"Please use tensorflow total_variation loss.\")", "def loss(self, x, y):\n raise NotImplementedError", "def get_loss(self, x, y):\n \"*** YOUR CODE HERE question 2 ***\"\n return nn.SquareLoss(self.run(x), y)", "def compute_loss(self):\n self.prototypes = self.compute_prototypes()\n self.test_logits = self.compute_logits()\n loss = tf.nn.sparse_softmax_cross_entropy_with_logits(\n labels=self.episode.test_labels, logits=self.test_logits)\n cross_entropy_loss = tf.reduce_mean(loss)\n regularization = tf.reduce_sum(\n tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES))\n loss = cross_entropy_loss + self.weight_decay * regularization\n return loss", "def add_loss_op(self, pred):\n ### YOUR CODE HERE\n loss = cross_entropy_loss(self.labels_placeholder,pred)\n ### END YOUR CODE\n return loss", "def loss(self, T, mode):\n self.circuit.loss(T, mode)", "def genLoss(self, *data):\r\n _, (x_unlab, _) = data\r\n z = self.getInputNoise(self.hypers['ul_BS'])\r\n fake_logits = self.D(self.G(z))\r\n g_losses = -1*logOneMinusSoftmax(fake_logits)[:,self.D.numClasses-1]\r\n return torch.mean(g_losses)", "def calculate_loss(self, output, target, **kwargs):\n ##dont do aggregation\n raise NotImplementedError", "def calculate_loss(self, output, target, **kwargs):\n ##dont do aggregation\n raise NotImplementedError", "def meta_amortized_loss(network, model_indices, params, sim_data):\n\n out_inference, out_evidence = network(model_indices, params, sim_data)\n if out_inference is not None:\n z, log_det_J = out_inference\n kl_loss = tf.reduce_mean(0.5 * tf.square(tf.norm(z, axis=-1)) - log_det_J)\n else:\n kl_loss = 0\n \n if out_evidence is not None:\n model_probs = out_evidence\n model_probs = tf.clip_by_value(model_probs, 1e-15, 1 - 1e-15)\n log_loss = -tf.reduce_mean(tf.reduce_sum(model_indices * tf.math.log(model_probs), axis=1))\n else:\n log_loss = 0\n return kl_loss + log_loss", "def get_loss(self, x, y):\n \"*** YOUR CODE HERE ***\"\n predictedY = self.run(x)\n return nn.SoftmaxLoss(predictedY, y)", "def _initLoss(self):\n\n return torch.nn.MSELoss()", "def get_loss(self, outputs, labels):\n try:\n assert self._loss in ['mse','mae','l1','l2','huber','logcosh','bce','contrastive'], 'Specify correct loss function'\n except AssertionError as msg:\n sys.exit(msg)\n \n if self._loss == 'mse' or self._loss == 'l2':\n # L2 loss function\n self.criterion = lambda x,y: torch.pow(x - y,2)\n loss = 
self.criterion(outputs, labels)\n \n # Adding up the losses (L1 loss) or meaning the losses (MAE loss)\n # of all batch instances\n if self._loss == 'mse':\n loss = torch.mean(loss)\n elif self._loss == 'l2':\n loss = torch.sum(loss)\n \n elif self._loss == 'mae' or self._loss == 'l1':\n # L1 loss function\n self.criterion = lambda x,y: torch.abs(x - y)\n loss = self.criterion(outputs, labels)\n \n # Adding up the losses (L1 loss) or meaning the losses (MAE loss)\n # of all batch instances\n if self._loss == 'mae':\n loss = torch.mean(loss)\n elif self._loss == 'l1':\n loss = torch.sum(loss)\n \n elif self._loss == 'huber':\n # Huber loss function\n self.criterion = torch.nn.SmoothL1Loss()\n loss = self.criterion(outputs.float(), labels.float())\n \n # Adding up the losses of all batch instances\n loss = torch.mean(loss)\n \n elif self._loss == 'logcosh':\n # Log-cosh loss function\n loss = torch.log(torch.cosh(outputs.float() - labels.float()))\n \n # Adding up the losses of all batch instances\n loss = torch.sum(loss) \n \n elif self._loss == 'bce':\n if self._dist_fn == 'cos':\n self.criterion = nn.BCEWithLogitsLoss()\n else:\n self.criterion = nn.BCELoss()\n loss = self.criterion(outputs.float(), labels.float())\n \n elif self._loss == 'contrastive':\n margin = 1\n loss = torch.sum((1-labels) * torch.pow(outputs,2)+ labels * torch.pow(torch.clamp(margin - outputs, min = 0.0),2))\n\n return loss", "def compute_loss(y, tx, w):\n e = y - tx.dot(w)\n return calculate_mse(e)", "def detector_loss(self, input, target, mask=None, loss_type=\"softmax\"):\n if loss_type == \"l2\":\n loss_func = nn.MSELoss(reduction=\"mean\")\n loss = loss_func(input, target)\n elif loss_type == \"softmax\":\n loss_func_BCE = nn.BCELoss(reduction='none').cuda()\n loss = loss_func_BCE(nn.functional.softmax(input, dim=1), target)\n loss = (loss.sum(dim=1) * mask).sum()\n loss = loss / (mask.sum() + 1e-10)\n return loss", "def eval_loss(self, input_dataset, target_dataset):\n\t\t#######################################################################\n\t\t# ** START OF YOUR CODE **\n\t\t#######################################################################\n\t\tprediction = self.network.forward(input_dataset)\n\t\tloss = self._loss_layer.forward(prediction, target_dataset)\n\t\t\n\t\treturn loss\n\t\t#######################################################################\n\t\t# ** END OF YOUR CODE **\n\t\t#######################################################################", "def compute_loss(self, model_out: Dict[str, torch.Tensor], *args) -> torch.Tensor:\n ref_anchor = model_out[\"ref_sentemb\"]\n src_anchor = model_out[\"src_sentemb\"]\n positive = model_out[\"pos_sentemb\"]\n negative = model_out[\"neg_sentemb\"]\n return self.loss(src_anchor, positive, negative) + self.loss(\n ref_anchor, positive, negative\n )", "def _loss_out(self, e_out):\n prior_out = 1 # self.prior\n\n if self.loss_type_out == 'exponential':\n loss = torch.sum(e_out, -1, keepdim=True)\n\n elif self.loss_type_out == 'chi_squared':\n a_out = e_out + prior_out\n df = prior_out + 2\n loss = torch.sum((df / 2 - 1) * torch.log(a_out) + a_out / 2, dim=-1, keepdim=True)\n\n elif self.loss_type_out == 'uncertainty':\n S_out = (e_out + prior_out).sum(-1, keepdim=True)\n unc = self.n_classes / S_out\n loss = -unc\n\n elif self.loss_type_out == 'kl':\n a_out = e_out + prior_out\n a_target = torch.full(a_out.size(), prior_out, dtype=float, device=e_out.device)\n kl = kl_dirichlet(a_out, a_target)\n loss = kl\n\n elif self.loss_type_out == 
'kl_reverse':\n a_out = e_out + prior_out\n a_target = torch.full(a_out.size(), prior_out, dtype=float, device=e_out.device)\n kl = kl_dirichlet(a_target, a_out)\n loss = kl\n\n elif self.loss_type_out == 'kl_symmetric':\n a_out = e_out + prior_out\n a_target = torch.full(a_out.size(), prior_out, dtype=float, device=e_out.device)\n kl = .5*(kl_dirichlet(a_out, a_target) + kl_dirichlet(a_target, a_out))\n loss = kl\n\n elif self.loss_type_out == 'chernoff_distance':\n a_out = e_out + prior_out\n a_target = torch.full(a_out.size(), prior_out, dtype=float, device=e_out.device)\n dist = chernoff_distance(a_out, a_target)\n loss = dist\n\n return loss", "def _make_loss_test(self):\n with context.context(training=False):\n prediction = self(*self.inputs)\n thecost = self.cost(self.target, prediction)\n return theano.function(self.inputs + [self.target], thecost)", "def loss_op(pred, y, index, loss_func):\n loss = 0\n for node_type in pred:\n idx = index[node_type]\n loss += loss_func(pred[node_type][idx], y[node_type])\n return loss", "def compute_loss(\n action_probs: tf.Tensor, values: tf.Tensor, returns: tf.Tensor\n) -> tf.Tensor:\n\n advantage = returns - values\n td = tf.subtract(returns, values)\n\n # actor\n # action_log_probs = tf.math.log(action_probs)\n # actor_loss = -tf.math.reduce_sum(action_log_probs * advantage)\n action_log_probs = tf.math.log(action_probs)\n actor_loss = -tf.math.reduce_mean(action_log_probs * td)\n\n # critic\n # td = tf.subtract(returns, values)\n # critic_loss = tf.reduce_mean(tf.square(td))\n critic_loss = huber_loss(values, returns)\n\n tf.print(\"a_loss:\", actor_loss, \"c_loss:\", critic_loss)\n\n return actor_loss + critic_loss", "def heteroscedastic_loss(network, params, x):\n\n pred_mean, pred_var = network(x)\n logvar = tf.reduce_sum(0.5 * tf.math.log(pred_var), axis=-1)\n squared_error = tf.reduce_sum(0.5 * tf.math.square(params - pred_mean) / pred_var, axis=-1)\n loss = tf.reduce_mean(squared_error + logvar)\n return loss", "def compute_loss(self):\n self.test_logits = self.compute_logits()\n loss = tf.nn.sparse_softmax_cross_entropy_with_logits(\n labels=self.data.test_labels, logits=self.test_logits)\n cross_entropy_loss = tf.reduce_mean(loss)\n regularization = tf.reduce_sum(\n tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES))\n loss = cross_entropy_loss + self.weight_decay * regularization\n return loss", "def loss_vgg(style_images, content_image, output_images, vggfile):\n c_layers = C_WEIGHTS.keys()\n s_layers = S_WEIGHTS.keys()\n vgg16_filters = load_vgg(vggfile)\n vgg16 = nn_build.Network(\n VGG16DESC, 'vgg16', initial=vgg16_filters, process=True)\n c_net = vgg16.layers(content_image, c_layers)\n\n c_loss = 0.\n s_loss = 0.\n tv_loss = 0.\n for style in style_images:\n s_net = vgg16.layers(style_images[style], s_layers)\n o_net = vgg16.layers(output_images[style], set(c_layers+s_layers))\n for layer in c_layers:\n _, h, w, c = c_net[layer].get_shape().as_list()\n c_loss += C_WEIGHTS[layer]*tf.nn.l2_loss(\n o_net[layer]-c_net[layer])/(h*w*c)\n for layer in s_layers:\n bs, _, _, c = o_net[layer].get_shape().as_list()\n s_loss += S_WEIGHTS[layer]*tf.nn.l2_loss(\n Gram(o_net[layer], bs) - Gram(s_net[layer], bs))\n tv_loss += TV_WEIGHTS*(\n tf.nn.l2_loss(output_images[style][:,1:,:,:]\n - output_images[style][:,:-1,:,:])\n + tf.nn.l2_loss(output_images[style][:,:,1:,:]\n - output_images[style][:,:,:-1,:]))\n style_num = len(style_images)\n return c_loss/style_num, s_loss/style_num, tv_loss/style_num", "def loss_total(self, mask):\n\n def 
loss(y_true, y_pred):\n\n # Compute predicted image with non-hole pixels set to ground truth\n y_comp = mask * y_true + (1-mask) * y_pred\n\n # Compute the vgg features. \n if self.vgg_device:\n with tf.device(self.vgg_device):\n vgg_out = self.vgg(y_pred)\n vgg_gt = self.vgg(y_true)\n vgg_comp = self.vgg(y_comp)\n else:\n vgg_out = self.vgg(y_pred)\n vgg_gt = self.vgg(y_true)\n vgg_comp = self.vgg(y_comp)\n \n # Compute loss components\n l1 = self.loss_valid(mask, y_true, y_pred)\n l2 = self.loss_hole(mask, y_true, y_pred)\n l3 = self.loss_perceptual(vgg_out, vgg_gt, vgg_comp)\n l4 = self.loss_tv(mask, y_comp)\n l5 = - 0.5 * K.sum(1 + self.z_log_var -self.cl - K.square(self.z_mean)/K.exp(self.cl) - K.exp(self.z_log_var)/K.exp(self.cl))\n # Return loss function\n return l1 + 6*l2 + 0.05*l3 + 0.1*l4 +l5 \n return loss", "def loss(self, input_val_dict):\n\n sess = tf.get_default_session()\n feed_dict = self.create_feed_dict(input_val_dict)\n loss = sess.run(self._loss, feed_dict=feed_dict)\n return loss", "def compute_loss(y, tx, w):\n # ***************************************************\n # INSERT YOUR CODE HERE\n # TODO: compute loss by MSE / MAE\n # ***************************************************\n \n # vector e\n e = compute_e(y, tx, w)\n N = compute_N(e)\n L_MSE = np.dot(np.matrix.transpose(e), e)\n L_MSE = L_MSE / (2 * N)\n \n return L_MSE", "def get_loss_func(args: Namespace) -> nn.Module:\n if args.dataset_type == 'classification':\n return nn.BCEWithLogitsLoss(reduction='none')\n\n if args.dataset_type == 'regression':\n return nn.MSELoss(reduction='none')\n\n if args.dataset_type == 'multiclass':\n return nn.CrossEntropyLoss(reduction='none')\n\n raise ValueError(f'Dataset type \"{args.dataset_type}\" not supported.')", "def loss_graph(self, training_costs, test_costs, learning_rate, training_accuracy, test_accuracy, val_accuracy,\n layers, data_size, n_neighbours, dropout_layer, dropout_rate):\n\n plt.plot(training_costs, label=\"Training loss\")\n plt.plot(test_costs, label=\"Test loss\")\n plt.xlabel(\"Iterations\", size='medium')\n plt.ylabel(\"Cost function (%)\", size='medium')\n plt.suptitle(\"Cost function while training the neural network\", size='medium', ha='center')\n if True in dropout_layer:\n plt.title(\"layers: {}, dropout rate: {}, learning rate: {}\".format(layers, dropout_rate, learning_rate),\n size='small', ha='center')\n else:\n plt.title(\"layers: {}, learning rate: {}\".format(layers, learning_rate), size='small', ha='center')\n plt.figtext(0.77, 0.35, \"Training accuracy\\n{0:.2f}%\".format(training_accuracy), size='medium')\n plt.figtext(0.77, 0.25, \"Test accuracy\\n{0:.2f}%\".format(test_accuracy), size='medium')\n plt.figtext(0.77, 0.15, \"Validation accuracy\\n{0:.2f}%\".format(val_accuracy), size='medium')\n if n_neighbours == 0:\n plt.figtext(0.77, 0.80, \"Neighbours\\nexcluded\", size='medium')\n else:\n plt.figtext(0.77, 0.80, \"{} neighbours\\nincluded\".format(n_neighbours), size='medium')\n plt.figtext(0.77, 0.70, \"{}\\nsamples\".format(data_size))\n plt.legend(loc='right', bbox_to_anchor=(1.39, 0.5))\n plt.subplots_adjust(right=0.75)\n working_dir = os.path.dirname(os.path.abspath(__file__))\n saving(working_dir + \"/output_ANN/loss_plots/{}_error_{}\".format(n_neighbours, data_size))", "def loss(self, X, y=None):\n scores = None\n ############################################################################\n # TODO: Implement the forward pass for the two-layer net, computing the #\n # class scores for X and storing them in the scores 
variable. #\n ############################################################################\n hid1, hid1cache = affine_relu_forward(X, self.params['W1'], self.params['b1'])\n scores, scorecache = affine_forward(hid1, self.params['W2'], self.params['b2'])\n ############################################################################\n # END OF YOUR CODE #\n ############################################################################\n\n # If y is None then we are in test mode so just return scores\n if y is None:\n return scores\n\n loss, grads = 0, {}\n ############################################################################\n # TODO: Implement the backward pass for the two-layer net. Store the loss #\n # in the loss variable and gradients in the grads dictionary. Compute data #\n # loss using softmax, and make sure that grads[k] holds the gradients for #\n # self.params[k]. Don't forget to add L2 regularization! #\n # #\n # NOTE: To ensure that your implementation matches ours and you pass the #\n # automated tests, make sure that your L2 regularization includes a factor #\n # of 0.5 to simplify the expression for the gradient. #\n ############################################################################\n loss, dscores = softmax_loss(scores, y)\n loss += 0.5 * self.reg *( np.sum(self.params['W1']**2) + np.sum(self.params['W2']**2) )\n\n dhid1, grads['W2'], grads['b2'] = affine_backward(dscores, scorecache)\n dx, grads['W1'], grads['b1'] = affine_relu_backward(dhid1, hid1cache)\n\n grads['W1'] += self.reg * self.params['W1']\n grads['W2'] += self.reg * self.params['W2']\n ############################################################################\n # END OF YOUR CODE #\n ############################################################################\n\n return loss, grads", "def loss(self, input, reconstruction, mu, log_var):\n reconstruction_loss = F.mse_loss(reconstruction, input)\n\n regularisation_loss = - 0.5 * torch.sum(1 + log_var - torch.exp(log_var) - torch.square(mu), 1)\n regularisation_loss = torch.mean(regularisation_loss)\n\n beta = self.beta0 + (self.beta1 - self.beta0) * self.epoch / self.epochs\n\n loss = reconstruction_loss + beta * regularisation_loss\n\n return {'loss': loss, \n 'reconstruction_loss': reconstruction_loss,\n 'regularisation_loss': regularisation_loss,\n 'regularisation_loss_beta': beta * regularisation_loss,\n 'beta': beta}", "def geo_loss_interface(pred_odo):\n def geo_loss_det(y_true, y_pred):\n odo_pose = pred_odo[:, 0:3]\n odo_orien = pred_odo[:, 3:]\n geo_pose = 0\n print('In Construction')\n return geo_loss_det", "def get_loss(self, x, y):\n \"*** YOUR CODE HERE ***\"\n #make your predictions using run\n #compute loss nn.squareloss\n y_pred = self.run(x)\n return nn.SquareLoss(y_pred,y)", "def tv_loss(input: th.Tensor):\n input = tf.pad(input, (0, 1, 0, 1), \"replicate\")\n x_diff = input[..., :-1, 1:] - input[..., :-1, :-1]\n y_diff = input[..., 1:, :-1] - input[..., :-1, :-1]\n return (x_diff ** 2 + y_diff ** 2).mean([1, 2, 3])", "def create_metric(self) -> 'LossMetric':\n raise NotImplementedError()", "def loss(self, x, y):\n return x", "def loss(val0, val1):\n return 0.5 * tf.reduce_sum(tf.squared_difference(val0, val1), axis=1)", "def calculate_loss(self, pred, gold, smoothing=False):\n gold = gold.contiguous().view(-1)\n if smoothing:\n epsilon = 0.1\n n_class = pred.size(1)\n one_hot = torch.zeros_like(pred).scatter(1, gold.view(-1, 1), 1)\n one_hot = one_hot * (1 - epsilon) + \\\n (1 - one_hot) * epsilon / (n_class - 1)\n\n log_prb = 
F.log_softmax(pred, dim=1)\n # create non-padding mask with torch.ne()\n non_pad_mask = gold.ne(self.constants.PAD)\n loss = -(one_hot * log_prb).sum(dim=1)\n # losses are averaged later\n loss = loss.masked_select(non_pad_mask).sum()\n else:\n loss = F.cross_entropy(\n pred, gold, ignore_index=self.constants.PAD, reduction='sum')\n return loss", "def loss(self, T, mode):\n self.circuit.loss(T, self._remap_modes(mode))", "def loss_func(pred, conv, label, bboxes, num_classes, train_input_size, iou_loss_threshold):\n \"\"\" giou replaces l2 norm losses of x, y, w, h as an improvement from original yolo_v3 \"\"\"\n \n # obtain number of classes\n num_classes = num_classes\n \n # obtain shape of raw yolo_v3 output (pre-decode)\n conv_shape = tf.shape(conv)\n \n # obtain batch size of raw yolo_v3 output (pre-decode)\n batch_size = conv_shape[0]\n \n # obtain output size of raw yolo_v3 output (pre-decode)\n output_size = conv_shape[1]\n \n # obtain train input size\n train_input_size = tf.cast(train_input_size, tf.float32)\n \n # reshape raw conv output \n conv = tf.reshape(conv, (batch_size, output_size, output_size, 3, 5 + num_classes))\n \n # obtain objectiveness scores and class probabilites for batch from raw conv output\n conv_raw_objectiveness = conv[:, :, :, :, 4:5]\n conv_raw_prob = conv[:, :, :, :, 5:]\n \n # obtain predicted x, y, w, h and objectiveness scores for batch based on train_input_size post decode\n pred_xywh = pred[:, :, :, :, 0:4]\n pred_conf = pred[:, :, :, :, 4:5]\n \n # obtain label x, y, w, h and objectiveness scores for batch based on train_input_size\n label_xywh = label[:, :, :, :, 0:4]\n respond_bbox = label[:, :, :, :, 4:5]\n label_prob = label[:, :, :, :, 5:]\n \n # obtain giou between predictions and labels \n giou = tf.expand_dims(bbox_giou(pred_xywh, label_xywh), axis = -1)\n\n # loss factor that gives higher weight to smaller boxes \n bbox_loss_scale = 2.0 - 1.0 * label_xywh[:, :, :, :, 2:3] * label_xywh[:, :, :, :, 3:4] / (train_input_size ** 2)\n \n # obtain giou loss \n giou_loss = respond_bbox * bbox_loss_scale * (1 - giou)\n \n # obtain iou between predictions and labels \n iou = bbox_iou(pred_xywh[:, :, :, :, np.newaxis, :], bboxes[:, np.newaxis, np.newaxis, np.newaxis, :, :])\n \n # find the value of iou with the largest prediction box\n max_iou = tf.reduce_max(iou, axis = -1, keepdims = True)\n\n # if the largest iou is less than the threshold, it is considered that the prediction box contains no objects, \n # then the background box\n respond_bgd = (1.0 - respond_bbox) * tf.cast(max_iou < iou_loss_threshold, tf.float32)\n \n # focal factor on objectiveness loss \n conf_focal = tf.pow(respond_bbox - pred_conf, 2)\n\n # calculate the objectiveness loss \n # we hope that if the grid contains objects, then the network output prediction box has a confidence of 1 and 0 \n # when there is no object.\n conf_loss = conf_focal * (respond_bbox + respond_bgd) * \\\n tf.nn.sigmoid_cross_entropy_with_logits(labels = respond_bbox, logits = conv_raw_objectiveness)\n \n # class probabilities loss\n prob_loss = respond_bbox * tf.nn.sigmoid_cross_entropy_with_logits(labels = label_prob, logits = conv_raw_prob)\n \n # sum up losses and take mean accross batch\n giou_loss = tf.reduce_mean(tf.reduce_sum(giou_loss, axis = [1,2,3,4]))\n conf_loss = tf.reduce_mean(tf.reduce_sum(conf_loss, axis = [1,2,3,4]))\n prob_loss = tf.reduce_mean(tf.reduce_sum(prob_loss, axis = [1,2,3,4]))\n \n if np.isnan(giou_loss):\n \n giou_loss = tf.Variable(0, trainable = False, dtype = 
tf.float32)\n \n return giou_loss, conf_loss, prob_loss", "def vae_loss_function_factory(reduction='mean'):\n def vae_loss_function(outputs, targets, mean, std_dev):\n outputs_flat = outputs.view(-1, 28 * 28)\n targets_flat = targets.view(-1, 28 * 28)\n if reduction == 'mean':\n image_loss = torch.mean((outputs_flat - targets_flat).pow(2).sum(dim=1))\n latent_loss = -0.5 * torch.mean((1 + 2 * std_dev - mean.pow(2) - torch.exp(2 * std_dev)).sum(dim=1))\n elif reduction == 'sum':\n image_loss = torch.sum((outputs_flat - targets_flat).pow(2).sum(dim=1))\n latent_loss = -0.5 * torch.sum((1 + 2 * std_dev - mean.pow(2) - torch.exp(2 * std_dev)).sum(dim=1))\n elif reduction == 'none':\n image_loss = (outputs_flat - targets_flat).pow(2).sum(dim=1)\n latent_loss = -0.5 * (1 + 2 * std_dev - mean.pow(2) - torch.exp(2 * std_dev)).sum(dim=1)\n else:\n raise NotImplementedError('Reduction ' + reduction + ' not implemented.')\n return image_loss + latent_loss\n return vae_loss_function", "def _get_loss(self):\n raise NotImplementedError", "def get_loss(self, x, y):\n \"*** YOUR CODE HERE ***\"\n predictedY = self.run(x)\n return nn.SquareLoss(predictedY, y)", "def compute_loss(self, model, inputs, return_outputs=False):\n if self.label_smoother is not None and \"labels\" in inputs:\n labels = inputs.pop(\"labels\")\n else:\n labels = None\n outputs = model(**inputs)\n\n if self.args.past_index >= 0:\n self._past = outputs[self.args.past_index]\n\n if labels is not None:\n loss = self.label_smoother(outputs, labels)\n else:\n loss = outputs[\"loss\"] if isinstance(outputs, dict) else outputs[0]\n\n return (loss, outputs) if return_outputs else loss", "def loss(self, prediction_dict, groundtruth_lists):\r\n pass", "def loss(self, z1_rec, z2_con_z1_rec):\n pass", "def update_network(self, loss_dict):\r\n loss = sum(loss_dict.values())\r\n self.optimizer.zero_grad()\r\n loss.backward()\r\n self.optimizer.step()" ]
[ "0.70169455", "0.6844153", "0.6804448", "0.6769743", "0.66364557", "0.6544467", "0.6499708", "0.64170283", "0.6415858", "0.64099944", "0.6390037", "0.63685757", "0.6367827", "0.6360576", "0.6336654", "0.6305561", "0.6284038", "0.6282407", "0.6270986", "0.62703526", "0.6265863", "0.62592566", "0.6254272", "0.6237803", "0.6226145", "0.62236613", "0.620478", "0.61998236", "0.61816925", "0.6181011", "0.6171648", "0.6155519", "0.6150142", "0.6148968", "0.61387765", "0.612997", "0.6118568", "0.61173546", "0.6107259", "0.6107259", "0.6105883", "0.61057514", "0.6101817", "0.61018157", "0.6100278", "0.6098093", "0.6097182", "0.60939795", "0.60921097", "0.6089354", "0.608733", "0.6077302", "0.6067202", "0.60317534", "0.60184395", "0.6001588", "0.59968054", "0.5993935", "0.59903", "0.59846586", "0.5978102", "0.5964714", "0.5964714", "0.5950565", "0.59258556", "0.5922316", "0.59127736", "0.59044015", "0.58982986", "0.5896814", "0.58915997", "0.58900076", "0.5884423", "0.5873604", "0.5873545", "0.5867907", "0.58647406", "0.5862674", "0.58624864", "0.58564395", "0.5851197", "0.58470285", "0.5839756", "0.5835289", "0.5821977", "0.58192974", "0.5816661", "0.5813165", "0.58115274", "0.580759", "0.5806457", "0.5805161", "0.5798688", "0.57948774", "0.5794579", "0.5793226", "0.57908565", "0.5787291", "0.57840997", "0.578384", "0.5778565" ]
0.0
-1
create an OCSVM loss for the network given in argument, with rho=1 fixed
def compile_update_ocsvm_rho_fixed(nnet, inputs, targets): floatX = Cfg.floatX C = Cfg.C nu = Cfg.nu if len(nnet.all_layers) > 1: feature_layer = nnet.all_layers[-2] else: feature_layer = nnet.input_layer final_layer = nnet.ocsvm_layer trainable_params = lasagne.layers.get_all_params(final_layer, trainable=True) # Regularization Wsvm_penalty = T.sum(abs(final_layer.W) ** Cfg.pow) l2_penalty = get_l2_penalty(nnet, include_bias=Cfg.include_bias, pow=Cfg.pow) l2_penalty += Wsvm_penalty l2_penalty *= (1/C) # Backpropagation prediction = lasagne.layers.get_output(final_layer, inputs=inputs, deterministic=False) scores = T.ones_like(prediction) - prediction objective, train_acc = final_layer.objective(-scores, targets) # OC-SVM loss train_loss = T.cast(objective / (targets.shape[0] * nu), dtype='floatX') train_acc = T.cast(train_acc * 1. / targets.shape[0], dtype='floatX') train_obj = T.cast(floatX(0.5) * l2_penalty + train_loss, dtype='floatX') updates = get_updates(nnet, train_obj, trainable_params, solver=nnet.solver) nnet.backprop = theano.function([inputs, targets], [train_obj, train_acc], updates=updates) # Forwardpropagation test_prediction = lasagne.layers.get_output(final_layer, inputs=inputs, deterministic=True) test_scores = T.ones_like(prediction) - test_prediction objective, test_acc = final_layer.objective(-test_scores, targets) # Get network feature representation test_rep = lasagne.layers.get_output(feature_layer, inputs=inputs, deterministic=True) test_rep_norm = test_rep.norm(L=2, axis=1) test_ball_penalty = T.cast(0, dtype='floatX') test_l2_output = T.cast(0, dtype='floatX') # OC-SVM test loss test_loss = T.cast(objective / (targets.shape[0] * nu), dtype='floatX') test_acc = T.cast(test_acc * 1. / targets.shape[0], dtype='floatX') test_obj = T.cast(floatX(0.5) * l2_penalty + test_loss, dtype='floatX') nnet.forward = theano.function([inputs, targets], [test_obj, test_acc, -test_scores, floatX(0.5) * l2_penalty, floatX(0.5) * test_l2_output, test_rep, test_rep_norm, test_loss, floatX(0.5) * test_ball_penalty])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _create_loss(self):\n\n with tf.name_scope(\"loss\"):\n \n # gini=(tf.nn.l2_loss( self.score))/100000\n gini = tf.losses.softmax_cross_entropy(self.score, 0*self.score)\n \n promo_prob=tf.reduce_sum(tf.multiply(self.score, self.cohort_weight),\n axis=1)\n inc_value = tf.reduce_mean(tf.multiply(promo_prob, self.value))- self.control_value\n inc_cost = tf.reduce_mean( tf.multiply(promo_prob, self.cost)) - self.control_cost\n \n\n\n # determine loss function based on self.obj_rule\n if self.obj_rule == 'cpiv':\n self.objective = inc_cost / inc_value\n\n elif self.obj_rule == 'ivc':\n # maximize ivc\n self.objective = - inc_value / inc_cost\n\n elif self.obj_rule == 'lagrangian':\n assert self.shadow is not None, 'Need to pass in shadow value if use lagrangian as obj_rule.'\n self.objective = inc_cost - self.shadow * inc_value\n\n elif self.obj_rule == 'value':\n # maximize delta values\n self.objective = - inc_value\n\n # use only cost as objective\n elif self.obj_rule == 'cost':\n # maximize delta cost\n self.objective = - inc_cost\n\n else:\n raise Exception('Invalid obj_rule!')\n\n # regularization\n reg_loss = tf.reduce_sum(tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES))\n # weights = tf.trainable_variables() # all vars of your graph\n # reg_loss = tf.norm( weights,ord=1)\n\n # final loss\n self.loss = self.objective +reg_loss+.1*gini", "def compute_loss(self, obs, returns):", "def loss_creator(config):\n return torch.nn.BCELoss()", "def heteroscedastic_loss(network, params, x):\n\n pred_mean, pred_var = network(x)\n logvar = tf.reduce_sum(0.5 * tf.math.log(pred_var), axis=-1)\n squared_error = tf.reduce_sum(0.5 * tf.math.square(params - pred_mean) / pred_var, axis=-1)\n loss = tf.reduce_mean(squared_error + logvar)\n return loss", "def compile_update_svdd(nnet, inputs, targets):\n\n floatX = Cfg.floatX\n B = Cfg.B\n C = Cfg.C\n nu = Cfg.nu\n\n # initialize R\n if nnet.R_init > 0:\n nnet.Rvar = shared(floatX(nnet.R_init), name=\"R\")\n else:\n nnet.Rvar = shared(floatX(1), name=\"R\") # initialization with R=1\n\n # Loss\n feature_layer = nnet.all_layers[-1]\n rep = lasagne.layers.get_output(feature_layer, inputs=inputs,\n deterministic=False)\n\n # initialize c (0.5 in every feature representation dimension)\n rep_dim = feature_layer.num_units\n # nnet.cvar = shared(floatX(np.ones(rep_dim) * (1. / (rep_dim ** 0.5))),\n # name=\"c\")\n nnet.cvar = shared(floatX(np.ones(rep_dim) * 0.5), name=\"c\")\n\n dist = T.sum(((rep - nnet.cvar.dimshuffle('x', 0)) ** 2),\n axis=1, dtype='floatX')\n scores = dist - nnet.Rvar\n stack = T.stack([T.zeros_like(scores), scores], axis=1)\n loss = T.cast(T.sum(T.max(stack, axis=1)) / (inputs.shape[0] * nu),\n dtype='floatX')\n\n y_pred = T.argmax(stack, axis=1)\n acc = T.cast((T.sum(T.eq(y_pred.flatten(), targets), dtype='int32')\n * 1. 
/ targets.shape[0]), 'floatX')\n\n # Network weight decay\n if Cfg.weight_decay:\n l2_penalty = (1/C) * get_l2_penalty(nnet,\n include_bias=Cfg.include_bias,\n pow=Cfg.pow)\n else:\n l2_penalty = T.cast(0, dtype='floatX')\n\n # Network activation sparsity regularization\n if Cfg.sparsity_penalty:\n sparsity_penalty = (1/B) * get_sparsity_penalty(nnet, inputs,\n Cfg.sparsity,\n mode=Cfg.sparsity_mode,\n deterministic=False)\n else:\n sparsity_penalty = T.cast(0, dtype='floatX')\n\n # Backpropagation (hard-margin: only minimizing everything to a ball\n # centered at c)\n trainable_params = lasagne.layers.get_all_params(feature_layer,\n trainable=True)\n if Cfg.gaussian_blob:\n avg_dist = T.mean(1-T.exp(-dist), dtype=\"floatX\")\n else:\n avg_dist = T.mean(dist, dtype=\"floatX\")\n obj_ball = T.cast(floatX(0.5) * l2_penalty + avg_dist + sparsity_penalty,\n dtype='floatX')\n updates_ball = get_updates(nnet, obj_ball, trainable_params,\n solver=nnet.solver)\n nnet.backprop_ball = theano.function([inputs, targets], [obj_ball, acc],\n updates=updates_ball)\n\n # Backpropagation (without training R)\n obj = T.cast(floatX(0.5) * l2_penalty + nnet.Rvar + loss + sparsity_penalty,\n dtype='floatX')\n updates = get_updates(nnet, obj, trainable_params, solver=nnet.solver)\n nnet.backprop_without_R = theano.function([inputs, targets], [obj, acc],\n updates=updates)\n\n # Backpropagation (with training R)\n trainable_params.append(nnet.Rvar) # add radius R to trainable parameters\n updates = get_updates(nnet, obj, trainable_params, solver=nnet.solver)\n nnet.backprop = theano.function([inputs, targets], [obj, acc],\n updates=updates)\n\n\n # Forwardpropagation\n test_rep = lasagne.layers.get_output(feature_layer, inputs=inputs,\n deterministic=True)\n test_rep_norm = test_rep.norm(L=2, axis=1)\n\n test_dist = T.sum(((test_rep - nnet.cvar.dimshuffle('x', 0)) ** 2),\n axis=1, dtype='floatX')\n\n test_scores = test_dist - nnet.Rvar\n test_stack = T.stack([T.zeros_like(test_scores), test_scores], axis=1)\n test_loss = T.cast(T.sum(T.max(test_stack, axis=1)) / (inputs.shape[0]*nu),\n dtype='floatX')\n\n test_y_pred = T.argmax(test_stack, axis=1)\n test_acc = T.cast((T.sum(T.eq(test_y_pred.flatten(), targets),\n dtype='int32')\n * 1. 
/ targets.shape[0]), dtype='floatX')\n\n # Network activation sparsity regularization (with determinisitc=True)\n if Cfg.sparsity_penalty:\n test_sparsity_penalty = ((1 / B) *\n get_sparsity_penalty(nnet, inputs,\n Cfg.sparsity,\n mode=Cfg.sparsity_mode,\n deterministic=True))\n else:\n test_sparsity_penalty = T.cast(0, dtype='floatX')\n\n test_obj = T.cast(floatX(0.5) * l2_penalty + nnet.Rvar + test_loss\n + test_sparsity_penalty, dtype='floatX')\n nnet.forward = theano.function([inputs, targets],\n [test_obj, test_acc, test_scores,\n floatX(0.5) * l2_penalty,\n test_sparsity_penalty, test_rep,\n test_rep_norm, test_loss, nnet.Rvar])", "def loss_fun(para):\n\n return -data_processing(my_cir(para))", "def compute_loss(self):", "def loss_fn(self, targets, outputs, model):", "def funcs(dataset, network, batch_size=BATCH_SIZE, learning_rate=LEARNING_RATE, sparsity=0.02, beta=0.5, momentum=MOMENTUM):\n\n # symbolic variables \n X_batch = T.tensor4()\n y_batch = T.tensor4()\n\n layers = lasagne.layers.get_all_layers(network)\n num_layers = len(layers)\n print(num_layers)\n\n code_layer = layers[num_layers/2]\n\n # code output \n code_output = lasagne.layers.get_output(code_layer, X_batch, deterministic=True)\n rho_hat = T.mean(code_output,axis=1)\n # L = T.sum(sparsity * T.log(sparsity/rho_hat) + (1 - sparsity) * T.log((1 - sparsity)/(1 - rho_hat)))\n l = T.sub(1,code_output)\n ll = T.mul(code_output,l)\n L = T.mul(4,ll)\n L = L.mean()\n\n\n # reg = 0.0001*lasagne.regularization.l2(network)\n # this is the cost of the network when fed throught the noisey network\n train_output = lasagne.layers.get_output(network, X_batch)\n cost = lasagne.objectives.mse(train_output, y_batch) \n cost = cost.mean() + beta * L\n # validation cost\n valid_output = lasagne.layers.get_output(network, X_batch)\n valid_cost = lasagne.objectives.mse(valid_output, y_batch) \n valid_cost = valid_cost.mean() \n\n # test the performance of the netowork without noise\n pred = lasagne.layers.get_output(network, X_batch, deterministic=True)\n # pred = T.argmax(test, axis=1)\n accuracy = 1 - T.mean(lasagne.objectives.mse(pred, y_batch), dtype=theano.config.floatX)\n\n all_params = lasagne.layers.get_all_params(network)\n updates = lasagne.updates.nesterov_momentum(cost, all_params, learning_rate, momentum)\n\n train = theano.function(inputs=[X_batch, y_batch], outputs=cost, updates=updates, allow_input_downcast=True)\n valid = theano.function(inputs=[X_batch, y_batch], outputs=valid_cost, allow_input_downcast=True)\n predict = theano.function(inputs=[X_batch], outputs=pred, allow_input_downcast=True)\n accuracy = theano.function(inputs=[X_batch,y_batch], outputs=accuracy, allow_input_downcast=True)\n code = theano.function(inputs=[X_batch], outputs=code_output, allow_input_downcast=True)\n\n return dict(\n train=train,\n valid=valid,\n predict=predict,\n accuracy=accuracy,\n code=code\n )", "def model_loss(inp, fake, real_label, fake_label):\n \n \n Dreal,realcls,R1 = gradpen(inp)\n [Dfake,fakecls] = D(fake)\n # 1. 
Adversarial loss\n \n glabel = tf.ones_like(Dfake)#tf.random.uniform((Dfake.shape), 1-LN, 1)\n dlabelr = tf.ones_like(Dreal)#tf.random.uniform((Dreal.shape), 1-LN, 1)\n dlabelf = tf.zeros_like(Dfake)#tf.random.uniform((Dfake.shape), 0, LN)\n \n \n \n # D has no sigmoid activation: \"from_logits=True\"\n real_loss = tf.keras.losses.binary_crossentropy(\n dlabelr, Dreal, from_logits=True)\n real_loss = tf.reduce_mean(real_loss)\n \n fake_loss = tf.keras.losses.binary_crossentropy(\n dlabelf, Dfake, from_logits=True)\n fake_loss = tf.reduce_mean(fake_loss)\n \n Dadv = 0.5*(real_loss+fake_loss)\n \n Gadv = tf.keras.losses.binary_crossentropy(\n glabel, Dfake, from_logits=True)\n Gadv = tf.reduce_mean(Gadv)\n \n # 2. Classification loss\n \n Dcls = tf.keras.losses.binary_crossentropy(real_label, realcls, from_logits=True)\n Dcls = tf.reduce_mean(Dcls)\n \n Gcls = tf.keras.losses.binary_crossentropy(fake_label, fakecls, from_logits=True)\n Gcls = tf.reduce_mean(Gcls)\n \n # 3. Total loss\n \n Dloss = Dadv + (GAMMA/2)*R1 + LAMBDA_CLS*Dcls\n \n Gloss = Gadv + LAMBDA_CLS*Gcls\n \n return (Dloss, Dadv, Dcls, R1), (Gloss, Gadv, Gcls)", "def model_loss(self,input_real,input_z,out_channel_dim):\t\r\n label_smooth = 0.9 \r\n \r\n #get output of generator\r\n gen_img, gen_logits = self.generator(input_z,out_channel_dim,True)\r\n\r\n\t#pass real image to dicriminator\r\n disc_model_real, disc_logits_real = self.discriminator(input_real)\r\n\t\r\n\t#pass generated image to dicriminator\r\n disc_model_fake, disc_logits_fake = self.discriminator(gen_img,reuse=True)\r\n \r\n\t \t\r\n disc_loss_real = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=disc_logits_real,labels=label_smooth*tf.ones_like(disc_model_real))) \r\n disc_loss_fake = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=disc_logits_fake,labels=tf.zeros_like(disc_model_fake)))\r\n \r\n\r\n\t\"\"\"\r\n\tLoss for discriminator is sum of loss for real image and fake image \r\n\t\"\"\"\t\r\n disc_loss = disc_loss_real + disc_loss_fake\r\n \r\n\r\n \"\"\"\r\n\tTo find loss for generator, fake image is passed with label= real (0.9)\r\n\t\"\"\"\r\n gen_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=disc_logits_fake,labels=label_smooth*tf.ones_like(disc_model_fake)))\r\n \r\n return disc_loss,gen_loss,gen_img", "def __init__(self, S=7, B=2, C=20): \n super().__init__()\n self.mse = nn.MSELoss(reduction=\"sum\")\n self.S = S\n self.B = B\n self.C = C\n self.l_noobl = 0.5\n self.l_coord = 5", "def geo_loss_interface(pred_odo):\n def geo_loss_det(y_true, y_pred):\n odo_pose = pred_odo[:, 0:3]\n odo_orien = pred_odo[:, 3:]\n geo_pose = 0\n print('In Construction')\n return geo_loss_det", "def funcs(dataset, network, batch_size=BATCH_SIZE, learning_rate=LEARNING_RATE, sparsity=0.02, beta=0.01, momentum=MOMENTUM):\n\n # symbolic variables \n X_batch = T.matrix()\n y_batch = T.matrix()\n\n layers = lasagne.layers.get_all_layers(network)\n num_layers = len(layers)\n\n code_layer = layers[num_layers/2]\n activations_2_layer = layers[num_layers/2 - 1]\n activations_1_layer = layers[num_layers/2 - 2]\n\n # code output \n code_output = lasagne.layers.get_output(code_layer, X_batch, deterministic=True)\n\n l = T.sub(1,code_output)\n ll = T.mul(code_output,l)\n L = T.mul(4,ll)\n L = L.mean()\n\n rho_hat = T.mean(code_output,axis=1)\n # L = T.sum(sparsity * T.log(sparsity/rho_hat) + (1 - sparsity) * T.log((1 - sparsity)/(1 - rho_hat)))\n\n # reg = 0.0001*lasagne.regularization.l2(network)\n # this is the cost of the 
network when fed throught the noisey network\n train_output = lasagne.layers.get_output(network, X_batch)\n cost = lasagne.objectives.mse(train_output, y_batch) \n cost = cost.mean() + beta * L\n\n all_params = lasagne.layers.get_all_params(network)\n updates = lasagne.updates.nesterov_momentum(cost, all_params, learning_rate, momentum)\n\n \n\n # code and activation outputs\n \n activations_1_output = lasagne.layers.get_output(activations_1_layer, X_batch, deterministic=True)\n activations_2_output = lasagne.layers.get_output(activations_2_layer, X_batch, deterministic=True)\n\n train = theano.function(inputs=[X_batch, y_batch], outputs=cost, updates=updates, allow_input_downcast=True)\n code = theano.function(inputs=[X_batch], outputs=code_output, allow_input_downcast=True)\n activations_1 = theano.function(inputs=[X_batch], outputs=activations_1_output, allow_input_downcast=True)\n activations_2 = theano.function(inputs=[X_batch], outputs=activations_2_output, allow_input_downcast=True)\n\n return dict(\n train=train,\n code=code,\n activations_1=activations_1,\n activations_2=activations_2\n )", "def loss_fn(outputs, labels):\n #print('this is outputs', outputs.shape) # 2,3,128,128\n #print('this is labels', labels.shape) # 2,3,128,128\n N, C, H, W = outputs.shape\n \n# outputs = unnormalize(outputs, mean=[0.51371954, 0.40949144, 0.35572536], std= [0.2926419, 0.26180502, 0.25512055])\n # check if we normalize label images #labels = unnormalize(labels, mean=[0.53459634,0.39673596,0.33788489], std= [0.29101071,0.26140346,0.25485687])\n \n mse_loss = torch.sum((outputs - labels) ** 2) / N / C # each photo, each channel\n mse_loss *= 255 * 255\n mse_loss /= H * W \n # average loss on each pixel(0-255)\n return mse_loss", "def __rho2v(self, vm, beta, rhoc, w, rho):\n if rho < 0:\n return float(vm)\n elif rho <= rhoc:\n return float(vm - vm * rho / beta)\n else:\n rhom = rhoc - (vm * rhoc - vm * (rhoc ** 2) / beta) / w\n # print('rho {0}; rhoc {1}'.format(rho, rhoc))\n return float(w * (rho - rhom) / rho)", "def define_loss_variables(n, sns, c):\n if n.df(c).empty or c not in n.passive_branch_components:\n return\n\n active = get_activity_mask(n, c, sns) if n._multi_invest else None\n coords = [sns, n.df(c).index.rename(c)]\n n.model.add_variables(0, coords=coords, name=f\"{c}-loss\", mask=active)", "def loss(L, S, A, R, Y, alpha, beta, gamma, delta):\n return alpha*load_imbalance(Y,L,A) + beta*constraints(Y,A) + gamma*transfers(Y,R,S) + delta*choice(Y)", "def loss_fn(self, recons, inputs, mu, log_var, **kwargs):\n# kld_weight = kwargs['M_N'] # Account for the minibatch samples from the dataset\n recons_loss = F.mse_loss(recons, inputs)\n# recons_loss = F.binary_cross_entropy(recons, inputs)\n KLD = torch.mean(-0.5 * torch.sum(1 + log_var - mu**2 - log_var.exp(), dim=1), dim=0)\n loss = recons_loss - KLD\n return loss, recons_loss, KLD", "def _define_loss(self):\n\n cost = []\n unit_cost = []\n for nn in range(len(self.ffnet_out)):\n data_out = self.data_out_batch[nn]\n if self.filter_data:\n # this will zero out predictions where there is no data,\n # matching Robs here\n pred = tf.multiply(\n self.networks[self.ffnet_out[nn]].layers[-1].outputs,\n self.data_filter_batch[nn])\n else:\n pred = self.networks[self.ffnet_out[nn]].layers[-1].outputs\n\n nt = tf.cast(tf.shape(pred)[0], tf.float32)\n # define cost function\n if self.noise_dist == 'gaussian':\n with tf.name_scope('gaussian_loss'):\n cost.append(tf.nn.l2_loss(data_out - pred) / nt)\n 
unit_cost.append(tf.reduce_mean(tf.square(data_out-pred), axis=0))\n\n elif self.noise_dist == 'poisson':\n with tf.name_scope('poisson_loss'):\n\n if self.poisson_unit_norm is not None:\n # normalize based on rate * time (number of spikes)\n cost_norm = tf.multiply(self.poisson_unit_norm[nn], nt)\n else:\n cost_norm = nt\n\n cost.append(-tf.reduce_sum(tf.divide(\n tf.multiply(data_out, tf.log(self._log_min + pred)) - pred,\n cost_norm)))\n\n unit_cost.append(-tf.divide(\n tf.reduce_sum(\n tf.multiply(\n data_out, tf.log(self._log_min + pred)) - pred, axis=0),\n cost_norm))\n\n elif self.noise_dist == 'bernoulli':\n with tf.name_scope('bernoulli_loss'):\n # Check per-cell normalization with cross-entropy\n # cost_norm = tf.maximum(\n # tf.reduce_sum(data_out, axis=0), 1)\n cost.append(tf.reduce_mean(\n tf.nn.sigmoid_cross_entropy_with_logits(\n labels=data_out, logits=pred)))\n unit_cost.append(tf.reduce_mean(\n tf.nn.sigmoid_cross_entropy_with_logits(\n labels=data_out, logits=pred), axis=0))\n else:\n TypeError('Cost function not supported.')\n\n self.cost = tf.add_n(cost)\n self.unit_cost = unit_cost\n\n # Add regularization penalties\n reg_costs = []\n with tf.name_scope('regularization'):\n for nn in range(self.num_networks):\n reg_costs.append(self.networks[nn].define_regularization_loss())\n self.cost_reg = tf.add_n(reg_costs)\n\n self.cost_penalized = tf.add(self.cost, self.cost_reg)\n\n # save summary of cost\n # with tf.variable_scope('summaries'):\n tf.summary.scalar('cost', self.cost)\n tf.summary.scalar('cost_penalized', self.cost_penalized)\n tf.summary.scalar('reg_pen', self.cost_reg)", "def objective(rp,n=5000,C=-2*10**11,a=300,b=1):\n l = log(rp)/n\n r = exp(l)\n rm1 = r-1\n return (rp-1)*((a-b*n)*rm1 + 1) - C*(rm1)*(rm1)\n #return rm1", "def meta_amortized_loss(network, model_indices, params, sim_data):\n\n out_inference, out_evidence = network(model_indices, params, sim_data)\n if out_inference is not None:\n z, log_det_J = out_inference\n kl_loss = tf.reduce_mean(0.5 * tf.square(tf.norm(z, axis=-1)) - log_det_J)\n else:\n kl_loss = 0\n \n if out_evidence is not None:\n model_probs = out_evidence\n model_probs = tf.clip_by_value(model_probs, 1e-15, 1 - 1e-15)\n log_loss = -tf.reduce_mean(tf.reduce_sum(model_indices * tf.math.log(model_probs), axis=1))\n else:\n log_loss = 0\n return kl_loss + log_loss", "def compute_loss(theta_vector, *args):\n\n psi = args[0]\n circ_depth = args[1]\n num_qbits = args[2]\n theta = np.reshape(theta_vector, (circ_depth, num_qbits))\n\n fidelity = get_fidelity(theta, psi)\n loss = get_loss(fidelity)\n return loss", "def loss(self, **kwargs):\n pass", "def loss(posterior, pars_to_penalize, c_rim):\n marginal = posterior.mean(axis=0)\n cond_entropy = misc.cat_entropy(posterior).mean()\n entropy = misc.cat_entropy(marginal.dimshuffle('x', 0)).sum()\n\n nmi = cond_entropy - entropy\n\n n_samples = posterior.shape[0]\n penalties = [(i ** 2).sum() / n_samples for i in pars_to_penalize]\n penalty = sum(penalties)\n\n loss = nmi + c_rim * penalty\n\n return get_named_variables(locals())", "def genLoss(self, *data):\r\n _, (x_unlab, _) = data\r\n z = self.getInputNoise(self.hypers['ul_BS'])\r\n fake_logits = self.D(self.G(z))\r\n g_losses = -1*logOneMinusSoftmax(fake_logits)[:,self.D.numClasses-1]\r\n return torch.mean(g_losses)", "def compute_loss(self, features, mode, params, precomputed):\n raise NotImplementedError(\"Model does not implement loss.\")", "def compile_update_ocsvm(nnet, inputs, targets):\n\n floatX = Cfg.floatX\n C = Cfg.C\n A = 
Cfg.A\n nu = Cfg.nu\n\n if len(nnet.all_layers) > 1:\n feature_layer = nnet.all_layers[-2]\n else:\n feature_layer = nnet.input_layer\n final_layer = nnet.ocsvm_layer\n trainable_params = lasagne.layers.get_all_params(final_layer,\n trainable=True)\n\n # Regularization (up to feature map)\n if Cfg.weight_decay:\n if Cfg.prod_penalty:\n l2_penalty = (1/C) * get_prod_penalty(nnet)\n elif Cfg.spec_penalty:\n l2_penalty = (1/C) * get_spectral_penalty(nnet, Cfg.include_bias)\n else:\n l2_penalty = ((1/C) * get_l2_penalty(nnet,\n include_bias=Cfg.include_bias,\n pow=Cfg.pow))\n else:\n l2_penalty = T.cast(0, dtype='floatX')\n\n # Bias offset\n if Cfg.bias_offset:\n bias_offset = get_bias_offset(nnet)\n else:\n bias_offset = T.cast(0, dtype='floatX')\n\n # Backpropagation\n prediction = lasagne.layers.get_output(final_layer, inputs=inputs,\n deterministic=False)\n objective, train_acc = final_layer.objective(prediction, targets)\n\n # Normalization\n rep = lasagne.layers.get_output(feature_layer, inputs=inputs,\n deterministic=False)\n rep_norm = rep.norm(L=2, axis=1).dimshuffle((0, 'x'))\n if Cfg.ball_penalty:\n ball_penalty, _ = final_layer.objective(\n T.ones_like(rep_norm) - (rep_norm ** 2), targets)\n else:\n ball_penalty = T.cast(0, dtype='floatX')\n ball_penalty = (1/A) * T.cast(ball_penalty / targets.shape[0],\n dtype='floatX')\n\n # Output regularization\n if Cfg.output_penalty:\n l2_output = (1/C) * (T.sum(abs(final_layer.W) ** Cfg.pow)\n * T.sum(abs(rep) ** 2))\n else:\n l2_output = T.cast(0, dtype='floatX')\n l2_output = T.cast(l2_output / targets.shape[0], dtype='floatX')\n\n # SVM parameter regularization\n if Cfg.Wsvm_penalty:\n Wsvm_penalty = T.sum(abs(final_layer.W) ** Cfg.pow)\n else:\n Wsvm_penalty = T.cast(0, dtype='floatX')\n\n # OC SVM loss has nu parameter and adds margin from origin to objective\n train_loss = T.cast(objective / (targets.shape[0] * nu), dtype='floatX')\n train_acc = T.cast(train_acc * 1. / targets.shape[0], dtype='floatX')\n train_obj = T.cast(floatX(0.5) * l2_penalty\n + floatX(0.5) * ball_penalty\n + floatX(0.5) * l2_output\n + floatX(0.5) * Wsvm_penalty\n + train_loss\n + T.sum(final_layer.b)\n + bias_offset, dtype='floatX')\n updates = get_updates(nnet, train_obj, trainable_params, solver=nnet.solver)\n nnet.backprop = theano.function([inputs, targets],\n [train_obj, train_acc],\n updates=updates)\n\n # Forwardpropagation\n test_prediction = lasagne.layers.get_output(final_layer, inputs=inputs,\n deterministic=True)\n # get network feature representation\n test_rep = lasagne.layers.get_output(feature_layer, inputs=inputs,\n deterministic=True)\n test_rep_norm = test_rep.norm(L=2, axis=1)\n if Cfg.ball_penalty:\n test_ball_penalty, _ = final_layer.objective(\n T.ones_like(test_rep_norm.dimshuffle((0, 'x')))\n - (test_rep_norm.dimshuffle((0, 'x')) ** 2), targets)\n else:\n test_ball_penalty = T.cast(0, dtype='floatX')\n test_ball_penalty = ((1/A) * T.cast(\n test_ball_penalty / targets.shape[0], dtype='floatX'))\n\n # Output regularization\n if Cfg.output_penalty:\n test_l2_output = (1/C) * (T.sum(abs(final_layer.W) ** Cfg.pow)\n * T.sum(abs(test_rep) ** 2))\n else:\n test_l2_output = T.cast(0, dtype='floatX')\n test_l2_output = T.cast(test_l2_output / targets.shape[0], dtype='floatX')\n\n objective, test_acc = final_layer.objective(test_prediction, targets)\n test_loss = T.cast(objective / (targets.shape[0] * nu), dtype='floatX')\n test_acc = T.cast(test_acc * 1. 
/ targets.shape[0], dtype='floatX')\n test_obj = T.cast(floatX(0.5) * l2_penalty\n + floatX(0.5) * test_ball_penalty\n + floatX(0.5) * test_l2_output\n + floatX(0.5) * Wsvm_penalty\n + test_loss\n + T.sum(final_layer.b), dtype='floatX')\n nnet.forward = theano.function([inputs, targets],\n [test_obj, test_acc, test_prediction,\n floatX(0.5) * l2_penalty,\n floatX(0.5) * test_l2_output, test_rep,\n test_rep_norm, test_loss,\n floatX(0.5) * test_ball_penalty])", "def compute_loss(\n action_probs: tf.Tensor, values: tf.Tensor, returns: tf.Tensor\n) -> tf.Tensor:\n\n advantage = returns - values\n td = tf.subtract(returns, values)\n\n # actor\n # action_log_probs = tf.math.log(action_probs)\n # actor_loss = -tf.math.reduce_sum(action_log_probs * advantage)\n action_log_probs = tf.math.log(action_probs)\n actor_loss = -tf.math.reduce_mean(action_log_probs * td)\n\n # critic\n # td = tf.subtract(returns, values)\n # critic_loss = tf.reduce_mean(tf.square(td))\n critic_loss = huber_loss(values, returns)\n\n tf.print(\"a_loss:\", actor_loss, \"c_loss:\", critic_loss)\n\n return actor_loss + critic_loss", "def get_loss_fn():\n return reconstruction", "def _create_loss_op(self):\n # 1.) The reconstruction loss, which forces the NN towards reconstructing more accurately the\n # given input. This function is configurable, but usually it is the Bernoulli negative log-likelihood.\n if self.cost_function == 'abs':\n reconstr_loss = tf.reduce_sum(tf.abs(self.x_decoded - self.x_in), 1)\n elif self.cost_function in ('mse', 'l2', 'square'):\n reconstr_loss = tf.reduce_sum(tf.squared_difference(self.x_in, self.x_decoded), 1)\n elif self.cost_function in ('xentropy', 'log'):\n reconstr_loss = \\\n -tf.reduce_sum(self.x_in * tf.log(1e-10 + self.x_decoded)\n + (1 - self.x_in) * tf.log(1e-10 + 1 - self.x_decoded),\n 1)\n else:\n raise ValueError(self.cost_function, \"Unknown cost function name!\")\n\n # 2.) The latent loss, which is defined as the Kullback Leibler divergence\n ## between the distribution in latent space induced by the encoder on\n # the data and some prior. This acts as a kind of regularizer.\n # This can be interpreted as the number of \"nats\" required\n # for transmitting the the latent space distribution given\n # the prior.\n latent_loss = -0.5 * tf.reduce_sum(1. 
+ self.z_log_sigma_sq\n - tf.square(self.z_mean)\n - tf.exp(self.z_log_sigma_sq), 1)\n\n self.loss_op = tf.reduce_mean(reconstr_loss + latent_loss) # average over batch\n tf.add_to_collection(\"losses\", self.loss_op)\n\n if self.learning_rate is not None:\n global_step = tf.train.get_or_create_global_step()\n self.train_op = tf.train.AdamOptimizer(learning_rate=self.learning_rate).minimize(\n self.loss_op,\n global_step=global_step,\n var_list=tf.get_collection(self.training_scope) if self.training_scope is not None else None)\n\n tf.add_to_collection(\"train_ops\", self.train_op)\n tf_logging.info(\"Added AdamOptimizer with learning rate: %.8f\" % self.learning_rate)\n\n tf.summary.scalar(\"latent_loss\", tf.reduce_mean(latent_loss))\n tf.summary.scalar(\"reconstruction_loss\", tf.reduce_mean(reconstr_loss))\n tf.summary.scalar(\"vae_loss\", self.loss_op)", "def get_loss(self):\r\n\r\n if F.loss_type==\"cosine\":\r\n self.losscos = r2d*tf.acos(1-tf.losses.cosine_distance(tf.nn.l2_normalize(self.labels,1), tf.nn.l2_normalize(self.out, 1), dim=1))\r\n self.loss = tf.losses.cosine_distance(tf.nn.l2_normalize(self.labels,1), tf.nn.l2_normalize(self.out, 1), dim=1)\r\n elif F.loss_type==\"mse2d\":\r\n xl, yl, zl = tf.split(self.labels, 3, axis=1)\r\n xo, yo, zo = tf.split(self.out, 3, axis=1)\r\n thetal, thetao = tf.asin(-yl), tf.asin(-yo)\r\n phil, phio = tf.atan2(-zl, -xl), tf.atan2(-zo, -xo)\r\n self.lb = tf.concat([thetal, phil], axis=1)\r\n self.ob = tf.concat([thetao, phio], axis=1)\r\n self.loss = tf.scalar_mul(tf.constant(r2d), tf.losses.mean_squared_error(self.lb, self.ob, 2))\r\n elif F.loss_type==\"mse3d\":\r\n self.loss = tf.losses.mean_squared_error(tf.nn.l2_normalize(self.labels, 0), tf.nn.l2_normalize(self.out, 0))", "def node_loss_construct(cfg, model_name='node_loss', **kwargs):\n losses = node_loss_dict()\n loss_cfg = cfg[model_name]\n name = loss_cfg.get('name', 'type')\n if not name in losses:\n raise Exception(\"Unknown node loss name provided:\", name)\n\n return losses[name](loss_cfg, **kwargs)", "def get_loss(self, x, y):\n \"*** YOUR CODE HERE question 2 ***\"\n return nn.SquareLoss(self.run(x), y)", "def loss_total(self, mask):\n\n def loss(y_true, y_pred):\n\n # Compute predicted image with non-hole pixels set to ground truth\n y_comp = mask * y_true + (1-mask) * y_pred\n\n # Compute the vgg features. 
\n if self.vgg_device:\n with tf.device(self.vgg_device):\n vgg_out = self.vgg(y_pred)\n vgg_gt = self.vgg(y_true)\n vgg_comp = self.vgg(y_comp)\n else:\n vgg_out = self.vgg(y_pred)\n vgg_gt = self.vgg(y_true)\n vgg_comp = self.vgg(y_comp)\n \n # Compute loss components\n l1 = self.loss_valid(mask, y_true, y_pred)\n l2 = self.loss_hole(mask, y_true, y_pred)\n l3 = self.loss_perceptual(vgg_out, vgg_gt, vgg_comp)\n l4 = self.loss_tv(mask, y_comp)\n l5 = - 0.5 * K.sum(1 + self.z_log_var -self.cl - K.square(self.z_mean)/K.exp(self.cl) - K.exp(self.z_log_var)/K.exp(self.cl))\n # Return loss function\n return l1 + 6*l2 + 0.05*l3 + 0.1*l4 +l5 \n return loss", "def loss(self, input, reconstruction, mu, log_var):\n reconstruction_loss = F.mse_loss(reconstruction, input)\n\n regularisation_loss = - 0.5 * torch.sum(1 + log_var - torch.exp(log_var) - torch.square(mu), 1)\n regularisation_loss = torch.mean(regularisation_loss)\n\n beta = self.beta0 + (self.beta1 - self.beta0) * self.epoch / self.epochs\n\n loss = reconstruction_loss + beta * regularisation_loss\n\n return {'loss': loss, \n 'reconstruction_loss': reconstruction_loss,\n 'regularisation_loss': regularisation_loss,\n 'regularisation_loss_beta': beta * regularisation_loss,\n 'beta': beta}", "def loss(self, T, mode):\n self.circuit.loss(T, mode)", "def _initLoss(self):\n\n return torch.nn.MSELoss()", "def compute_loss(self):\n self.prototypes = self.compute_prototypes()\n self.test_logits = self.compute_logits()\n loss = tf.nn.sparse_softmax_cross_entropy_with_logits(\n labels=self.episode.test_labels, logits=self.test_logits)\n cross_entropy_loss = tf.reduce_mean(loss)\n regularization = tf.reduce_sum(\n tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES))\n loss = cross_entropy_loss + self.weight_decay * regularization\n return loss", "def loss(self, X, y=None):\n\n # In dev testing, the loss fnc stops at \"scores\" , unfollowed by \"softmax\" probability prediction.\n # In real testing, \"self.predict()\" needs to be implemented in Solver() class.\n \n if y is None:\n for bn_param in self.bn_params:\n bn_param[\"mode\"] = \"test\"\n\n\n W1, b1 = self.params['W1'], self.params['b1']\n gamma1, beta1 = self.params[\"sbnGamma1\"], self.params[\"sbnBeta1\"]\n bn_param1 = self.bn_params[0]\n\n W2, b2 = self.params['W2'], self.params['b2']\n gamma2, beta2 = self.params[\"sbnGamma2\"], self.params[\"sbnBeta2\"]\n bn_param2 = self.bn_params[1]\n\n W3, b3 = self.params['W3'], self.params['b3']\n gamma3, beta3 = self.params[\"bnGamma3\"], self.params[\"bnBeta3\"]\n bn_param3 = self.bn_params[2]\n\n W4, b4 = self.params['W4'], self.params['b4']\n \n # pass conv_param to the forward pass for the convolutional layer\n conv_param = self.conv_param\n\n # pass pool_param to the forward pass for the max-pooling layer\n pool_param = self.maxpool_params\n\n ############################################################################\n # TODO: Implement the forward pass for the three-layer convolutional net, #\n # computing the class scores for X and storing them in the scores #\n # variable. 
#\n ############################################################################\n \n scores = None \n cache = {}\n # def conv_sbn_relu_forward(x, w, b, gamma, beta, conv_param, bn_param): return out, cache;\n out, cache[\"layer1\"] = layer_utils.conv_sbn_relu_forward(X, W1, b1, gamma1, beta1, conv_param, bn_param1) \n out, cache[\"layer2\"] = layer_utils.conv_sbn_relu_forward(out, W2, b2, gamma2, beta2, conv_param, bn_param2)\n\n # def max_pool_forward_fast(x, pool_param): return out, cache;\n out, cache[\"maxpool\"] = fast_layers.max_pool_forward_fast(out, pool_param)\n\n # def affine_bn_relu_forward(x, w, b, gamma, beta, bn_param): return out, cache;\n \n out, cache[\"layer3\"] = layer_utils.affine_bn_relu_forward(out, W3, b3, gamma3, beta3, bn_param3)\n\n # def affine_forward(x, w, b): return out, cache;\n scores, cache[\"layer4\"] = layers.affine_forward(out, W4, b4)\n\n\n ############################################################################\n # END OF YOUR CODE #\n ############################################################################\n \n if y is None:\n return scores\n \n ############################################################################\n # TODO: Implement the backward pass for the three-layer convolutional net, #\n # storing the loss and gradients in the loss and grads variables. Compute #\n # data loss using softmax, and make sure that grads[k] holds the gradients #\n # for self.params[k]. Don't forget to add L2 regularization! #\n ############################################################################\n \n loss, grads = 0, {}\n\n # def softmax_loss(x, y): return loss, dscore;\n loss, dscores = layers.softmax_loss(scores, y)\n loss += 0.5 * self.reg * (np.sum(W1 * W1) + np.sum(W2 * W2) + np.sum(W3 * W3) + np.sum(W4 * W4))\n\n # def affine_backward(dout, cache): return dx, dw, db;\n dout, dW4, db4 = layers.affine_backward(dscores, cache[\"layer4\"]) \n\n # def affine_bn_relu_backward(dout, cache): return dx, dw, db, dgamma, dbeta;\n dout, dW3, db3, dgamma3, dbeta3 = layer_utils.affine_bn_relu_backward(dout, cache[\"layer3\"])\n\n # print cache[\"layer3\"]\n\n # def max_pool_backward_fast(dout, cache): return max_pool_backward_im2col(dout, real_cache);\n # def max_pool_backward_im2col(dout, cache): return dx;\n dout = fast_layers.max_pool_backward_fast(dout, cache[\"maxpool\"])\n\n # def conv_sbn_relu_backward(dout, cache): return dx, dw, db, dgamma, dbeta;\n dout, dW2, db2, dgamma2, dbeta2 = layer_utils.conv_sbn_relu_backward(dout, cache[\"layer2\"])\n _, dW1, db1, dgamma1, dbeta1 = layer_utils.conv_sbn_relu_backward(dout, cache[\"layer1\"])\n\n # reg\n grads['W4'], grads['b4'] = dW4 + self.reg * W4, db4\n \n grads['W3'], grads['b3'] = dW3 + self.reg * W3, db3\n grads[\"bnGamma3\"], grads[\"bnBeta3\"] = dgamma3, dbeta3\n\n grads['W2'], grads['b2'] = dW2 + self.reg * W2, db2\n grads[\"sbnGamma2\"], grads[\"sbnBeta2\"] = dgamma2, dbeta2\n\n grads['W1'], grads['b1'] = dW1 + self.reg * W1, db1\n grads[\"sbnGamma1\"], grads[\"sbnBeta1\"] = dgamma1, dbeta1\n\n ############################################################################\n # END OF YOUR CODE #\n ############################################################################\n \n return loss, grads", "def loss_function(recon_x, x, mu, logvar, cl, target, natural):\n\tglobal decoder_nat_loss, decoder_syn_loss, KLD_syn_loss, KLD_nat_loss, regressor_nat, regressor_syn\n\t#decoder_loss = F.binary_cross_entropy(recon_x, x.view(-1, 1, img_size, img_size), reduction='sum') * 
decoder_l_factor\n\tdecoder_loss = F.mse_loss(recon_x, x) * decoder_l_factor\n\n\t# see Appendix B from VAE paper: Kingma and Welling. Auto-Encoding Variational Bayes. ICLR, 2014\n\t# https://arxiv.org/abs/1312.6114\n\t# 0.5 * sum(1 + log(sigma^2) - mu^2 - sigma^2)\n\tbeta = 1 / (batch_size * boundary_dim) # disentanglement factor#extremely small\n\tKLD = -0.5 * beta * torch.sum(1 + logvar - mu.pow(2) - logvar.exp()) * KLD_l_factor\n\n\tregressor_loss = F.mse_loss(cl, target.view(-1, 1)) * regressor_l_factor\n\tif epoch < regressor_start:\n\t\tregressor_loss *= 0\n\n\tif natural:\n\t\tdecoder_nat_loss += decoder_loss\n\t\tKLD_nat_loss += KLD\n\t\tregressor_nat += regressor_loss\n\telse:\n\t\tdecoder_syn_loss += decoder_loss\n\t\tKLD_syn_loss += KLD\n\t\tregressor_syn += regressor_loss\n\n\tif KLD > 1e10:\n\t\tplaySound()\n\t\tsys.exit('KLD diverged')\n\n\treturn decoder_loss + KLD + regressor_loss, KLD, regressor_loss", "def loss(self, x, a, log_p, log_p_to_optimize, r):\n pass", "def loss(self, X, labels):\n features = self.get_conv_features(X)\n loss = blah\n return loss", "def objective(hyperparams): \n global iteration #necessary with a global variable because of implementation from hyperopt. \n iteration += 1\n\n result = run_model(hyperparams, iteration)\n loss = -result #transform to loss in order to minimize\n\n return {'loss': loss, 'hyperparams': hyperparams, 'iteration': iteration, 'status': STATUS_OK}", "def loss(self,\n s0: np.ndarray,\n o: Option,\n r: float,\n s1: np.ndarray,\n done: bool):\n # TODO: rewrite in terms of experience instead\n γ = self.gamma\n ω = self.option_idx_dict[str(o)]\n \n δ = r - self.predict(s0)[ω]\n if not done:\n δ += γ * self.utility(s1, o)\n return δ", "def hpm_loss(self, x, y, t, Ex_u, Ex_v):\n\n x = x.view(-1)\n y = y.view(-1)\n t = t.view(-1)\n\n u, v, f_u, f_v = self.net_pde(x, y, t)\n\n Ex_u = Ex_u.view(-1)\n Ex_v = Ex_v.view(-1)\n\n hpmLoss = torch.mean(f_u ** 2) + torch.mean(f_v ** 2) + torch.mean((u - Ex_u) ** 2) + torch.mean((v - Ex_v) ** 2) \n return hpmLoss", "def _loss_out(self, e_out):\n prior_out = 1 # self.prior\n\n if self.loss_type_out == 'exponential':\n loss = torch.sum(e_out, -1, keepdim=True)\n\n elif self.loss_type_out == 'chi_squared':\n a_out = e_out + prior_out\n df = prior_out + 2\n loss = torch.sum((df / 2 - 1) * torch.log(a_out) + a_out / 2, dim=-1, keepdim=True)\n\n elif self.loss_type_out == 'uncertainty':\n S_out = (e_out + prior_out).sum(-1, keepdim=True)\n unc = self.n_classes / S_out\n loss = -unc\n\n elif self.loss_type_out == 'kl':\n a_out = e_out + prior_out\n a_target = torch.full(a_out.size(), prior_out, dtype=float, device=e_out.device)\n kl = kl_dirichlet(a_out, a_target)\n loss = kl\n\n elif self.loss_type_out == 'kl_reverse':\n a_out = e_out + prior_out\n a_target = torch.full(a_out.size(), prior_out, dtype=float, device=e_out.device)\n kl = kl_dirichlet(a_target, a_out)\n loss = kl\n\n elif self.loss_type_out == 'kl_symmetric':\n a_out = e_out + prior_out\n a_target = torch.full(a_out.size(), prior_out, dtype=float, device=e_out.device)\n kl = .5*(kl_dirichlet(a_out, a_target) + kl_dirichlet(a_target, a_out))\n loss = kl\n\n elif self.loss_type_out == 'chernoff_distance':\n a_out = e_out + prior_out\n a_target = torch.full(a_out.size(), prior_out, dtype=float, device=e_out.device)\n dist = chernoff_distance(a_out, a_target)\n loss = dist\n\n return loss", "def tv_loss(x, name='tv_loss'):\n raise NotImplementedError(\"Please use tensorflow total_variation loss.\")", "def but_test(self):\n if not 
self.path:\n tk.messagebox.showerror(\"Error\", \"Open file first\")\n return\n elif not self.nn_obj:\n tk.messagebox.showerror(\"Error\", \"Create or open NN\")\n return\n elif not self.nn_obj and not self.nn_in:\n tk.messagebox.showerror(\"Error\", \"Open file and create NN\")\n return\n if isinstance(self.nn_obj, dict):\n y_pred=pred(self.nn_obj, self.nn_in).reshape(self.nn_out.shape)\n test_loss=loss(y_pred, self.nn_out, self.nn_obj) \n elif isinstance(self.nn_obj, Net_tr):\n y_pred=pred(self.nn_obj, self.nn_in)\n test_loss=loss(y_pred, self.nn_out, self.nn_obj).item()\n tk.messagebox.showinfo(\"Loss\", \"Loss=\" + str(test_loss)+\" %\")", "def gen_criterion(dis_preds, ctc_loss):\n return ctc_loss - torch.mean(dis_preds)\n # return -torch.mean(dis_preds)", "def add_objective(self): \n \n if \"CSS\" in self.algorithm:\n \n if self.num_hidden == 0:\n \n data_term = self.compute_energy(self.x, self.batch_size)\n \n else:\n \n data_term = self.compute_free_energy(self.x)\n \n normalizer_term = self.add_css_approximation(data_term)\n \n if \"CD\" in self.algorithm and self.num_hidden ==0:\n \n data_term = self.compute_energy(self.x, self.batch_size)\n \n normalizer_term = self.compute_energy(self.x_gibbs, \n self.batch_size)\n \n normalizer_term = -T.mean(normalizer_term)\n \n if \"CD\" in self.algorithm and self.num_hidden > 0:\n \n data_term = self.compute_free_energy(self.x)\n \n normalizer_term = self.compute_free_energy(self.rbm_cd_samples)\n \n normalizer_term = -T.mean(normalizer_term)\n \n # cost is negative log likelihood \n self.cost = T.mean(data_term) + normalizer_term", "def __init__(self, \n n_neurons = \"micro\", # else: \"brunel\" or arrays\n C_ab = \"micro\", # else: \"brunel\" or arrays\n area = net.area, # simulation size\n neuron_model = net.neuron_model, # \"iaf_psc_delta\" or \"iaf_psc_exp\"\n connection_rule = net.connection_rule, # \"fixed_total_number\" or \"fixed_indegree\"\n j02 = net.j02, \n weight_rel_sd = net.weight_rel_sd, \n delay_rel_sd = net.delay_rel_sd, \n g = net.g, \n rate_ext = net.rate_ext):\n ###################################################\n ### \tNetwork parameters\t\t### \n ###################################################\n\n # area of network in mm^2; scales numbers of neurons\n # use 1 for the full-size network (77,169 neurons)\n self.area = area\n \n self.layers = net.layers #np.array([\"L23\", \"L4\", \"L5\", \"L6\"])\n self.types = net.types #np.array([\"e\", \"i\"]) \n self.populations = np.array([layer + typus for layer in self.layers for typus in self.types])\n self.n_populations = len(self.populations)\n self.n_layers = len(self.layers)\n self.n_types = len(self.types)\n \n # Neuron numbers\n if n_neurons == \"micro\":\n self.n_neurons = np.int_(net.full_scale_n_neurons * self.area)\n elif n_neurons == \"brunel\":\n # Provide an array of equal number of neurons in each exc./inh. population\n gamma = 0.25\n inh_factor = 1. / (gamma + 1.)\n exc_factor = 1. - inh_factor \n n_total_micro = np.sum(net.full_scale_n_neurons * self.area)\n N_exc = n_total_micro/self.n_populations * exc_factor\n N_inh = n_total_micro/self.n_populations * inh_factor\n self.n_neurons = np.tile([N_exc, N_inh], self.n_layers).astype(int)\n else:\n if type(n_neurons) == np.ndarray:\n if n_neurons.shape == (self.n_populations, ):\n self.n_neurons = np.int_(n_neurons)\n else:\n raise Exception(\"'n_neurons' has wrong shape. 
\"+\n \"Expects (%i,)\"%self.n_populations)\n else: \n raise Exception(\"'n_neurons' expects either numpy.ndarray or string \"+\n \"in {'micro', 'brunel'}\")\n self.n_total = np.sum(self.n_neurons)\n\n \n # Synapse numbers\n # Connection probabilities: conn_probs[post, pre] = conn_probs[target, source]\n conn_probs = net.conn_probs\n # Scale synapse numbers of the C_ab\n if net.scale_C_linearly:\n n_outer_full = np.outer(net.full_scale_n_neurons, net.full_scale_n_neurons)\n C_full_scale = np.log(1. - conn_probs) / np.log(1. - 1. / n_outer_full)\n C_scaled = np.int_(C_full_scale * self.area)\n else:\n n_outer = np.outer(self.n_neurons, self.n_neurons)\n C_scaled = np.int_(np.log(1. - conn_probs) / np.log(1. - 1. / n_outer))\n\n self.connection_rule = connection_rule\n if self.connection_rule == \"fixed_total_number\":\n C_ab_micro = C_scaled # total number, do not divide! \n elif self.connection_rule == \"fixed_indegree\":\n C_ab_micro = (C_scaled.T / (net.full_scale_n_neurons * self.area)).T\n else:\n raise Exception(\"Unexpected connection type. Use 'fixed_total_number' for microcircuit \" + \n \"model or 'fixed_indegree' for Brunel's model!\")\n\n if C_ab == \"micro\":\n self.C_ab = C_ab_micro # shall not be integer at this point!\n elif C_ab == \"brunel\":\n C_e = np.mean(C_ab_micro) # mean for microcircuit (= 501 in full scale)\n C_i = gamma * C_e\n self.C_ab = np.tile([C_e, C_i], (self.n_populations, self.n_layers)).astype(int) \n else:\n if type(C_ab) == np.ndarray:\n if C_ab.shape == (self.n_populations, self.n_populations):\n self.C_ab = np.int_(C_ab)\n else:\n raise Exception(\"'C_ab' has wrong shape. \"+\n \"Expects (%i, %i)\"%(self.n_populations, self.n_populations))\n else: \n raise Exception(\"'C_ab' expects either numpy.ndarray or string \"+\n \"in {'micro', 'brunel'}\")\n\n\n ###################################################\n ### Single-neuron parameters\t\t### \n ###################################################\n self.neuron_model = neuron_model\n self.Vm0_mean = net.Vm0_mean # mean of initial membrane potential (mV)\n self.Vm0_std = net.Vm0_std # std of initial membrane potential (mV)\n self.model_params = net.model_params\n if not self.neuron_model==\"iaf_psc_delta\":\n self.model_params[\"tau_syn_ex\"] = net.tau_syn_ex # excitatory synaptic time constant (ms)\n self.model_params[\"tau_syn_in\"] = net.tau_syn_in # inhibitory synaptic time constant (ms)\n self.tau_syn_ex = net.tau_syn_ex # ms\n self.tau_syn_in = net.tau_syn_in # ms\n self.tau_syn = np.tile([self.tau_syn_ex, self.tau_syn_in], (self.n_populations, self.n_layers))\n # Rescaling for model calculations: these values are not used in the simulation!\n self.tau_m = self.model_params[\"tau_m\"] # ms\n self.t_ref = self.model_params[\"t_ref\"] # ms\n self.E_L = self.model_params[\"E_L\"] # mV\n self.V_r = self.model_params[\"V_reset\"] - self.E_L # mV\n self.theta = self.model_params[\"V_th\"] - self.E_L # mV\n self.C_m = self.model_params[\"C_m\"] # pF\n\n\n ######################################################\n # Synaptic weights. Depend on neuron_model! 
##\n ######################################################\n self.g = g\n self.j02 = j02\n\n g_all = np.tile([1., -self.g], (self.n_populations, self.n_layers))\n L23e_index = np.where(self.populations == \"L23e\")[0][0]\n L4e_index = np.where(self.populations == \"L4e\")[0][0]\n g_all[L23e_index, L4e_index] *= self.j02\n \n self.J = net.PSP_e # mv; mean PSP, used as reference PSP\n self.J_ab = self.J * g_all\n self.weight_rel_sd = weight_rel_sd # Standard deviation of weight relative to mean weight\n # Transformation from peak PSP to PSC\n delta_tau = self.tau_syn - self.tau_m\n ratio_tau = self.tau_m / self.tau_syn\n PSC_over_PSP = self.C_m * delta_tau / (self.tau_m * self.tau_syn * \\\n (ratio_tau**(self.tau_m / delta_tau) - ratio_tau**(self.tau_syn / delta_tau))) \n # Actual weights have to be adapted: from peak PSP to PSC (and back...)\n if self.neuron_model==\"iaf_psc_exp\": # PSCs calculated from PSP amplitudes\n self.weights = self.J_ab * PSC_over_PSP # neuron populations\n elif self.neuron_model==\"iaf_psc_delta\":\n self.weights = self.J_ab * PSC_over_PSP * (self.tau_syn_ex) / self.C_m\n # This might be an overkill / doing things twice...\n elif self.neuron_model==\"iaf_psc_alpha\": # PSCs calculated from PSP amplitudes\n self.weights = self.J_ab * np.exp(1) / (self.tau_syn_ex) / self.C_m\n else:\n raise Exception(\"Neuron model should be iaf_psc_ - {delta, exp, alpha}!\")\n\n\n ###################################################\n ### Delays and dicts ### \n ###################################################\n # mean dendritic delays for excitatory and inhibitory transmission (ms)\n self.delay_e = net.delay_e # ms, excitatory synapses\n self.delay_i = net.delay_i # ms, inhibitory synapses\n\n self.delays = np.tile([self.delay_e, self.delay_i], (self.n_populations, self.n_layers)) # adapt...\n self.delay_rel_sd = delay_rel_sd \n \n # Synapse dictionaries\n # default connection dictionary\n self.conn_dict = {\"rule\": connection_rule}\n # weight distribution of connections between populations\n self.weight_dict_exc = net.weight_dict_exc\n self.weight_dict_inh = net.weight_dict_inh\n # delay distribution of connections between populations\n self.delay_dict = net.delay_dict\n # default synapse dictionary\n self.syn_dict = net.syn_dict\n \n \n ###################################################\n ### External stimuli ## \n ###################################################\n # rate of background Poisson input at each external input synapse (spikes/s) \n self.rate_ext = rate_ext # Hz \n self.J_ext = net.PSP_ext # external synaptic weight\n self.delay_ext = self.delay_e # ms; mean delay of external input\n self.dc_amplitude = net.dc_amplitude # constant bg amplitude\n self.C_aext = net.C_aext # in-degrees for background input\n # Adapt weights\n if self.neuron_model==\"iaf_psc_exp\": # PSCs calculated from PSP amplitudes\n self.weight_ext = self.J_ext * PSC_over_PSP[0, 0] \n elif self.neuron_model==\"iaf_psc_delta\":\n self.weight_ext = self.J_ext * PSC_over_PSP[0, 0] * self.tau_syn_ex / self.C_m\n elif self.neuron_model==\"iaf_psc_alpha\": # PSCs calculated from PSP amplitudes\n self.weight_ext = self.J_ext * np.exp(1) / self.tau_syn_ex / self.C_m\n\n # optional additional thalamic input (Poisson)\n self.n_th = net.n_th # size of thalamic population\n self.th_start = net.th_start # onset of thalamic input (ms)\n self.th_duration = net.th_duration # duration of thalamic input (ms)\n self.th_rate = net.th_rate # rate of thalamic neurons (spikes/s)\n self.J_th = net.PSP_th # mean EPSP 
amplitude (mV) for thalamic input\n # Adapt weights\n if self.neuron_model==\"iaf_psc_exp\": # PSCs calculated from PSP amplitudes\n self.weight_th = self.J_th * PSC_over_PSP[0, 0] \n elif self.neuron_model==\"iaf_psc_delta\":\n self.weight_th = self.J_th * PSC_over_PSP[0, 0] * self.tau_syn_ex / self.C_m\n elif self.neuron_model==\"iaf_psc_alpha\": # PSCs calculated from PSP amplitudes\n self.weight_th = self.J_th * np.exp(1) / self.tau_syn_ex / self.C_m\n\n \n # connection probabilities for thalamic input\n conn_probs_th = net.conn_probs_th\n if net.scale_C_linearly:\n if not self.n_th == 0:\n C_th_full_scale = np.log(1. - conn_probs_th) / \\\n np.log(1. - 1. / (self.n_th * net.full_scale_n_neurons))\n self.C_th_scaled = np.int_(C_th_full_scale * self.area)\n else:\n if not self.n_th == 0:\n self.C_th_scaled = np.int_(np.log(1. - conn_probs_th) / \\\n np.log(1. - 1. / (self.n_th * self.n_neurons_micro)))\n if self.n_th == 0:\n self.C_th_scaled = None\n \n # mean delay of thalamic input (ms)\n self.delay_th = net.delay_th\n # standard deviation relative to mean delay of thalamic input\n self.delay_th_rel_sd = net.delay_th_rel_sd\n\n\n ######################################################\n # Predefine matrices for mean field ##\n ######################################################\n if self.neuron_model==\"iaf_psc_delta\":\n self.J_mu = self.weights\n self.J_sd = self.weights\n self.J_mu_ext = self.weight_ext \n self.J_sd_ext = self.weight_ext\n elif self.neuron_model==\"iaf_psc_exp\":\n self.J_mu = self.weights * self.tau_syn / self.C_m\n self.J_sd = self.weights * np.sqrt(self.tau_syn / 2.) / self.C_m\n self.J_mu_ext = self.weight_ext * self.tau_syn_ex / self.C_m\n self.J_sd_ext = self.weight_ext * np.sqrt(self.tau_syn_ex / 2.) / self.C_m\n elif self.neuron_model==\"iaf_psc_alpha\":\n self.J_mu = self.weights * self.tau_syn**2 / self.C_m\n self.J_sd = self.weights * self.tau_syn**(3./2.) / (self.C_m * 2.)\n self.J_mu_ext = self.weight_ext * self.tau_syn_ex**2 / self.C_m\n self.J_sd_ext = self.weight_ext * self.tau_syn_ex**(3./2.) 
/ (self.C_m * 2.)\n self.mat_mu = self.tau_m * 1e-3 * self.J_mu * self.C_ab\n self.mu_ext = self.tau_m * 1e-3 * self.J_mu_ext * self.C_aext * self.rate_ext\n self.mat_var = self.tau_m * 1e-3 * (1 + self.weight_rel_sd ** 2) * self.J_sd**2 * self.C_ab\n self.var_ext = self.tau_m * 1e-3 * (1 + self.weight_rel_sd ** 2) * self.J_sd_ext**2 * self.C_aext * self.rate_ext", "def nnObjFunction(params, *args):\n \n n_input, n_hidden, n_class, training_data, training_label, lambdaval = args\n \n w1 = params[0:n_hidden * (n_input + 1)].reshape( (n_hidden, (n_input + 1)))\n w2 = params[(n_hidden * (n_input + 1)):].reshape((n_class, (n_hidden + 1)))\n obj_val = 0\n obj_grad = np.array([])\n \n \n \n # initialize a gradient vector matrix with w1\n gradient_matrix_w1 = np.zeros(w1.shape) \n \n # initialize a gradient vector matrix with w2\n gradient_matrix_w2 = np.zeros(w2.shape) \n \n #transpose of input matrix\n input_mat = training_data.transpose() \n \n #transpose of label matrix(expected output)\n label_expected_mat = training_label.transpose() \n \n #get number of coulumns in matrix a1 which is equal to the total number of inputs\n number_of_input_columns = input_mat.shape[1] \n \n #Calculate the bias as input from hiddenlayer\n input_bias = np.tile(1,(1,np.size(input_mat,1)))\n \n #adding bias column in matrix\n input_mat=np.concatenate((input_mat,input_bias), axis=0) \n \n # multipy input with the corresponding wieght vector\n input_hidden_layer = np.dot(w1,input_mat)\n \n # output of hidden layer \n output_hidden_layer = sigmoid(input_hidden_layer) \n \n # calculate bias for hidden layer\n hidden_layer_bias = np.tile(1,(1,np.size(output_hidden_layer,1)))\n \n #adding bias column in hidden layer\n output_hidden_layer = np.concatenate((output_hidden_layer,hidden_layer_bias), axis=0) \n \n # multipy output of hidden layer with the corresponding wieght vector\n input_output_layer = np.dot(w2,output_hidden_layer) \n \n # output of output layer\n output_output_layer = sigmoid(input_output_layer) \n \n # Regularization of neural network\n error_function_mat=label_expected_mat*np.log(output_output_layer)+(1-label_expected_mat)*np.log(1-output_output_layer) \n obj_val= - np.sum(error_function_mat[:])/number_of_input_columns\n \n #ew1=w1**2\n #ew2=w2**2\n error_function = ((np.sum(pow(w1[:],2)) + np.sum(pow(w2[:],2)))/(2*number_of_input_columns))*lambdaval\n obj_val=obj_val+error_function\n \n print obj_val\n # claculate delta for hidden and output layer \n delta_output_layer = (output_output_layer - label_expected_mat) \n delta_hidden_layer = np.dot(w2.transpose(), delta_output_layer)*(output_hidden_layer*(1-output_hidden_layer))\n change_in_W1 = np.dot(delta_hidden_layer , input_mat.transpose())\n change_in_W2 = np.dot(delta_output_layer , output_hidden_layer.transpose())\n \n #remove the bias row\n change_in_W1 = change_in_W1[:-1,:]\n \n \n #update weight matrix\n gradient_matrix_w1 = gradient_matrix_w1 + change_in_W1\n gradient_matrix_w2 = gradient_matrix_w2 + change_in_W2\n \n #regularization of updated weight matrix\n gradient_matrix_w1 = (gradient_matrix_w1 + lambdaval*w1)/number_of_input_columns\n gradient_matrix_w2 = (gradient_matrix_w2 + lambdaval*w2)/number_of_input_columns\n obj_grad = np.concatenate((gradient_matrix_w1.flatten(),gradient_matrix_w2.flatten()), axis=0) \n \n print obj_grad\n return (obj_val,obj_grad)", "def TCVAE_loss_function(input, recons, mu, log_var, z, dataset_size, batch_iter, \n anneal_steps=200., alpha=1., beta=6., gamma=1., train=True):\n weight = 1 # Account for the 
minibatch samples from the dataset\n batch_size, z_dim = z.shape\n \n recons_loss = F.mse_loss(recons, input, reduction='sum')/(19*batch_size)\n\n log_q_zx = log_density_gaussian(z, mu, log_var).sum(dim = 1)\n\n zeros = torch.zeros_like(z)\n log_p_z = log_density_gaussian(z, zeros, zeros).sum(dim = 1)\n\n mat_log_q_z = log_density_gaussian( z.view(batch_size, 1, z_dim),\n mu.view(1, batch_size, z_dim),\n log_var.view(1, batch_size, z_dim))\n\n # Reference\n # [1] https://github.com/YannDubs/disentangling-vae/blob/535bbd2e9aeb5a200663a4f82f1d34e084c4ba8d/disvae/utils/math.py#L54\n strat_weight = (dataset_size - batch_size + 1) / (dataset_size * (batch_size - 1))\n importance_weights = torch.Tensor(batch_size, batch_size).fill_(1 / (batch_size -1)).to(input.device)\n importance_weights.view(-1)[::batch_size] = 1 / dataset_size\n importance_weights.view(-1)[1::batch_size] = strat_weight\n importance_weights[batch_size - 2, 0] = strat_weight\n log_importance_weights = importance_weights.log()\n\n mat_log_q_z += log_importance_weights.view(batch_size, batch_size, 1)\n\n log_q_z = torch.logsumexp(mat_log_q_z.sum(2), dim=1, keepdim=False)\n log_prod_q_z = torch.logsumexp(mat_log_q_z, dim=1, keepdim=False).sum(1)\n\n mi_loss = (log_q_zx - log_q_z).mean()/19\n tc_loss = (log_q_z - log_prod_q_z).mean()/19\n kld_loss = (log_prod_q_z - log_p_z).mean()/19\n\n if train:\n batch_iter += 1\n anneal_rate = min(0 + 1 * batch_iter / anneal_steps, 1)\n else:\n anneal_rate = 1.\n\n loss = recons_loss + alpha*mi_loss + weight*(beta*tc_loss + \n anneal_rate*gamma*kld_loss)\n \n return loss, recons_loss, kld_loss, tc_loss, mi_loss", "def loss(self, X, labels):\n features = self.get_conv_feats(X)\n loss = blah\n return loss", "def create_criterion(loss_name: str) -> torch.nn.Module:\n # TODO: Move to models/common_model_lib/losses.py\n if loss_name.lower() == \"crossentropy\":\n criterion = torch.nn.CrossEntropyLoss()\n elif loss_name.lower() == \"nll\":\n criterion = torch.nn.NLLLoss()\n else:\n raise ValueError(\"SystemLog: The loss function must CrossEntropy or NLL \"\n \"(loss function: {})\".format(loss_name))\n\n return criterion", "def loss_fun(model: GPModel, params: dict) -> float:\n py = model.module.call(params, train_ds['index_points'])\n return -py.log_prob(train_ds['y'])", "def l1_loss(params):\n \"\"\" It is a vec for each branch\"\"\"\n loss_branches_vec = []\n # TODO This is hardcoded but all our cases rigth now uses four branches\n for i in range(len(params['branches']) -1):\n loss_branches_vec.append(torch.abs((params['branches'][i] - params['targets'])\n * params['controls_mask'][i])\n * params['branch_weights'][i])\n \"\"\" The last branch is a speed branch\"\"\"\n # TODO: Activate or deactivate speed branch loss\n loss_branches_vec.append(torch.abs(params['branches'][-1] - params['inputs'])\n * params['branch_weights'][-1])\n return loss_branches_vec, {}", "def loss_fn(params):\n logits = models.ProgramTransformer(config).apply(\n {'params': params},\n inputs,\n outputs,\n programs,\n rngs={'dropout': train_rng})\n loss, weight_sum = compute_weighted_cross_entropy(logits, programs, weights)\n mean_loss = loss / weight_sum\n return mean_loss, logits", "def loss_wd(model, input_x, input_y, alpha):\n out_x = model(input_x)\n out_y = model(input_y)\n\n obj = ( torch.sum(out_x) - torch.sum(out_y) )/ batch_size\n\n a = torch.rand( (input_x.size(0),1) )\n input_z = a * input_x + (1-a) * input_y\n input_z.requires_grad_(True)\n\n out_z = model(input_z)\n gradient_z = autograd.grad(out_z.sum(), 
input_z, create_graph=True)[0]\n\n norm_gradient = torch.norm(gradient_z, dim=1)\n\n penalty = ( torch.sum((norm_gradient - 1).pow(2)) )/ batch_size\n\n obj = obj - alpha * penalty\n\n loss = - obj\n return loss", "def loss_fn(input_d, reconstructed, mean, logvar, beta=1, batch_size=1, input_size=1):\n\n # mse_criterion = nn.MSELoss() # reduction=sum ?\n # mse_loss = mse_criterion(input_d, reconstructed)\n\n # bce_criterion = nn.BCELoss(size_average=False) # reduction=sum ?\n bce_criterion = nn.BCELoss() # reduction=sum ?\n bce_loss = bce_criterion(input_d, reconstructed)\n\n # see Appendix B from VAE paper:\n # Kingma and Welling. Auto-Encoding Variational Bayes. ICLR, 2014\n # https://arxiv.org/abs/1312.6114\n # 0.5 * sum(1 + log(sigma^2) - mu^2 - sigma^2)\n\n # for gaussian distribution when\n # generated data passed to the encorder is z~ N(0,1) and generated data is x~N(m,var)\n\n kl_loss = -0.5 * torch.sum(1 + logvar - mean.pow(2) - logvar.exp())\n\n normalized_kl_loss = kl_loss / (batch_size * input_size)\n scaled_kl_loss = beta*normalized_kl_loss\n # scaled_kl_loss = beta*kl_loss\n\n # return bce_loss + kl_loss, bce_loss, kl_loss\n return bce_loss + scaled_kl_loss, bce_loss, normalized_kl_loss\n # return mse_loss + scaled_kl_loss, mse_loss, kl_loss", "def loss_fn(model):\n with flax.nn.stateful(state) as new_state:\n with flax.nn.stochastic(prng_key):\n logits = model(batch['image'])\n loss = cross_entropy_loss(logits, batch['label'])\n # TODO(britefury): check if applying L2 regularization to weights but\n # *not* biases improves results\n weight_penalty_params = jax.tree_leaves(model.params)\n weight_l2 = sum([jnp.sum(x ** 2)\n for x in weight_penalty_params\n if x.ndim > 1])\n weight_penalty = l2_reg * 0.5 * weight_l2\n loss = loss + weight_penalty\n return loss, (new_state, logits)", "def tv_loss(input: th.Tensor):\n input = tf.pad(input, (0, 1, 0, 1), \"replicate\")\n x_diff = input[..., :-1, 1:] - input[..., :-1, :-1]\n y_diff = input[..., 1:, :-1] - input[..., :-1, :-1]\n return (x_diff ** 2 + y_diff ** 2).mean([1, 2, 3])", "def __init__(self,args):\n self.G = Generator(args).cuda()\n self.D = Discriminator(args).cuda()\n self.gen_optim = optim.Adam(self.G.parameters(),lr=args.lrG,betas=(args.Gbeta1,args.Gbeta2))\n self.dis_optim = optim.Adam(self.D.parameters(),lr=args.lrD,betas=(args.Dbeta1,args.Dbeta2))\n self.l2 = nn.MSELoss()\n self.bce = nn.BCELoss()\n self.save_dir = \"./checkpoints/SVSGan\"\n self.batch_size = args.batch_size\n self.real = Variable(torch.ones(args.batch_size,args.sample_length , 1)).cuda()\n self.fake = Variable(torch.zeros(args.batch_size,args.sample_length, 1)).cuda()\n self.model_name = \"SVSGan\"", "def convert_to_risk_slim_cplex_solution(rho, indices, loss=None, objval=None):\n solution_idx = range(0, indices['n_variables'])\n solution_val = np.zeros(indices['n_variables'])\n\n # rho\n solution_val[indices['rho']] = rho\n\n # alpha\n alpha = np.zeros(len(indices['alpha']))\n alpha[np.flatnonzero(rho[indices['L0_reg_ind']])] = 1.0\n solution_val[indices['alpha']] = alpha\n L0_penalty = np.sum(indices['C_0_alpha'] * alpha)\n\n # add loss / objval\n need_loss = 'loss' in indices\n need_objective_val = 'objval' in indices\n need_L0_norm = 'L0_norm' in indices\n need_sigma = 'sigma_names' in indices\n\n # check that we have the right length\n # COMMENT THIS OUT FOR DEPLOYMENT\n # if need_sigma:\n # pass\n # else:\n # assert (indices['n_variables'] == (len(rho) + len(alpha) + need_loss + need_objective_val + need_L0_norm))\n\n if need_loss:\n if loss 
is None:\n if objval is None:\n loss = compute_loss(rho)\n else:\n loss = objval - L0_penalty\n\n solution_val[indices['loss']] = loss\n\n if need_objective_val:\n if objval is None:\n if loss is None:\n objval = compute_loss(rho) + L0_penalty\n else:\n objval = loss + L0_penalty\n\n solution_val[indices['objval']] = objval\n\n if need_L0_norm:\n solution_val[indices['L0_norm']] = np.sum(alpha)\n\n if need_sigma:\n rho_for_sigma = np.array([indices['rho'][int(s.strip('sigma_'))] for s in indices['sigma_names']])\n solution_val[indices['sigma']] = np.abs(solution_val[rho_for_sigma])\n\n solution_cpx = cplex.SparsePair(ind=solution_idx, val=solution_val.tolist())\n return solution_cpx, objval", "def __init__(self, generator, tgt_vocab,\n normalization=\"sents\",\n label_smoothing=0.0,\n use_kl_annealing=False,\n use_kl_freebits=False,\n kl_freebits_margin=0.0,\n kl_annealing_current=0.0,\n kl_annealing_increment=0.0001,\n kl_annealing_warmup_steps=1000,\n image_loss_type='logprob',\n use_local_image_features=False,\n two_step_image_prediction=False\n ):\n self.multimodal_model_type = 'vi-model1'\n\n super(NMTVIModel1LossCompute, self).__init__(generator, tgt_vocab,\n normalization, label_smoothing)\n\n # kl annealing parameters\n self.n_model_updates = 0\n self.use_kl_annealing = use_kl_annealing\n if use_kl_annealing:\n self.kl_annealing_current = kl_annealing_current\n self.kl_annealing_increment = kl_annealing_increment\n self.kl_annealing_warmup_steps = kl_annealing_warmup_steps\n else:\n self.kl_annealing_current = 1.0\n self.kl_annealing_increment = 0.0\n self.kl_annealing_warmup_steps = 0\n\n self.use_kl_freebits = use_kl_freebits\n if use_kl_freebits:\n self.kl_freebits_margin = kl_freebits_margin\n else:\n self.kl_freebits_margin = 0.0\n\n self.image_loss_type = image_loss_type\n self.use_local_image_features = use_local_image_features\n self.two_step_image_prediction = two_step_image_prediction\n self._statistics = onmt.VIStatistics\n\n if image_loss_type == 'categorical':\n self.image_loss_criterion = nn.NLLLoss2d()", "def policy_loss(sal_box_prob, oracle_action, sample_weights):\n loss = tf.nn.softmax_cross_entropy_with_logits(logits=sal_box_prob, labels=oracle_action)\n \n return tf.reduce_mean(loss) * (1.0 - sample_weights / 10.0)", "def create_ars_model(odim=10,adim=2,hdims=[128],\n actv=tf.nn.relu,out_actv=tf.nn.tanh):\n import tensorflow as tf\n \n def mlp(x,hdims=[256,256],actv=tf.nn.relu,out_actv=tf.nn.relu):\n ki = tf.truncated_normal_initializer(stddev=0.1)\n for hdim in hdims[:-1]:\n x = tf.layers.dense(x,units=hdim,activation=actv,kernel_initializer=ki)\n return tf.layers.dense(x,units=hdims[-1],\n activation=out_actv,kernel_initializer=ki)\n def placeholder(dim=None):\n return tf.placeholder(dtype=tf.float32,shape=(None,dim) if dim else (None,))\n def placeholders(*args):\n \"\"\"\n Usage: a_ph,b_ph,c_ph = placeholders(adim,bdim,None)\n \"\"\"\n return [placeholder(dim) for dim in args]\n def get_vars(scope):\n return [x for x in tf.compat.v1.global_variables() if scope in x.name]\n \n # Have own session\n config = tf.ConfigProto()\n config.gpu_options.allow_growth = True\n sess = tf.Session(config=config)\n \n # Placeholders\n o_ph = placeholder(odim)\n \n # Policy \n with tf.variable_scope('main'):\n mu = mlp(o_ph,hdims=hdims+[adim],actv=actv,out_actv=out_actv)\n \n # Params\n main_vars = get_vars('main')\n \n model = {'o_ph':o_ph,'mu':mu,'main_vars':main_vars}\n return model, sess", "def MVAE_objective(ce_weight, modal_loss_funcs, recon_weights, 
input_to_float=True, annealing=1.0, criterion=torch.nn.CrossEntropyLoss()):\n recon_loss_func = elbo_loss(modal_loss_funcs, recon_weights, annealing)\n\n def allnonebuti(i, item):\n ret = [None for w in modal_loss_funcs]\n ret[i] = item\n return ret\n\n def actualfunc(pred, truth, args):\n training = args['training']\n reps = args['reps']\n fusedmu, fusedlogvar = args['fused']\n decoders = args['decoders']\n inps = args['inputs']\n reconsjoint = []\n\n if input_to_float:\n inputs = [i.float().cuda() for i in inps]\n else:\n inputs = [i.cuda() for i in inps]\n for i in range(len(inps)):\n reconsjoint.append(decoders[i](\n reparameterize(fusedmu, fusedlogvar, training)))\n total_loss = recon_loss_func(reconsjoint, inputs, fusedmu, fusedlogvar)\n for i in range(len(inps)):\n mu, logvar = reps[i]\n recon = decoders[i](reparameterize(mu, logvar, training))\n total_loss += recon_loss_func(allnonebuti(i, recon),\n allnonebuti(i, inputs[i]), mu, logvar)\n total_loss += ce_weight * criterioning(pred, truth, criterion)\n return total_loss\n return actualfunc", "def objective(self, params):\n \n model_params = dict()\n training_params = dict()\n for param_name in self.model_param_names:\n model_params[param_name] = params[param_name]\n for param_name in self.training_param_names:\n training_params[param_name] = params[param_name]\n \n copy = self.m['model']\n self.m['model'] = self.m['model'](**model_params)\n self.m.update(training_params)\n \n model = Model(**self.m)\n data = self.datasets[model.data_type]\n\n start = timer()\n res, full_res = CV_fit(model, data, self.datasets)\n run_time = timer()-start\n\n loss = res[0]\n self.m['model'] = copy\n \n return {'loss': loss, 'params': params, 'run_time': run_time, 'status': STATUS_OK}", "def get_loss(self, x, y):\n \"*** YOUR CODE HERE ***\"\n #make your predictions using run\n #compute loss nn.squareloss\n y_pred = self.run(x)\n return nn.SquareLoss(y_pred,y)", "def getLoss(self, x_test, t_test):\n x_t = Variable(x_test, requires_grad=False)\n #Feed inputes into neural network\n t_pred = self.model(x_t)\n #Now lets compute out loss\n loss = self.loss_fn(t_pred, t_test)\n return loss", "def l2_loss(params):\n \"\"\" It is a vec for each branch\"\"\"\n loss_branches_vec = []\n # TODO This is hardcoded but all our cases rigth now uses four branches\n for i in range(len(params['branches']) -1):\n loss_branches_vec.append(((params['branches'][i] - params['targets']) **2\n * params['controls_mask'][i])\n * params['branch_weights'][i])\n \"\"\" The last branch is a speed branch\"\"\"\n # TODO: Activate or deactivate speed branch loss\n loss_branches_vec.append((params['branches'][-1] - params['inputs']) ** 2\n * params['branch_weights'][-1])\n return loss_branches_vec, {}", "def loss_func(pred, conv, label, bboxes, num_classes, train_input_size, iou_loss_threshold):\n \"\"\" giou replaces l2 norm losses of x, y, w, h as an improvement from original yolo_v3 \"\"\"\n \n # obtain number of classes\n num_classes = num_classes\n \n # obtain shape of raw yolo_v3 output (pre-decode)\n conv_shape = tf.shape(conv)\n \n # obtain batch size of raw yolo_v3 output (pre-decode)\n batch_size = conv_shape[0]\n \n # obtain output size of raw yolo_v3 output (pre-decode)\n output_size = conv_shape[1]\n \n # obtain train input size\n train_input_size = tf.cast(train_input_size, tf.float32)\n \n # reshape raw conv output \n conv = tf.reshape(conv, (batch_size, output_size, output_size, 3, 5 + num_classes))\n \n # obtain objectiveness scores and class probabilites for batch 
from raw conv output\n conv_raw_objectiveness = conv[:, :, :, :, 4:5]\n conv_raw_prob = conv[:, :, :, :, 5:]\n \n # obtain predicted x, y, w, h and objectiveness scores for batch based on train_input_size post decode\n pred_xywh = pred[:, :, :, :, 0:4]\n pred_conf = pred[:, :, :, :, 4:5]\n \n # obtain label x, y, w, h and objectiveness scores for batch based on train_input_size\n label_xywh = label[:, :, :, :, 0:4]\n respond_bbox = label[:, :, :, :, 4:5]\n label_prob = label[:, :, :, :, 5:]\n \n # obtain giou between predictions and labels \n giou = tf.expand_dims(bbox_giou(pred_xywh, label_xywh), axis = -1)\n\n # loss factor that gives higher weight to smaller boxes \n bbox_loss_scale = 2.0 - 1.0 * label_xywh[:, :, :, :, 2:3] * label_xywh[:, :, :, :, 3:4] / (train_input_size ** 2)\n \n # obtain giou loss \n giou_loss = respond_bbox * bbox_loss_scale * (1 - giou)\n \n # obtain iou between predictions and labels \n iou = bbox_iou(pred_xywh[:, :, :, :, np.newaxis, :], bboxes[:, np.newaxis, np.newaxis, np.newaxis, :, :])\n \n # find the value of iou with the largest prediction box\n max_iou = tf.reduce_max(iou, axis = -1, keepdims = True)\n\n # if the largest iou is less than the threshold, it is considered that the prediction box contains no objects, \n # then the background box\n respond_bgd = (1.0 - respond_bbox) * tf.cast(max_iou < iou_loss_threshold, tf.float32)\n \n # focal factor on objectiveness loss \n conf_focal = tf.pow(respond_bbox - pred_conf, 2)\n\n # calculate the objectiveness loss \n # we hope that if the grid contains objects, then the network output prediction box has a confidence of 1 and 0 \n # when there is no object.\n conf_loss = conf_focal * (respond_bbox + respond_bgd) * \\\n tf.nn.sigmoid_cross_entropy_with_logits(labels = respond_bbox, logits = conv_raw_objectiveness)\n \n # class probabilities loss\n prob_loss = respond_bbox * tf.nn.sigmoid_cross_entropy_with_logits(labels = label_prob, logits = conv_raw_prob)\n \n # sum up losses and take mean accross batch\n giou_loss = tf.reduce_mean(tf.reduce_sum(giou_loss, axis = [1,2,3,4]))\n conf_loss = tf.reduce_mean(tf.reduce_sum(conf_loss, axis = [1,2,3,4]))\n prob_loss = tf.reduce_mean(tf.reduce_sum(prob_loss, axis = [1,2,3,4]))\n \n if np.isnan(giou_loss):\n \n giou_loss = tf.Variable(0, trainable = False, dtype = tf.float32)\n \n return giou_loss, conf_loss, prob_loss", "def model_loss(input_real, input_z, out_channel_dim):\n # TODO: Implement Function \n \n smooth = 0.1\n g_model = generator(input_z, out_channel_dim)\n d_model_real, d_logits_real = discriminator(input_real)\n d_model_fake, d_logits_fake = discriminator(g_model, reuse=True)\n\n d_loss_real = tf.reduce_mean(\n tf.nn.sigmoid_cross_entropy_with_logits(logits=d_logits_real,\n labels=tf.ones_like(d_model_real) * (1 - smooth)))\n d_loss_fake = tf.reduce_mean(\n tf.nn.sigmoid_cross_entropy_with_logits(logits=d_logits_fake, labels=tf.zeros_like(d_model_fake)))\n g_loss = tf.reduce_mean(\n tf.nn.sigmoid_cross_entropy_with_logits(logits=d_logits_fake, labels=tf.ones_like(d_model_fake)))\n\n d_loss = d_loss_real + d_loss_fake\n print(d_loss.shape)\n print(g_loss.shape)\n return d_loss, g_loss", "def model_loss(input_real, input_z, out_channel_dim):\n # TODO: Implement Function \n \n smooth = 0.1\n g_model = generator(input_z, out_channel_dim)\n d_model_real, d_logits_real = discriminator(input_real)\n d_model_fake, d_logits_fake = discriminator(g_model, reuse=True)\n\n d_loss_real = tf.reduce_mean(\n 
tf.nn.sigmoid_cross_entropy_with_logits(logits=d_logits_real,\n labels=tf.ones_like(d_model_real) * (1 - smooth)))\n d_loss_fake = tf.reduce_mean(\n tf.nn.sigmoid_cross_entropy_with_logits(logits=d_logits_fake, labels=tf.zeros_like(d_model_fake)))\n g_loss = tf.reduce_mean(\n tf.nn.sigmoid_cross_entropy_with_logits(logits=d_logits_fake, labels=tf.ones_like(d_model_fake)))\n\n d_loss = d_loss_real + d_loss_fake\n print(d_loss.shape)\n print(g_loss.shape)\n return d_loss, g_loss", "def loss_fn(self, lbl, y):\n\n binlbl = self._to_device(lbl[:,0]>.5)\n # center = self._to_device(lbl[:,3]) \n offset = 5. * self._to_device(lbl[:,1:]) \n\n loss = self.criterion(y[:,:2], offset) \n loss2 = self.criterion2(y[:,2], binlbl)\n\n # loss3 = self.criterion(y[:,3], center)\n\n loss = loss + loss2\n return loss", "def _build_loss(self, **kwargs):\n pass", "def make_loss(self, label, loss_type='xent'):\n with tf.variable_scope('loss'):\n pred_flat = tf.reshape(self.pred, [-1, self.class_num])\n label = tf.image.resize_nearest_neighbor(label, tf.stack(self.pred.get_shape()[1:3]))\n y_flat = tf.reshape(tf.squeeze(label, axis=[3]), [-1, ])\n indices = tf.squeeze(tf.where(tf.less_equal(y_flat, self.class_num - 1)), 1)\n gt = tf.gather(y_flat, indices)\n prediction = tf.gather(pred_flat, indices)\n\n pred = tf.argmax(prediction, axis=-1, output_type=tf.int32)\n self.loss_iou = self.create_resetable_metric(tf.metrics.mean_iou, var_name='loss_iou',\n scope=tf.get_variable_scope().name,\n labels=gt, predictions=pred, num_classes=self.class_num,\n name='loss_iou')\n\n if loss_type == 'xent':\n loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(logits=prediction, labels=gt))\n l2_losses = [self.weight_decay * tf.nn.l2_loss(v) for v in tf.trainable_variables()\n if 'weight' in v.name]\n self.loss = tf.reduce_mean(loss) + tf.add_n(l2_losses)\n self.loss_xent = self.create_resetable_metric(tf.metrics.mean, var_name='loss_xent',\n scope=tf.get_variable_scope().name,\n values=self.loss, name='loss_xent')", "def model_loss(input_real, input_z, out_channel_dim):\r\n # TODO: Implement Function\r\n print(\"<<<<<<<<<<<input_z.shape\", input_z.shape)\r\n g_model = generator(input_z, out_channel_dim, is_train=True)\r\n\r\n print(\"<<<<<<<<<<<input_real.shape\", input_real.shape)\r\n d_model_real, d_logits_real = discriminator(input_real, reuse=False)\r\n\r\n print(\"<<<<<<<<<<<g_model.shape\", g_model.shape)\r\n d_model_fake, d_logits_fake = discriminator(g_model, reuse=True)\r\n print(\"<<<<<<<<<<<\", d_model_fake.shape, d_logits_fake.shape)\r\n\r\n ## add smooth here\r\n\r\n smooth = 0.1\r\n d_loss_real = tf.reduce_mean(\r\n tf.nn.sigmoid_cross_entropy_with_logits(logits=d_logits_real,\r\n labels=tf.ones_like(d_model_real) * (1 - smooth)))\r\n\r\n d_loss_fake = tf.reduce_mean(\r\n tf.nn.sigmoid_cross_entropy_with_logits(logits=d_logits_fake, labels=tf.zeros_like(d_model_fake)))\r\n\r\n g_loss = tf.reduce_mean(\r\n tf.nn.sigmoid_cross_entropy_with_logits(logits=d_logits_fake,\r\n labels=tf.ones_like(d_model_fake)))\r\n\r\n d_loss = d_loss_real + d_loss_fake\r\n\r\n return d_loss, g_loss, g_model", "def Tanimoto_loss(label, pred):\n smooth = 1e-5\n\n Vli = tf.reduce_mean(tf.reduce_sum(label, axis=[1,2]), axis=0)\n # wli = 1.0/Vli**2 # weighting scheme\n wli = tf.math.reciprocal(Vli**2) # weighting scheme\n\n # ---------------------This line is taken from niftyNet package --------------\n # ref: https://github.com/NifTK/NiftyNet/blob/dev/niftynet/layer/loss_segmentation.py, lines:170 -- 172\n # First turn inf 
elements to zero, then replace that with the maximum weight value\n new_weights = tf.where(tf.math.is_inf(wli), tf.zeros_like(wli), wli)\n wli = tf.where(tf.math.is_inf(wli), tf.ones_like(wli) * tf.reduce_max(new_weights), wli)\n # --------------------------------------------------------------------\n\n # print('[DEBUG LOSS]')\n # print(label.shape)\n # print(pred.shape)\n\n square_pred = tf.square(pred)\n square_label = tf.square(label)\n add_squared_label_pred = tf.add(square_pred, square_label)\n sum_square = tf.reduce_sum(add_squared_label_pred, axis=[1, 2])\n # print('sum square')\n # print(sum_square.shape)\n\n product = tf.multiply(pred, label)\n sum_product = tf.reduce_sum(product, axis=[1, 2])\n # print('sum product')\n # print(sum_product.shape)\n sum_product_labels = tf.reduce_sum(tf.multiply(wli, sum_product), axis=-1)\n # print('sum product labels')\n # print(sum_product_labels.shape)\n\n denomintor = tf.subtract(sum_square, sum_product)\n # print('denominator')\n # print(denomintor.shape)\n denomintor_sum_labels = tf.reduce_sum(tf.multiply(wli, denomintor), axis=-1)\n # print('denominator sum labels')\n # print(denomintor_sum_labels.shape)\n # Add smooth to avoid numerical instability\n loss = tf.divide(sum_product_labels + smooth, denomintor_sum_labels + smooth)\n # print('loss')\n # print(loss.shape)\n return loss", "def get_gen_loss(gen, disc, real, condition, adv_criterion, recon_criterion, lambda_recon):\n fake = gen(condition)\n disc_fake_pred = disc(fake, condition)\n adv_loss = adv_criterion(disc_fake_pred, torch.ones_like(disc_fake_pred))\n recon_loss = recon_criterion(real, fake)\n gen_loss = adv_loss + (recon_loss * lambda_recon)\n return gen_loss", "def objective(params):\n param0, param1, param2 = params\n model = Model(param0, param1, param3)\n\n train_loss = train(model)\n val_loss = validate(model)\n\n return val_loss", "def loss_test(params):\n nx, ny, lx, ly, dx, dy, dt, t, p, pmlc, source = params\n\n history1 = callers.call_bpml(nx, ny, t, dx, dy, dt, p, pmlc, 0.1, source)\n history2 = callers.call_bpml(nx, ny, t, dx, dy, dt, p, pmlc, 0.01, source)\n history3 = callers.call_bpml(nx, ny, t, dx, dy, dt, p, pmlc, 0.001, source)\n\n nyh = int(ny / 2)\n names = [\"sigma = 0\", \"sigma = 0.01\", \"sigma = 0.001\"]\n labels = [\"Hz [V/m]\", \"Space / [Cells]\"]\n snaps = [history1[:, nyh, 50], history2[:, nyh, 50], history3[:, nyh, 50]]\n common.plot_snaps(names, labels, snaps)", "def get_loss(self, xs, y):\n \"*** YOUR CODE HERE ***\"\n predictedY = self.run(xs)\n return nn.SoftmaxLoss(predictedY, y)\n # return nn.SquareLoss(predictedY, y)", "def compute_loss(self, **kwargs):\n raise NotImplementedError", "def loss(self, X, y):\n pass", "def train(net):\n\n # Set SGD hyperparameters\n n_iter = 200 # number of iterations of SGD\n learning_rate = 1e-3 # learning rate for SGD\n momentum = .99 # momentum parameter for SGD\n batch_size = 100 # number of data points in each mini-batch\n\n # Initialize binary cross-entropy loss function\n loss_fn = nn.BCELoss()\n\n # Initialize SGD optimizer with momentum\n optimizer = optim.SGD(net.parameters(), lr=learning_rate, momentum=momentum)\n\n # Placeholder to save loss at each iteration\n track_loss = []\n\n # Loop over iterations\n for i in range(n_iter):\n\n # Sample minibatch of oriented grating stimuli\n stimuli, tilt = sample_stimuli(batch_size)\n\n # Evaluate loss and update network weights\n out = net(stimuli) # predicted probability of tilt right\n loss = loss_fn(out, tilt) # evaluate loss\n optimizer.zero_grad() # 
clear gradients\n loss.backward() # compute gradients\n optimizer.step() # update weights\n \n # Keep track of loss at each iteration\n track_loss.append(loss.item())\n\n # Track progress\n if (i + 1) % (n_iter / 10) == 0:\n print('iteration %i | loss: %.3f | percent correct: %.2f%%' % (i + 1, loss.item(), 100 * pcorrect(out, tilt)))\n \n # Plot loss\n plt.plot(track_loss)\n plt.xlabel('iterations of SGD')\n plt.ylabel('binary cross-entropy loss')\n plt.xlim([0, None])\n plt.ylim([0, None])\n plt.show()", "def _create_loss(self):\n with tf.device('/cpu:0'):\n with tf.name_scope('loss'):\n self.loss = tf.reduce_mean(\n tf.nn.softmax_cross_entropy_with_logits(\n labels=self.labels_placeholder, \n logits=self.logits, name='loss'))", "def get_criterion(opt):\n\n if opt.loss_function == 'bce':\n criterion = nn.BCEWithLogitsLoss()\n if opt.loss_function == 'focal':\n criterion = FocalLoss(opt.focal_gamma)\n if opt.loss_function == 'dice':\n criterion = dice_loss\n if opt.loss_function == 'jaccard':\n criterion = jaccard_loss\n if opt.loss_function == 'tversky':\n criterion = TverskyLoss(alpha=opt.tversky_alpha, beta=opt.tversky_beta)\n\n return criterion", "def loss(self, log_prob, C):\n W = self.W\n T = self.T\n average_log_loss = -C * log_prob\n W_norm = torch.sum(torch.tensor([(torch.norm(Wy.double())) ** 2 for Wy in W])) / 2\n T_norm = torch.sum(torch.tensor([torch.sum(torch.tensor([Tij ** 2 for Tij in row])) for row in T])) / 2\n loss = average_log_loss + W_norm + T_norm\n return loss", "def get_loss(self, Loss, results, inputs, device):\n return", "def modify(nets, probs, ranks, desc, hypers, seed=0, seed2=0):\n\n name = str(seed)\n\n np.random.seed(seed2)\n tf.random.set_random_seed(seed2)\n random.seed(seed2)\n\n if not rnd: # If randomness is not applied\n print(ranks.sum(axis=1))\n if (ranks.sum(axis=1) == 0).any(): # If there are any network in the bottom three in importance in all objectives\n probs = (ranks.sum(axis=1) == 0) * probs # Only accept a network as modifiable if they rank between 3 least important networks in all three objectives\n probs = probs / np.sum(probs) # Update probabilities once the networks more important than bottom three have been taken away\n trainables, res, mutation, comp, reaching_outs = reducing_mutations(nets, probs, desc)\n else:\n trainables, res, mutation, comp, reaching_outs = increasing_mutations(nets, probs, desc)\n else: # Random application\n comp = np.random.choice(nets)\n _, in_conns, out_conns, _ = desc.get_net_context(comp)\n conns = in_conns + out_conns # Checka si esto da error\n reaching_outs = list(set([x for x in desc.reachable[comp] if \"o\" in x])) # Outputs affected by the mutation\n mutations = [con for con in conns if is_deletable(desc, con)]\n\n mutations += [\"add_con\", \"divide_con\", \"reinit\"]\n\n if is_bypassable(desc, comp):\n mutations += [\"bypass\"]\n\n mutation = np.random.choice(mutations)\n res, trainables = mutate(mutation, desc, comp, conns)\n print(mutation)\n model = MNM(desc, hypers[\"btch_sz\"], data_inputs[\"Train\"], data_outputs[\"Train\"], loss_func_weights={\"o0\": hypers[\"wo0\"], \"o1\": hypers[\"wo1\"], \"o2\": hypers[\"wo2\"]}, name=name, load=None, init=False, random_seed=seed2, lr=0.001)\n\n model.initialize(load=True, load_path=\"\", variables=trainables)\n\n model.convergence_train(hypers[\"btch_sz\"], iter_lim//100, conv_param, proportion, iter_lim//20, display_step=-1)\n\n results = evaluate_model(model)\n\n del model\n\n if rnd == 1:\n n = \"resultsrandom\"\n else:\n n = \"results\"\n\n 
np.save(n + str(seed) + \"_\" + str(seed2) + \".npy\", np.concatenate((results, [res, mutation, comp], reaching_outs)))", "def calculate_perplexity(loss):\n return math.exp(float(loss)) if loss < 300 else float(\"inf\")", "def __compile_model(self,\n network,\n loss=lasagne.objectives.categorical_crossentropy,\n learning_rate=0.001,\n momentum=0.1):\n print('Compiling model...')\n self.report['network'] = inspect.getsource(network)\n self.report['loss_function'] = loss.__name__\n self.report['learning_rate'] = learning_rate\n self.report['learning_momentum'] = momentum\n start_time = time.time()\n self.__input_var = T.tensor4('inputs')\n self.__target_var = T.ivector('targets')\n self.__network = network(self.__input_var)\n self.__loss = lambda t: loss(get_output(self.__network,\n deterministic=t),\n self.__target_var).mean()\n self.__optimizer = lasagne.updates.nesterov_momentum(\n self.__loss(False), # enable dropout during training\n get_all_params(self.__network, trainable=True),\n learning_rate=learning_rate,\n momentum=momentum)\n predictions = T.argmax(\n get_output(self.__network, deterministic=True),\n axis=1)\n # number of correct predictions\n n_correct = T.sum(T.eq(predictions, self.__target_var))\n # number of relevant images in the sample\n n_relevant = T.sum(self.__target_var)\n # number of images predicted to be relevant\n n_selected = T.sum(predictions)\n # number of correct predictions of relevance\n n_correct_relevant = T.sum(predictions & self.__target_var)\n statistics = [n_correct, n_selected, n_relevant, n_correct_relevant]\n self.__train_fn = theano.function(\n [self.__input_var, self.__target_var],\n [self.__loss(False)] + statistics,\n updates=self.__optimizer)\n self.__val_fn = theano.function(\n [self.__input_var, self.__target_var],\n [self.__loss(True)] + statistics)\n elapsed_time = time.time() - start_time\n self.report['time_to_compile'] = elapsed_time", "def loss(self, z1_rec):\n pass", "def discrete_descent(rho, Z, C_0, rho_ub, rho_lb, get_L0_penalty, compute_loss_from_scores, descent_dimensions = None, active_set_flag = True):\n \"\"\"\n \n \"\"\"\n assert callable(compute_loss_from_scores)\n assert callable(get_L0_penalty)\n\n # initialize key variables\n MAX_ITERATIONS = 500\n MIN_IMPROVEMENT_PER_STEP = float(1e-8)\n P = len(rho)\n\n # convert solution to integer\n rho = np.require(np.require(rho, dtype = np.int_), dtype = np.float_)\n\n # convert descent dimensions to integer values\n if descent_dimensions is None:\n descent_dimensions = np.arange(P)\n else:\n descent_dimensions = np.require(descent_dimensions, dtype = np.int_)\n\n if active_set_flag:\n descent_dimensions = np.intersect1d(np.flatnonzero(rho), descent_dimensions)\n\n descent_dimensions = descent_dimensions.tolist()\n\n base_scores = Z.dot(rho)\n base_loss = compute_loss_from_scores(base_scores)\n base_objval = base_loss + get_L0_penalty(rho)\n n_iterations = 0\n\n coefficient_values = {k: np.arange(int(rho_lb[k]), int(rho_ub[k]) + 1) for k in descent_dimensions}\n search_dimensions = descent_dimensions\n while n_iterations < MAX_ITERATIONS and len(search_dimensions) > 0:\n\n # compute the best objective value / step size in each dimension\n best_objval_by_dim = np.repeat(np.nan, P)\n best_coef_by_dim = np.repeat(np.nan, P)\n\n for k in search_dimensions:\n\n dim_objvals = _compute_objvals_at_dim(base_rho = rho,\n base_scores = base_scores,\n base_loss = base_loss,\n dim_idx = k,\n dim_coefs = coefficient_values[k],\n Z = Z,\n C_0 = C_0,\n compute_loss_from_scores = 
compute_loss_from_scores)\n\n # mark points that will improve the current objective value by at least MIN_IMPROVEMENT_PER_STEP\n best_dim_idx = np.nanargmin(dim_objvals)\n best_objval_by_dim[k] = dim_objvals[best_dim_idx]\n best_coef_by_dim[k] = coefficient_values[k][best_dim_idx]\n\n # recompute base objective value/loss/scores\n best_idx = np.nanargmin(best_objval_by_dim)\n next_objval = best_objval_by_dim[best_idx]\n threshold_objval = base_objval - MIN_IMPROVEMENT_PER_STEP\n\n if next_objval >= threshold_objval:\n break\n\n best_step = best_coef_by_dim[best_idx] - rho[best_idx]\n rho[best_idx] += best_step\n base_objval = next_objval\n base_loss = base_objval - get_L0_penalty(rho)\n base_scores = base_scores + (best_step * Z[:, best_idx])\n\n # remove the current best direction from the set of directions to explore\n search_dimensions = list(descent_dimensions)\n search_dimensions.remove(best_idx)\n n_iterations += 1\n\n return rho, base_loss, base_objval", "def liouvillian(rho, H, c_ops):\n rhs = -1j * comm(H, rho)\n for c_op in c_ops:\n rhs += lindbladian(c_op, rho)\n return rhs", "def construct_model():\n import lbann\n\n # Layer graph\n input = lbann.Input(target_mode='N/A', name='inp_data')\n # data is 64*64*4 images + 15 scalar + 5 param\n #inp_slice = lbann.Slice(input, axis=0, slice_points=\"0 16399 16404\",name='inp_slice')\n inp_slice = lbann.Slice(input, axis=0, slice_points=str_list([0,args.ydim,args.ydim+5]),name='inp_slice')\n gt_y = lbann.Identity(inp_slice,name='gt_y')\n gt_x = lbann.Identity(inp_slice, name='gt_x') #param not used\n\n zero = lbann.Constant(value=0.0,num_neurons='1',name='zero')\n one = lbann.Constant(value=1.0,num_neurons='1',name='one')\n\n z_dim = 20 #Latent space dim\n\n z = lbann.Gaussian(mean=0.0,stdev=1.0, neuron_dims=\"20\")\n model = macc_models.MACCWAE(args.zdim,args.ydim,cf=args.mcf,use_CNN=args.useCNN)\n d1_real, d1_fake, d_adv, pred_y = model(z,gt_y)\n\n d1_real_bce = lbann.SigmoidBinaryCrossEntropy([d1_real,one],name='d1_real_bce')\n d1_fake_bce = lbann.SigmoidBinaryCrossEntropy([d1_fake,zero],name='d1_fake_bce')\n d_adv_bce = lbann.SigmoidBinaryCrossEntropy([d_adv,one],name='d_adv_bce')\n img_loss = lbann.MeanSquaredError([pred_y,gt_y])\n rec_error = lbann.L2Norm2(lbann.WeightedSum([pred_y,gt_y], scaling_factors=\"1 -1\"))\n\n layers = list(lbann.traverse_layer_graph(input))\n # Setup objective function\n weights = set()\n src_layers = []\n dst_layers = []\n for l in layers:\n if(l.weights and \"disc0\" in l.name and \"instance1\" in l.name):\n src_layers.append(l.name)\n #freeze weights in disc2\n if(l.weights and \"disc1\" in l.name):\n dst_layers.append(l.name)\n for idx in range(len(l.weights)):\n l.weights[idx].optimizer = lbann.NoOptimizer()\n weights.update(l.weights)\n l2_reg = lbann.L2WeightRegularization(weights=weights, scale=1e-4)\n d_adv_bce = lbann.LayerTerm(d_adv_bce,scale=0.01)\n obj = lbann.ObjectiveFunction([d1_real_bce,d1_fake_bce,d_adv_bce,img_loss,rec_error,l2_reg])\n # Initialize check metric callback\n metrics = [lbann.Metric(img_loss, name='recon_error')]\n #pred_y = macc_models.MACCWAE.pred_y_name\n callbacks = [lbann.CallbackPrint(),\n lbann.CallbackTimer(),\n lbann.CallbackSaveModel(dir=args.dump_models),\n lbann.CallbackReplaceWeights(source_layers=list2str(src_layers),\n destination_layers=list2str(dst_layers),\n batch_interval=2)]\n\n if(args.ltfb_batch_interval > 0) :\n callbacks.append(lbann.CallbackLTFB(batch_interval=args.ltfb_batch_interval,metric='recon_error',\n low_score_wins=True,\n 
exchange_hyperparameters=True))\n\n # Construct model\n return lbann.Model(args.num_epochs,\n serialize_io=True,\n weights=weights,\n layers=layers,\n metrics=metrics,\n objective_function=obj,\n callbacks=callbacks)", "def loss_perceptual(self, vgg_out, vgg_gt, vgg_comp): \n loss = 0\n for o, c, g in zip(vgg_out, vgg_comp, vgg_gt):\n loss += self.l1(o, g) + self.l1(c, g)\n return loss" ]
[ "0.6449017", "0.6109472", "0.605694", "0.5996417", "0.593703", "0.589075", "0.5883496", "0.58318764", "0.5773389", "0.5742824", "0.5693317", "0.569238", "0.56884587", "0.5637557", "0.56182146", "0.5615014", "0.55934024", "0.55804175", "0.55716777", "0.5567387", "0.5556932", "0.5555117", "0.5550353", "0.5537782", "0.54960036", "0.5486453", "0.5465885", "0.5460442", "0.5458186", "0.54563606", "0.54526466", "0.54489887", "0.5435416", "0.5425833", "0.5420805", "0.54176384", "0.54175514", "0.54059434", "0.53990954", "0.5398425", "0.53926176", "0.53874165", "0.53841877", "0.53830284", "0.5372703", "0.5371663", "0.53559995", "0.53539175", "0.53447765", "0.53416103", "0.534072", "0.53363794", "0.53289384", "0.53210413", "0.5313553", "0.53102285", "0.5309431", "0.53084946", "0.5308032", "0.529859", "0.5298463", "0.5293365", "0.52826023", "0.5279655", "0.52761984", "0.5269021", "0.52684045", "0.52674675", "0.526089", "0.526062", "0.5260186", "0.5259445", "0.5254187", "0.5250333", "0.5250314", "0.5250314", "0.5247941", "0.52476144", "0.5246141", "0.52438915", "0.52435404", "0.5240859", "0.52313066", "0.52251047", "0.5219708", "0.521919", "0.52180916", "0.5215968", "0.52023107", "0.51996547", "0.51990205", "0.5195342", "0.5191988", "0.51896256", "0.51880527", "0.51847076", "0.51830035", "0.5181557", "0.5171853", "0.51651627" ]
0.6604869
0
create a SVDD loss for network given in argument
def compile_update_svdd(nnet, inputs, targets): floatX = Cfg.floatX B = Cfg.B C = Cfg.C nu = Cfg.nu # initialize R if nnet.R_init > 0: nnet.Rvar = shared(floatX(nnet.R_init), name="R") else: nnet.Rvar = shared(floatX(1), name="R") # initialization with R=1 # Loss feature_layer = nnet.all_layers[-1] rep = lasagne.layers.get_output(feature_layer, inputs=inputs, deterministic=False) # initialize c (0.5 in every feature representation dimension) rep_dim = feature_layer.num_units # nnet.cvar = shared(floatX(np.ones(rep_dim) * (1. / (rep_dim ** 0.5))), # name="c") nnet.cvar = shared(floatX(np.ones(rep_dim) * 0.5), name="c") dist = T.sum(((rep - nnet.cvar.dimshuffle('x', 0)) ** 2), axis=1, dtype='floatX') scores = dist - nnet.Rvar stack = T.stack([T.zeros_like(scores), scores], axis=1) loss = T.cast(T.sum(T.max(stack, axis=1)) / (inputs.shape[0] * nu), dtype='floatX') y_pred = T.argmax(stack, axis=1) acc = T.cast((T.sum(T.eq(y_pred.flatten(), targets), dtype='int32') * 1. / targets.shape[0]), 'floatX') # Network weight decay if Cfg.weight_decay: l2_penalty = (1/C) * get_l2_penalty(nnet, include_bias=Cfg.include_bias, pow=Cfg.pow) else: l2_penalty = T.cast(0, dtype='floatX') # Network activation sparsity regularization if Cfg.sparsity_penalty: sparsity_penalty = (1/B) * get_sparsity_penalty(nnet, inputs, Cfg.sparsity, mode=Cfg.sparsity_mode, deterministic=False) else: sparsity_penalty = T.cast(0, dtype='floatX') # Backpropagation (hard-margin: only minimizing everything to a ball # centered at c) trainable_params = lasagne.layers.get_all_params(feature_layer, trainable=True) if Cfg.gaussian_blob: avg_dist = T.mean(1-T.exp(-dist), dtype="floatX") else: avg_dist = T.mean(dist, dtype="floatX") obj_ball = T.cast(floatX(0.5) * l2_penalty + avg_dist + sparsity_penalty, dtype='floatX') updates_ball = get_updates(nnet, obj_ball, trainable_params, solver=nnet.solver) nnet.backprop_ball = theano.function([inputs, targets], [obj_ball, acc], updates=updates_ball) # Backpropagation (without training R) obj = T.cast(floatX(0.5) * l2_penalty + nnet.Rvar + loss + sparsity_penalty, dtype='floatX') updates = get_updates(nnet, obj, trainable_params, solver=nnet.solver) nnet.backprop_without_R = theano.function([inputs, targets], [obj, acc], updates=updates) # Backpropagation (with training R) trainable_params.append(nnet.Rvar) # add radius R to trainable parameters updates = get_updates(nnet, obj, trainable_params, solver=nnet.solver) nnet.backprop = theano.function([inputs, targets], [obj, acc], updates=updates) # Forwardpropagation test_rep = lasagne.layers.get_output(feature_layer, inputs=inputs, deterministic=True) test_rep_norm = test_rep.norm(L=2, axis=1) test_dist = T.sum(((test_rep - nnet.cvar.dimshuffle('x', 0)) ** 2), axis=1, dtype='floatX') test_scores = test_dist - nnet.Rvar test_stack = T.stack([T.zeros_like(test_scores), test_scores], axis=1) test_loss = T.cast(T.sum(T.max(test_stack, axis=1)) / (inputs.shape[0]*nu), dtype='floatX') test_y_pred = T.argmax(test_stack, axis=1) test_acc = T.cast((T.sum(T.eq(test_y_pred.flatten(), targets), dtype='int32') * 1. 
/ targets.shape[0]), dtype='floatX') # Network activation sparsity regularization (with determinisitc=True) if Cfg.sparsity_penalty: test_sparsity_penalty = ((1 / B) * get_sparsity_penalty(nnet, inputs, Cfg.sparsity, mode=Cfg.sparsity_mode, deterministic=True)) else: test_sparsity_penalty = T.cast(0, dtype='floatX') test_obj = T.cast(floatX(0.5) * l2_penalty + nnet.Rvar + test_loss + test_sparsity_penalty, dtype='floatX') nnet.forward = theano.function([inputs, targets], [test_obj, test_acc, test_scores, floatX(0.5) * l2_penalty, test_sparsity_penalty, test_rep, test_rep_norm, test_loss, nnet.Rvar])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def tv_loss(x, name='tv_loss'):\n raise NotImplementedError(\"Please use tensorflow total_variation loss.\")", "def loss_fn(self, targets, outputs, model):", "def tv_loss(input: th.Tensor):\n input = tf.pad(input, (0, 1, 0, 1), \"replicate\")\n x_diff = input[..., :-1, 1:] - input[..., :-1, :-1]\n y_diff = input[..., 1:, :-1] - input[..., :-1, :-1]\n return (x_diff ** 2 + y_diff ** 2).mean([1, 2, 3])", "def svm_loss(x, y):\n x = np.squeeze(x)\n N = x.shape[0]\n yt = y\n yt[y==0]=-1\n tmp = 1-yt*x\n mask = np.ones_like(tmp)\n mask[tmp<=0] = 0\n tmp = tmp*mask\n loss = np.sum(tmp)/N\n \n dx = -yt*mask/N\n # dx = np.reshape(dx,[dx.shape[0],1])\n return loss, dx", "def ss_loss_(self, batch):\n raise NotImplementedError", "def demo():\n def load_data():\n train = open(\"csv/svd_train.csv\", \"r\")\n r = csv.reader(train)\n next(r)\n\n data = []\n target = []\n\n print \"Prepping data...\"\n for row in r:\n aux = [0 for x in xrange(10)]\n aux[int(row[0])] = 1\n target.append(aux)\n data.append([float(x) for x in row[1:]])\n\n train.close()\n\n data = np.array(data)\n\n target = np.array(target)\n\n #train = [target[:35000],data[:35000]]\n #test = [target[35000:],data[35000:]]\n\n return [target, data]\n\n NN = MLP_NeuralNetwork(101, 75, 35, 10,\n iterations = 200,\n learning_rate = 0.5,\n momentum = 0.05,\n rate_decay = 0.005)\n\n train = load_data()\n\n NN.train(train)\n #NN.test_cross(test)\n #NN.test()\n NN.test_against()", "def compute_loss(self):", "def loss(self):\n return 'mse'", "def loss(self, **kwargs):\n pass", "def evaluate_loss(\n model,\n ds,\n loss_func_name = 'CE'\n):\n loss = 0\n if loss_func_name == 'CE':\n loss_func = tf.keras.losses.SparseCategoricalCrossentropy(\n reduction=tf.keras.losses.Reduction.SUM\n )\n else:\n raise ValueError(f'Not supported loss function {loss_func_name}!')\n n = 0\n for batch_x, batch_y in ds:\n batch_output = get_model_output(model, batch_x)\n loss += loss_func(batch_y, batch_output)\n n += batch_y.shape[0]\n return loss / n", "def svm_loss(x, y):\n N = x.shape[0]\n x = np.squeeze(x)\n loss = np.sum(((1-x*y)>0)*(1-x*y))/N\n dx = ((1-x*y)>0)*(-y)/N\n return loss, dx", "def snn(input_var=None):\n\n # Hyperparameters\n hp = Hyperparameters()\n hp('batch_size', 20)\n hp('n_epochs', 1000)\n hp('learning_rate', 0.01)\n hp('patience', 10000)\n\n # Create connected layers\n # Input layer\n l_in = InputLayer(input_shape=(None, 28 * 28), input_var=input_var, name='Input')\n # Dropout Layer\n l_dro1 = AlphaDropout(incoming=l_in, corruption_level=0.2, name='Dropout 1')\n # Dense Layer\n l_hid1 = DenseLayer(incoming=l_dro1, n_units=500, W=selu_normal,\n activation=selu, name='Hidden layer 1')\n # Dropout Layer\n l_dro2 = AlphaDropout(incoming=l_hid1, corruption_level=0.1, name='Dropout 2')\n # Dense Layer\n l_hid2 = DenseLayer(incoming=l_dro2, n_units=500, W=selu_normal,\n activation=selu, name='Hidden layer 2')\n # Logistic regression Layer\n l_out = LogisticRegression(incoming=l_hid2, n_class=10, name='Logistic regression')\n\n # Create network and add layers\n net = Network('dropout')\n net.add(l_in)\n net.add(l_dro1)\n net.add(l_hid1)\n net.add(l_dro2)\n net.add(l_hid2)\n net.add(l_out)\n\n return net, hp", "def get_loss(self, xs, y):\n \"*** YOUR CODE HERE ***\"\n predictedY = self.run(xs)\n return nn.SoftmaxLoss(predictedY, y)\n # return nn.SquareLoss(predictedY, y)", "def vae_loss_function_factory(reduction='mean'):\n def vae_loss_function(outputs, targets, mean, std_dev):\n outputs_flat = outputs.view(-1, 28 * 28)\n targets_flat = targets.view(-1, 
28 * 28)\n if reduction == 'mean':\n image_loss = torch.mean((outputs_flat - targets_flat).pow(2).sum(dim=1))\n latent_loss = -0.5 * torch.mean((1 + 2 * std_dev - mean.pow(2) - torch.exp(2 * std_dev)).sum(dim=1))\n elif reduction == 'sum':\n image_loss = torch.sum((outputs_flat - targets_flat).pow(2).sum(dim=1))\n latent_loss = -0.5 * torch.sum((1 + 2 * std_dev - mean.pow(2) - torch.exp(2 * std_dev)).sum(dim=1))\n elif reduction == 'none':\n image_loss = (outputs_flat - targets_flat).pow(2).sum(dim=1)\n latent_loss = -0.5 * (1 + 2 * std_dev - mean.pow(2) - torch.exp(2 * std_dev)).sum(dim=1)\n else:\n raise NotImplementedError('Reduction ' + reduction + ' not implemented.')\n return image_loss + latent_loss\n return vae_loss_function", "def tv_loss(img, tv_weight):\n # Your implementation should be vectorized and not require any loops!\n # *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n\n pass\n\n # *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****", "def loss_fn(outputs, labels):\n #print('this is outputs', outputs.shape) # 2,3,128,128\n #print('this is labels', labels.shape) # 2,3,128,128\n N, C, H, W = outputs.shape\n \n# outputs = unnormalize(outputs, mean=[0.51371954, 0.40949144, 0.35572536], std= [0.2926419, 0.26180502, 0.25512055])\n # check if we normalize label images #labels = unnormalize(labels, mean=[0.53459634,0.39673596,0.33788489], std= [0.29101071,0.26140346,0.25485687])\n \n mse_loss = torch.sum((outputs - labels) ** 2) / N / C # each photo, each channel\n mse_loss *= 255 * 255\n mse_loss /= H * W \n # average loss on each pixel(0-255)\n return mse_loss", "def get_loss(self, xs, y):\n \"*** YOUR CODE HERE ***\"\n y_pred = self.run(xs)\n return nn.SoftmaxLoss(y_pred,y)", "def compute_loss(self, obs, returns):", "def wasserstein_d_update(loss, optimizer, clipping_value, var_list=None, name='d_update'):\n # gradients, var_list = zip(*optimizer.compute_gradients(loss, var_list=var_list))\n # optimizer.apply_gradients(zip(gradients, var_list), name=name)\n # return clip_discriminator_var_op\n\n opt_op = optimizer.minimize(loss, var_list=var_list, name=name)\n with tf.control_dependencies([opt_op]):\n clip_discriminator_var_op = [\n var.assign(tf.clip_by_value(var, -clipping_value, clipping_value))\n for var in var_list]\n return clip_discriminator_var_op", "def loss_(self, batch):\n raise NotImplementedError", "def loss(A, Y):\n return A - Y", "def heteroscedastic_loss(network, params, x):\n\n pred_mean, pred_var = network(x)\n logvar = tf.reduce_sum(0.5 * tf.math.log(pred_var), axis=-1)\n squared_error = tf.reduce_sum(0.5 * tf.math.square(params - pred_mean) / pred_var, axis=-1)\n loss = tf.reduce_mean(squared_error + logvar)\n return loss", "def dloss(self, output, labels):\n return 2*(output - labels)/labels.shape[1]", "def svm_loss(x, y):\n\n x = x.reshape((-1,1))\n y = y.reshape((-1,1))\n N,_ = x.shape\n \n y_p = np.where(y == 1,1,-1)\n \n losses = np.maximum(0,1-(x*y_p))\n loss = np.sum(losses)/N\n dx = np.where(losses > 0, 1, 0)*(-y_p)/N\n dx = dx.reshape((-1,))\n\n return loss, dx", "def make_loss(self, logit=None, labels=None):\r\n return nn.functional.mse_loss(logit, labels, reduction='mean') # The MSE Loss\r", "def model_loss(inp, fake, real_label, fake_label):\n \n \n Dreal,realcls,R1 = gradpen(inp)\n [Dfake,fakecls] = D(fake)\n # 1. 
Adversarial loss\n \n glabel = tf.ones_like(Dfake)#tf.random.uniform((Dfake.shape), 1-LN, 1)\n dlabelr = tf.ones_like(Dreal)#tf.random.uniform((Dreal.shape), 1-LN, 1)\n dlabelf = tf.zeros_like(Dfake)#tf.random.uniform((Dfake.shape), 0, LN)\n \n \n \n # D has no sigmoid activation: \"from_logits=True\"\n real_loss = tf.keras.losses.binary_crossentropy(\n dlabelr, Dreal, from_logits=True)\n real_loss = tf.reduce_mean(real_loss)\n \n fake_loss = tf.keras.losses.binary_crossentropy(\n dlabelf, Dfake, from_logits=True)\n fake_loss = tf.reduce_mean(fake_loss)\n \n Dadv = 0.5*(real_loss+fake_loss)\n \n Gadv = tf.keras.losses.binary_crossentropy(\n glabel, Dfake, from_logits=True)\n Gadv = tf.reduce_mean(Gadv)\n \n # 2. Classification loss\n \n Dcls = tf.keras.losses.binary_crossentropy(real_label, realcls, from_logits=True)\n Dcls = tf.reduce_mean(Dcls)\n \n Gcls = tf.keras.losses.binary_crossentropy(fake_label, fakecls, from_logits=True)\n Gcls = tf.reduce_mean(Gcls)\n \n # 3. Total loss\n \n Dloss = Dadv + (GAMMA/2)*R1 + LAMBDA_CLS*Dcls\n \n Gloss = Gadv + LAMBDA_CLS*Gcls\n \n return (Dloss, Dadv, Dcls, R1), (Gloss, Gadv, Gcls)", "def loss_fn(input_d, reconstructed, mean, logvar, beta=1, batch_size=1, input_size=1):\n\n # mse_criterion = nn.MSELoss() # reduction=sum ?\n # mse_loss = mse_criterion(input_d, reconstructed)\n\n # bce_criterion = nn.BCELoss(size_average=False) # reduction=sum ?\n bce_criterion = nn.BCELoss() # reduction=sum ?\n bce_loss = bce_criterion(input_d, reconstructed)\n\n # see Appendix B from VAE paper:\n # Kingma and Welling. Auto-Encoding Variational Bayes. ICLR, 2014\n # https://arxiv.org/abs/1312.6114\n # 0.5 * sum(1 + log(sigma^2) - mu^2 - sigma^2)\n\n # for gaussian distribution when\n # generated data passed to the encorder is z~ N(0,1) and generated data is x~N(m,var)\n\n kl_loss = -0.5 * torch.sum(1 + logvar - mean.pow(2) - logvar.exp())\n\n normalized_kl_loss = kl_loss / (batch_size * input_size)\n scaled_kl_loss = beta*normalized_kl_loss\n # scaled_kl_loss = beta*kl_loss\n\n # return bce_loss + kl_loss, bce_loss, kl_loss\n return bce_loss + scaled_kl_loss, bce_loss, normalized_kl_loss\n # return mse_loss + scaled_kl_loss, mse_loss, kl_loss", "def loss_fn(params):\n logits = models.ProgramTransformer(config).apply(\n {'params': params},\n inputs,\n outputs,\n programs,\n rngs={'dropout': train_rng})\n loss, weight_sum = compute_weighted_cross_entropy(logits, programs, weights)\n mean_loss = loss / weight_sum\n return mean_loss, logits", "def get_loss_fn():\n return reconstruction", "def get_loss(self, x, y):\n \"*** YOUR CODE HERE question 2 ***\"\n return nn.SquareLoss(self.run(x), y)", "def loss(self, X, y):\n pass", "def svm_loss(x, y):\n loss, dx = None, None\n ###########################################################################\n # TODO: Implement loss and gradient for multiclass SVM classification. #\n # This will be similar to the svm loss vectorized implementation in #\n # cs231n/classifiers/linear_svm.py. 
#\n ###########################################################################\n # *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n y_temp = np.ones((x.shape[0], x.shape[1])) # 1로 구성된 x와 같은 쉐입의 매트릭스를 만든다\n #print(y_temp)\n y_score = x[np.arange(x.shape[0]), y] # 정답레이블의 스코어로만 구성된 하나의 컬럼 벡터를 만든다\n y_score = np.reshape(y_score, (x.shape[0], 1)) # 브로드캐스팅을 위해 리쉐입 해준다\n y_temp[np.arange(x.shape[0]), y] = 0 # 1로 구성된 템프매트릭스의 정답 레이블에 해당되는 인덱스에 0을 할당한다\n #print(y_temp)\n loss_temp = (x - y_score) - 1\n loss_temp = (-loss_temp * y_temp) / x.shape[0]\n loss = (np.sum(loss_temp))\n #print(loss_temp)\n\n #print(np.sum(loss_temp, axis = 1))\n \n temp = loss_temp * x.shape[0]\n temp[loss_temp > 0] = 1\n row_sum = np.sum(temp, axis = 1)\n temp[np.arange(x.shape[0]), y] = -row_sum.T\n dx = -temp\n\n dx /= x.shape[0]\n\n\n #print(dx)\n\n\n\n\n pass\n\n # *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n ###########################################################################\n # END OF YOUR CODE #\n ###########################################################################\n return loss, dx", "def JS_loss_fun_grad(teacher_preds, student_pred, graph):\n # if FLAGS.heat:\n # student_pred = logit2prob_heat(student_pred)\n # else:\n # student_pred = logit2prob(student_pred) \n student_pred = logit2prob(student_pred)\n loss = noisy_op.compute_loss(student_pred, teacher_preds, graph, name=\"nosiy_loss\")\n # loss.set_shape((1,))\n tf.add_to_collection('losses', loss)\n return tf.add_n(tf.get_collection('losses'), name='total_loss'), loss", "def get_loss(self, xs, y):\n \"*** YOUR CODE HERE question 4 ***\"\n return nn.SoftmaxLoss(self.run(xs), y)", "def test_svd_sharpness2(self):\n \t\t\n\t\tprint(\"----test_svd_sharpness-----\")\n\n\t\t#model = models.vgg11(pretrained=True)\n\t\tmodel = models.vgg11(weights='VGG11_Weights.IMAGENET1K_V1')\n\n\t\tself.watcher = ww.WeightWatcher(model=model, log_level=logging.WARNING)\n\t\t\n\t\t\n\t\tesd_before = self.watcher.get_ESD(layer=self.third_layer) \n\t\t\n\t\tself.watcher.SVDSharpness(layers=[self.third_layer])\n\t\tesd_after = self.watcher.get_ESD(layer=self.third_layer) \n\t\t\n\t\tprint(\"max esd before {}\".format(np.max(esd_before)))\n\t\tprint(\"max esd after {}\".format(np.max(esd_after)))\n\n\t\tself.assertGreater(np.max(esd_before),np.max(esd_after))", "def train(net):\n\n # Set SGD hyperparameters\n n_iter = 200 # number of iterations of SGD\n learning_rate = 1e-3 # learning rate for SGD\n momentum = .99 # momentum parameter for SGD\n batch_size = 100 # number of data points in each mini-batch\n\n # Initialize binary cross-entropy loss function\n loss_fn = nn.BCELoss()\n\n # Initialize SGD optimizer with momentum\n optimizer = optim.SGD(net.parameters(), lr=learning_rate, momentum=momentum)\n\n # Placeholder to save loss at each iteration\n track_loss = []\n\n # Loop over iterations\n for i in range(n_iter):\n\n # Sample minibatch of oriented grating stimuli\n stimuli, tilt = sample_stimuli(batch_size)\n\n # Evaluate loss and update network weights\n out = net(stimuli) # predicted probability of tilt right\n loss = loss_fn(out, tilt) # evaluate loss\n optimizer.zero_grad() # clear gradients\n loss.backward() # compute gradients\n optimizer.step() # update weights\n \n # Keep track of loss at each iteration\n track_loss.append(loss.item())\n\n # Track progress\n if (i + 1) % (n_iter / 10) == 0:\n print('iteration %i | loss: %.3f | percent correct: %.2f%%' % (i + 1, loss.item(), 100 * pcorrect(out, tilt)))\n \n # Plot loss\n 
plt.plot(track_loss)\n plt.xlabel('iterations of SGD')\n plt.ylabel('binary cross-entropy loss')\n plt.xlim([0, None])\n plt.ylim([0, None])\n plt.show()", "def _compute_loss(self, predictions, targets, **params):\n pass", "def neural_network(X, Y, Xs_test, Ys_test):\n ## YOUR CODE HERE\n #################\n return 0", "def loss_creator(config):\n return torch.nn.BCELoss()", "def test_all_var_args_grad_with_sens():\n\n class GradNet(Cell):\n def __init__(self, net):\n super(GradNet, self).__init__()\n self.weights = ParameterTuple(net.trainable_params())\n self.net = net\n\n def construct(self, *inputs):\n return grad_by_list_with_sens(self.net, self.weights)(*inputs)\n\n x = Tensor(np.ones([3, 4, 5]), dtype=mstype.float32)\n y = Tensor(np.ones([3, 4, 5]), dtype=mstype.float32)\n sens = Tensor(np.ones([3, 4, 5]), dtype=mstype.float32)\n net = VarNet(SecondNet())\n grad_net = GradNet(net)\n _ = grad_net(x, y, sens)", "def loss_fn(model):\n with flax.deprecated.nn.stateful() as state:\n with flax.deprecated.nn.stochastic(dropout_rng):\n logits = model(example, train=True)\n loss, weight_sum = compute_weighted_cross_entropy(logits, targets)\n mean_loss = loss / weight_sum\n return mean_loss, (logits, state)", "def loss_fn(self, pred: Tensor, true: Tensor) -> Tensor:\n pass", "def svm_loss_naive(W, X, y, reg):\n dW = np.zeros(W.shape) # initialize the gradient as zero\n\n # compute the loss and the gradient\n num_classes = W.shape[1]\n num_train = X.shape[0]\n loss = 0.0\n for i in xrange(num_train):\n scores = X[i].dot(W)\n correct_class_score = scores[y[i]]\n for j in xrange(num_classes):\n if j == y[i]:\n continue\n margin = scores[j] - correct_class_score + 1 # note delta = 1\n if margin > 0:\n loss += margin\n dW[:,j] += X[i,:].T\n dW[:,y[i]] -= X[i,:].T\n \n\n # Right now the loss is a sum over all training examples, but we want it\n # to be an average instead so we divide by num_train.\n loss /= num_train\n dW/= num_train\n\n # Add regularization to the loss.\n loss += 0.5 * reg * np.sum(W * W)\n dW += reg*W\n\n #############################################################################\n # TODO: #\n # Compute the gradient of the loss function and store it dW. #\n # Rather that first computing the loss and then computing the derivative, #\n # it may be simpler to compute the derivative at the same time that the #\n # loss is being computed. As a result you may need to modify some of the #\n # code above to compute the gradient. 
#\n #############################################################################\n\n\n return loss, dW", "def loss(self, dataset=None, loss=None, training=None):\n # Recover the defaults, if missing\n dataset, loss = self._resolve_defaults(trainset=dataset, loss=loss)\n # Sample the train batch\n inputs, targets = dataset.sample(self._config)\n # Guess whether computation is for training, if necessary\n if training is None:\n training = torch.is_grad_enabled()\n # Forward pass\n return loss(self.run(inputs), targets, self._params)", "def getLoss(self, x_test, t_test):\n x_t = Variable(x_test, requires_grad=False)\n #Feed inputes into neural network\n t_pred = self.model(x_t)\n #Now lets compute out loss\n loss = self.loss_fn(t_pred, t_test)\n return loss", "def _define_loss(self):\n\n cost = []\n unit_cost = []\n for nn in range(len(self.ffnet_out)):\n data_out = self.data_out_batch[nn]\n if self.filter_data:\n # this will zero out predictions where there is no data,\n # matching Robs here\n pred = tf.multiply(\n self.networks[self.ffnet_out[nn]].layers[-1].outputs,\n self.data_filter_batch[nn])\n else:\n pred = self.networks[self.ffnet_out[nn]].layers[-1].outputs\n\n nt = tf.cast(tf.shape(pred)[0], tf.float32)\n # define cost function\n if self.noise_dist == 'gaussian':\n with tf.name_scope('gaussian_loss'):\n cost.append(tf.nn.l2_loss(data_out - pred) / nt)\n unit_cost.append(tf.reduce_mean(tf.square(data_out-pred), axis=0))\n\n elif self.noise_dist == 'poisson':\n with tf.name_scope('poisson_loss'):\n\n if self.poisson_unit_norm is not None:\n # normalize based on rate * time (number of spikes)\n cost_norm = tf.multiply(self.poisson_unit_norm[nn], nt)\n else:\n cost_norm = nt\n\n cost.append(-tf.reduce_sum(tf.divide(\n tf.multiply(data_out, tf.log(self._log_min + pred)) - pred,\n cost_norm)))\n\n unit_cost.append(-tf.divide(\n tf.reduce_sum(\n tf.multiply(\n data_out, tf.log(self._log_min + pred)) - pred, axis=0),\n cost_norm))\n\n elif self.noise_dist == 'bernoulli':\n with tf.name_scope('bernoulli_loss'):\n # Check per-cell normalization with cross-entropy\n # cost_norm = tf.maximum(\n # tf.reduce_sum(data_out, axis=0), 1)\n cost.append(tf.reduce_mean(\n tf.nn.sigmoid_cross_entropy_with_logits(\n labels=data_out, logits=pred)))\n unit_cost.append(tf.reduce_mean(\n tf.nn.sigmoid_cross_entropy_with_logits(\n labels=data_out, logits=pred), axis=0))\n else:\n TypeError('Cost function not supported.')\n\n self.cost = tf.add_n(cost)\n self.unit_cost = unit_cost\n\n # Add regularization penalties\n reg_costs = []\n with tf.name_scope('regularization'):\n for nn in range(self.num_networks):\n reg_costs.append(self.networks[nn].define_regularization_loss())\n self.cost_reg = tf.add_n(reg_costs)\n\n self.cost_penalized = tf.add(self.cost, self.cost_reg)\n\n # save summary of cost\n # with tf.variable_scope('summaries'):\n tf.summary.scalar('cost', self.cost)\n tf.summary.scalar('cost_penalized', self.cost_penalized)\n tf.summary.scalar('reg_pen', self.cost_reg)", "def _create_selu(cls, onnx_node, inputs, opset_version):\n alpha = onnx_node.getattr(\"alpha\", 1.67326)\n gamma = onnx_node.getattr(\"gamma\", 1.0507)\n _, forward = cls._common_onnx_node_to_singa_op(onnx_node, inputs,\n opset_version)\n return _, forward(alpha, gamma)", "def loss(data, y_pred):\n # TODO: Try using points other than the training data points for the divergence calculation.\n y_true = data[:,:2]\n p1 = data[:,2:5]\n p2 = data[:,5:8]\n p3 = data[:,8:11]\n p4 = data[:,11:14]\n\n ### Calculate divergence using model 
predictions:\n\n # Step 1: Use the model to calculate predicted wind field in the surrounding points p1, p2, p3 and p4.\n y_pred_p1 = model(p1)\n y_pred_p2 = model(p2)\n y_pred_p3 = model(p3)\n y_pred_p4 = model(p4)\n\n # Step 2: Calculate the partial derivatives with a three-point centered difference.\n scale_x = self.scaler_data.scale_[0] #scale-factor for x\n scale_y = self.scaler_data.scale_[1] #scale-factor for y\n\n dudx = (y_pred_p1[:, 0] - y_pred_p3[:, 0]) / (p1[:,0] - p3[:,0]) # <- pj = transformed data\n dvdy = (y_pred_p2[:, 1] - y_pred_p4[:, 1]) / (p2[:,1] - p4[:,1]) # <- pj = transformed data\n\n # Step 3: Calculate the divergence.\n divergence = ( dudx / scale_x + dvdy / scale_y ) * np.mean([scale_x, scale_y])\n #tf.print(K.mean(K.abs(divergence)))\n\n # Step 4: Calculate and return total loss.\n return K.mean(K.square(y_true - y_pred)) + gamma*K.mean(K.square(divergence))", "def _build_loss(self, **kwargs):\n pass", "def loss(self, x):\n return self._svi.evaluate_loss(*x)", "def __init__(self, S=7, B=2, C=20): \n super().__init__()\n self.mse = nn.MSELoss(reduction=\"sum\")\n self.S = S\n self.B = B\n self.C = C\n self.l_noobl = 0.5\n self.l_coord = 5", "def loss(self, x, y):\n raise NotImplementedError", "def svm_loss_naive(W, X, y, reg):\n dW = np.zeros(W.shape) # initialize the gradient as zero\n\n # compute the loss and the gradient\n num_classes = W.shape[0]\n num_train = X.shape[1]\n loss = 0.0\n for i in xrange(num_train):\n scores = W.dot(X[:, i])\n correct_class_score = scores[y[i]]\n for j in xrange(num_classes):\n if j == y[i]:\n continue\n margin = scores[j] - correct_class_score + 1 # note delta = 1\n if margin > 0:\n loss += margin\n dW[j] += X[:, i]\n dW[y[i]] -= X[:, i]\n\n # Right now the loss is a sum over all training examples, but we want it\n # to be an average instead so we divide by num_train.\n loss /= num_train\n\n # Add regularization to the loss.\n loss += 0.5 * reg * np.sum(W * W)\n\n #############################################################################\n # TODO: #\n # Compute the gradient of the loss function and store it dW. #\n # Rather that first computing the loss and then computing the derivative, #\n # it may be simpler to compute the derivative at the same time that the #\n # loss is being computed. As a result you may need to modify some of the #\n # code above to compute the gradient. #\n #############################################################################\n\n\n return loss, (dW / num_train)", "def svm_loss_naive(W, X, y, reg):\n dW = np.zeros(W.shape) # initialize the gradient as zero\n\n # compute the loss and the gradient\n num_classes = W.shape[1]\n num_train = X.shape[0]\n loss = 0.0\n for i in range(num_train):\n scores = X[i].dot(W)\n correct_class_score = scores[y[i]]\n for j in range(num_classes):\n if j == y[i]:\n continue\n margin = scores[j] - correct_class_score + 1 # note delta = 1\n if margin > 0:\n loss += margin\n dW[:, y[i]] -= X[i].transpose()\n dW[:, j] += X[i].transpose() # chain rule\n\n # Right now the loss is a sum over all training examples, but we want it\n # to be an average instead so we divide by num_train.\n loss /= num_train\n dW /= num_train\n\n # Add regularization to the loss.\n loss += 0.5 * reg * np.sum(W * W)\n dW += reg * W\n\n #############################################################################\n # TODO: #\n # Compute the gradient of the loss function and store it dW. 
#\n # Rather that first computing the loss and then computing the derivative, #\n # it may be simpler to compute the derivative at the same time that the #\n # loss is being computed. As a result you may need to modify some of the #\n # code above to compute the gradient. #\n #############################################################################\n\n return loss, dW", "def funcs(dataset, network, batch_size=BATCH_SIZE, learning_rate=LEARNING_RATE, sparsity=0.02, beta=0.5, momentum=MOMENTUM):\n\n # symbolic variables \n X_batch = T.tensor4()\n y_batch = T.tensor4()\n\n layers = lasagne.layers.get_all_layers(network)\n num_layers = len(layers)\n print(num_layers)\n\n code_layer = layers[num_layers/2]\n\n # code output \n code_output = lasagne.layers.get_output(code_layer, X_batch, deterministic=True)\n rho_hat = T.mean(code_output,axis=1)\n # L = T.sum(sparsity * T.log(sparsity/rho_hat) + (1 - sparsity) * T.log((1 - sparsity)/(1 - rho_hat)))\n l = T.sub(1,code_output)\n ll = T.mul(code_output,l)\n L = T.mul(4,ll)\n L = L.mean()\n\n\n # reg = 0.0001*lasagne.regularization.l2(network)\n # this is the cost of the network when fed throught the noisey network\n train_output = lasagne.layers.get_output(network, X_batch)\n cost = lasagne.objectives.mse(train_output, y_batch) \n cost = cost.mean() + beta * L\n # validation cost\n valid_output = lasagne.layers.get_output(network, X_batch)\n valid_cost = lasagne.objectives.mse(valid_output, y_batch) \n valid_cost = valid_cost.mean() \n\n # test the performance of the netowork without noise\n pred = lasagne.layers.get_output(network, X_batch, deterministic=True)\n # pred = T.argmax(test, axis=1)\n accuracy = 1 - T.mean(lasagne.objectives.mse(pred, y_batch), dtype=theano.config.floatX)\n\n all_params = lasagne.layers.get_all_params(network)\n updates = lasagne.updates.nesterov_momentum(cost, all_params, learning_rate, momentum)\n\n train = theano.function(inputs=[X_batch, y_batch], outputs=cost, updates=updates, allow_input_downcast=True)\n valid = theano.function(inputs=[X_batch, y_batch], outputs=valid_cost, allow_input_downcast=True)\n predict = theano.function(inputs=[X_batch], outputs=pred, allow_input_downcast=True)\n accuracy = theano.function(inputs=[X_batch,y_batch], outputs=accuracy, allow_input_downcast=True)\n code = theano.function(inputs=[X_batch], outputs=code_output, allow_input_downcast=True)\n\n return dict(\n train=train,\n valid=valid,\n predict=predict,\n accuracy=accuracy,\n code=code\n )", "def get_loss(self, x, y):\n \"*** YOUR CODE HERE ***\"\n y_pred = self.run(x)\n return nn.SoftmaxLoss(y_pred,y)", "def loss_fn(self, recons, inputs, mu, log_var, **kwargs):\n# kld_weight = kwargs['M_N'] # Account for the minibatch samples from the dataset\n recons_loss = F.mse_loss(recons, inputs)\n# recons_loss = F.binary_cross_entropy(recons, inputs)\n KLD = torch.mean(-0.5 * torch.sum(1 + log_var - mu**2 - log_var.exp(), dim=1), dim=0)\n loss = recons_loss - KLD\n return loss, recons_loss, KLD", "def get_loss(self, x, y):\n \"*** YOUR CODE HERE ***\"\n predictedY = self.run(x)\n return nn.SquareLoss(predictedY, y)", "def _create_loss(self):\n\n with tf.name_scope(\"loss\"):\n \n # gini=(tf.nn.l2_loss( self.score))/100000\n gini = tf.losses.softmax_cross_entropy(self.score, 0*self.score)\n \n promo_prob=tf.reduce_sum(tf.multiply(self.score, self.cohort_weight),\n axis=1)\n inc_value = tf.reduce_mean(tf.multiply(promo_prob, self.value))- self.control_value\n inc_cost = tf.reduce_mean( tf.multiply(promo_prob, self.cost)) - 
self.control_cost\n \n\n\n # determine loss function based on self.obj_rule\n if self.obj_rule == 'cpiv':\n self.objective = inc_cost / inc_value\n\n elif self.obj_rule == 'ivc':\n # maximize ivc\n self.objective = - inc_value / inc_cost\n\n elif self.obj_rule == 'lagrangian':\n assert self.shadow is not None, 'Need to pass in shadow value if use lagrangian as obj_rule.'\n self.objective = inc_cost - self.shadow * inc_value\n\n elif self.obj_rule == 'value':\n # maximize delta values\n self.objective = - inc_value\n\n # use only cost as objective\n elif self.obj_rule == 'cost':\n # maximize delta cost\n self.objective = - inc_cost\n\n else:\n raise Exception('Invalid obj_rule!')\n\n # regularization\n reg_loss = tf.reduce_sum(tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES))\n # weights = tf.trainable_variables() # all vars of your graph\n # reg_loss = tf.norm( weights,ord=1)\n\n # final loss\n self.loss = self.objective +reg_loss+.1*gini", "def svm_loss(x, y):\n N = x.shape[0]\n correct_class_scores = x[np.arange(N), y]\n margins = np.maximum(0, x - correct_class_scores[:, np.newaxis] + 1.0)\n margins[np.arange(N), y] = 0\n loss = np.sum(margins) / N\n num_pos = np.sum(margins > 0, axis=1)\n dx = np.zeros_like(x)\n dx[margins > 0] = 1\n dx[np.arange(N), y] -= num_pos\n dx /= N\n return loss, dx", "def main(args):\n print('Setting up')\n tf.reset_default_graph()\n # SETUP AND LOAD DATA\n print('...Loading settings and data')\n args, data = settings(args)\n\n # BUILD MODEL\n ## Placeholders\n print('...Creating network input')\n x = tf.placeholder(tf.float32, [args.batch_size, args.height, args.width, 3], name='x')\n y = tf.placeholder(tf.float32, [args.batch_size, args.height, args.width, 1], name='y')\n learning_rate = tf.placeholder(tf.float32, name='learning_rate')\n train_phase = tf.placeholder(tf.bool, name='train_phase')\n\n ## Construct model\n print('...Constructing model')\n if args.mode == 'baseline':\n pred = BSD_model.vgg_bsd(args, x, train_phase)\n elif args.mode == 'hnet':\n pred = BSD_model.hnet_bsd(args, x, train_phase)\n else:\n print('Must execute script with valid --mode flag: \"hnet\" or \"baseline\"')\n sys.exit(-1)\n bsd_map = tf.nn.sigmoid(pred['fuse'])\n\n # Print number of parameters\n n_vars = 0\n for var in tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES):\n n_vars += np.prod(var.get_shape().as_list())\n print('...Number of parameters: {:d}'.format(n_vars))\n\n print('...Building loss')\n loss = 0.\n beta = 1 - tf.reduce_mean(y)\n pw = beta / (1. 
- beta)\n for key in pred.keys():\n pred_ = pred[key]\n loss += tf.reduce_mean(tf.nn.weighted_cross_entropy_with_logits(y, pred_, pw))\n # Sparsity regularizer\n loss += args.sparsity * sparsity_regularizer(pred_, 1 - beta)\n\n ## Optimizer\n print('...Building optimizer')\n optim = tf.train.AdamOptimizer(learning_rate=learning_rate)\n train_op = optim.minimize(loss)\n\n # TRAIN\n print('TRAINING')\n lr = args.learning_rate\n saver = tf.train.Saver()\n sess = tf.Session()\n print('...Initializing variables')\n init = tf.global_variables_initializer()\n init_local = tf.local_variables_initializer()\n sess.run([init, init_local], feed_dict={train_phase: True})\n print('Beginning loop')\n start = time.time()\n epoch = 0\n\n while epoch < args.n_epochs:\n # Training steps\n batcher = pklbatcher(data['train_x'], data['train_y'], args.batch_size, shuffle=True, augment=True)\n train_loss = 0.\n for i, (X, Y, __) in enumerate(batcher):\n feed_dict = {x: X, y: Y, learning_rate: lr, train_phase: True}\n __, l = sess.run([train_op, loss], feed_dict=feed_dict)\n train_loss += l\n sys.stdout.write('{:d}/{:d}\\r'.format(i, len(data['train_x'].keys()) / args.batch_size))\n sys.stdout.flush()\n train_loss /= (i + 1.)\n\n print('[{:04d} | {:0.1f}] Loss: {:04f}, Learning rate: {:.2e}'.format(epoch,\n time.time() - start, train_loss, lr))\n\n if epoch % args.save_step == 0:\n # Validate\n save_path = args.test_path + '/T_' + str(epoch)\n if not os.path.exists(save_path):\n os.mkdir(save_path)\n generator = pklbatcher(data['valid_x'], data['valid_y'],\n args.batch_size, shuffle=False,\n augment=False, img_shape=(args.height, args.width))\n # Use sigmoid to map to [0,1]\n j = 0\n for batch in generator:\n batch_x, batch_y, excerpt = batch\n output = sess.run(bsd_map, feed_dict={x: batch_x, train_phase: False})\n for i in range(output.shape[0]):\n save_name = save_path + '/' + str(excerpt[i]).replace('.jpg', '.png')\n im = output[i, :, :, 0]\n im = (255 * im).astype('uint8')\n if data['valid_x'][excerpt[i]]['transposed']:\n im = im.T\n skio.imsave(save_name, im)\n j += 1\n print('Saved predictions to: %s' % (save_path,))\n\n # Updates to the training scheme\n if epoch % 40 == 39:\n lr = lr / 10.\n epoch += 1\n\n # Save model\n saver.save(sess, args.checkpoint_path + 'model.ckpt')\n sess.close()\n return train_loss", "def get_loss(self, x, y):\n \"*** YOUR CODE HERE ***\"\n #make your predictions using run\n #compute loss nn.squareloss\n y_pred = self.run(x)\n return nn.SquareLoss(y_pred,y)", "def loss_fun(model: GPModel, params: dict) -> float:\n py = model.module.call(params, train_ds['index_points'])\n return -py.log_prob(train_ds['y'])", "def nt_transfer_loss(self, student_net_params, masks, teacher_net_params, x, density_level): \n\n masked_student_net_params = get_sparse_params_filtered_by_masks(student_net_params, masks)\n\n # split inputs into two collections, x1 and x2.\n x1 = x[:int(len(x)/2)]\n x2 = x[int(len(x)/2):]\n \n # student network prediction\n student_prediction = self.apply_fn(masked_student_net_params, x) \n \n # teacher network prediction\n teacher_prediction = self.apply_fn(teacher_net_params, x)\n\n # student network's NTK evaluated on x1 and x2\n student_ntk_mat = self.emp_ntk_fn(x1, x2, masked_student_net_params) \n\n # teacher network's NTK evaluated on x1 and x2\n teacher_ntk_mat = self.emp_ntk_fn(x1, x2, teacher_net_params) \n\n # compute kernel, target, and paramter l2 loss\n ker_dist, target_dist, param_squared_norm = self.kernel_dist_target_dist_l2_loss(student_ntk_mat, 
student_prediction, teacher_ntk_mat, teacher_prediction, masked_student_net_params)\n\n # weight these losses to get the transfer loss\n transfer_loss = self.LAMBDA_KER_DIST * ker_dist + target_dist + (self.LAMBDA_L2_REG / density_level) * param_squared_norm \n\n return transfer_loss", "def loss(self, y: torch.Tensor, state: AlgorithmState) -> torch.Tensor:\n\n raise NotImplementedError()", "def loss_test(params):\n nx, ny, lx, ly, dx, dy, dt, t, p, pmlc, source = params\n\n history1 = callers.call_bpml(nx, ny, t, dx, dy, dt, p, pmlc, 0.1, source)\n history2 = callers.call_bpml(nx, ny, t, dx, dy, dt, p, pmlc, 0.01, source)\n history3 = callers.call_bpml(nx, ny, t, dx, dy, dt, p, pmlc, 0.001, source)\n\n nyh = int(ny / 2)\n names = [\"sigma = 0\", \"sigma = 0.01\", \"sigma = 0.001\"]\n labels = [\"Hz [V/m]\", \"Space / [Cells]\"]\n snaps = [history1[:, nyh, 50], history2[:, nyh, 50], history3[:, nyh, 50]]\n common.plot_snaps(names, labels, snaps)", "def lossFun(inputs, targets, hprev):\n xs, hs, ys, ps = {}, {}, {}, {}\n hs[-1] = np.copy(hprev)\n loss = 0\n\n # forward pass\n for t in xrange(len(inputs)):\n xs[t] = np.zeros((vocab_size,1)) # encode in 1-of-k representation\n xs[t][inputs[t]] = 1\n hs[t] = np.tanh(np.dot(Wxh, xs[t]) + np.dot(Whh, hs[t-1]) + bh) # hidden state\n ys[t] = np.dot(Why, hs[t]) + by # unnormalized log probabilities for next chars\n ps[t] = np.exp(ys[t]-np.max(ys[t])) / np.sum(np.exp(ys[t]-np.max(ys[t]))) # probabilities for next chars\n loss += -np.log(ps[t][targets[t],0]) # softmax (cross-entropy loss)\n\n assert_array_equal(van.window_step,t)\n assert_array_equal(van.state[t-1],hs[t-1].T[0])\n assert_array_equal(van.statenet[t].forward([xs[t].T[0],hs[t-1].T[0]]),hs[t].T[0])\n assert_array_equal(van.statenet[t].net.elements[0].elements[0].elements[1].W.get(),Wxh)\n assert_array_equal(van.statenet[t].net.elements[0].elements[1].elements[1].W.get(),Whh)\n assert_array_equal(van.statenet[t].net.elements[0].elements[2].W.get(),bh.T[0])\n\n assert_array_equal(vantr.statenet[t].net.elements[0].elements[0].elements[1].W.get(),Wxh)\n assert_array_equal(vantr.statenet[t].net.elements[0].elements[1].elements[1].W.get(),Whh)\n assert_array_equal(vantr.statenet[t].net.elements[0].elements[2].W.get(),bh.T[0])\n assert_array_equal(vantr.outputnet[t].net.elements[0].elements[1].W.get(),Why)\n assert_array_equal(vantr.outputnet[t].net.elements[1].W.get(),by.T[0])\n\n #\n # #Neg\n # assert_array_almost_equal(van.outputnet[t].net.elements[0].elements[0].elements[1].W,Why)\n # assert_array_almost_equal(van.outputnet[t].net.elements[0].elements[1].W,by.T[0])\n # assert_array_almost_equal(van.outputnet[t].forward(hs[t].T[0]),ps[t].T[0])\n # assert_array_almost_equal(van.outputnet[t].forward(van.statenet[t].forward([xs[t].T[0],hs[t-1].T[0]])),ps[t].T[0])\n # assert_array_almost_equal(van.forward(xs[t].T[0]),ps[t].T[0])\n #\n # Cross\n assert_array_equal(van.outputnet[t].net.elements[0].elements[1].W.get(),Why)\n assert_array_equal(van.outputnet[t].net.elements[1].W.get(),by.T[0])\n assert_array_equal(van.outputnet[t].forward(hs[t].T[0]),ys[t].T[0])\n assert_array_equal(van.outputnet[t].forward(van.statenet[t].forward([xs[t].T[0],hs[t-1].T[0]])),ys[t].T[0])\n assert_array_equal(van.outputnet[t].forward(van.statenet[t].forward([xs[t].T[0],van.state[t-1]])),ys[t].T[0])\n assert_array_equal(van.forward(xs[t].T[0]),ys[t].T[0])\n assert_array_equal(soft.forward(ys[t].T[0]),ps[t].T[0])\n\n # backward pass: compute gradients going backwards\n dWxh, dWhh, dWhy = np.zeros_like(Wxh), 
np.zeros_like(Whh), np.zeros_like(Why)\n dbh, dby = np.zeros_like(bh), np.zeros_like(by)\n dhnext = np.zeros_like(hs[0])\n\n for t in reversed(xrange(len(inputs))):\n dy = np.copy(ps[t])\n dy[targets[t]] -= 1 # backprop into y. see http://cs231n.github.io/neural-networks-case-study/#grad if confused here\n dWhy += np.dot(dy, hs[t].T)\n dby += dy\n dh = np.dot(Why.T, dy) + dhnext # backprop into h\n dhraw = (1 - hs[t] * hs[t]) * dh # backprop through tanh nonlinearity\n dbh += dhraw\n dWxh += np.dot(dhraw, xs[t].T)\n dWhh += np.dot(dhraw, hs[t-1].T)\n\n #\n # #Neg\n # van.backward(negLog.dJdy_gradient(ps[t].T[0],to_one_hot_vect(targets[t],vocab_size)),opt)\n # assert_array_almost_equal(van.outputnet[t].net.elements[0].elements[1].x,hs[t].T[0])\n # assert_array_almost_equal(van.outputnet[t].net.elements[0].elements[0].elements[1].dW,dWhy)\n # assert_array_almost_equal(van.outputnet[t].net.elements[0].elements[1].dW,dby.T[0])\n # assert_array_almost_equal(van.outputnet[t].net.elements[0].elements[0].elements[1].W,Why)\n # assert_array_almost_equal(van.outputnet[t].net.elements[0].elements[1].W,by.T[0])\n #\n #Cross\n assert_array_equal(van.outputnet[t].net.elements[0].elements[1].x,hs[t].T[0])\n assert_array_equal(van.outputnet[t].net.forward(hs[t].T[0]),ys[t].T[0])\n assert_array_equal(soft.forward(van.outputnet[t].net.forward(hs[t].T[0])),ps[t].T[0])\n assert_array_equal(soft.forward(van.outputnet[t].net.forward(hs[t].T[0]))-to_one_hot_vect(targets[t],vocab_size),dy.T[0])\n\n err = cross.dJdy_gradient(ys[t].T[0],to_one_hot_vect(targets[t],vocab_size))\n\n assert_array_equal(soft.forward(van.outputnet[t].net.forward(hs[t].T[0]))-to_one_hot_vect(targets[t],vocab_size),dy.T[0])\n assert_array_equal(ps[t].T[0]-to_one_hot_vect(targets[t],vocab_size),dy.T[0])\n assert_array_equal(err,dy.T[0])\n\n van.backward(err,opt)\n\n assert_array_equal(van.outputnet[t].net.elements[0].elements[1].W.get_dW(),dWhy)\n assert_array_equal(van.outputnet[t].net.elements[0].elements[1].W.get(),Why)\n assert_array_equal(van.outputnet[t].net.elements[1].W.get_dW(),dby.T[0])\n assert_array_almost_equal(van.outputnet[t].net.elements[1].W.get(),by.T[0])\n #\n\n assert_array_equal(van.statenet[t].net.elements[0].elements[0].elements[1].W.get_dW(),dWxh)\n assert_array_equal(van.statenet[t].net.elements[0].elements[1].elements[1].W.get_dW(),dWhh)\n assert_array_equal(van.statenet[t].net.elements[0].elements[2].W.get_dW(),dbh.T[0])\n assert_array_equal(van.statenet[t].net.elements[0].elements[0].elements[1].W.get(),Wxh)\n assert_array_equal(van.statenet[t].net.elements[0].elements[1].elements[1].W.get(),Whh)\n assert_array_equal(van.statenet[t].net.elements[0].elements[2].W.get(),bh.T[0])\n assert_array_equal(van.dJdh[t],dhnext.T[0])\n\n dhnext = np.dot(Whh.T, dhraw)\n\n opt.update_model()\n trainer.learn_window(vantr,zip(to_hot_vect(inputs,vocab_size),to_hot_vect(targets,vocab_size)),crosstr,opttr)\n\n for dparam in [dWxh, dWhh, dWhy, dbh, dby]:\n np.clip(dparam, -5, 5, out=dparam) # clip to mitigate exploding gradients\n\n return loss, dWxh, dWhh, dWhy, dbh, dby, hs[len(inputs)-1]", "def svm_loss_naive(W, X, y, reg):\n dW = np.zeros(W.shape) # initialize the gradient as zero\n\n # compute the loss and the gradient\n num_classes = W.shape[1]\n num_train = X.shape[0]\n loss = 0.0\n c = np.array([0, 1])\n pred_class = []\n for i in range(num_train):\n scores = X[i].dot(W)\n pred_class.append(c[np.argmax(scores)])\n #print('scores size:',scores.shape)\n correct_class_score = scores[y[i]]\n for j in range(num_classes):\n if j 
== y[i]:\n continue\n margin = scores[j] - correct_class_score + 1 # note delta = 1\n if margin > 0:\n loss += margin\n\n # Right now the loss is a sum over all training examples, but we want it\n # to be an average instead so we divide by num_train.\n loss /= num_train\n\n # Add regularization to the loss.\n loss += reg * np.sum(W * W)\n print(pred_class)\n\n return loss, dW, pred_class", "def backward_D_basic(self, netD, netDadv,real,fake,i,direction):\n # if(not self.orders):\n # pred_real = netD(real) \n # loss_D_real = self.criterionGAN_D(pred_real, self.labels[i])\n # # Fake\n # pred_fake = netD(fake.detach())\n # loss_D_fake = self.criterionGAN_D(pred_fake, self.labels[i+1])\n # # Combined loss and calculate gradients\n # self.loss_D_cls = (loss_D_real + loss_D_fake) * 0.5\n # else:\n if(direction):\n self.pred_real = netD(real)\n self.loss_D_cls = self.criterionGAN_D(self.pred_real, self.labels[i])\n ifvalidAdorn=netDadv(real)\n ifvalidNoAdorn=netDadv(fake.detach())\n loss_D_adv_real = self.criterionGAN_D(ifvalidAdorn,True)\n loss_D_adv_fake = self.criterionGAN_D(ifvalidNoAdorn,False)\n self.loss_D_adv=(loss_D_adv_fake+loss_D_adv_real)*0.5\n else:\n pred_real = netD(real)\n self.loss_D_cls = self.criterionGAN_D(\n pred_real, self.labels_rev[i])\n ifvalidAdorn=netDadv(real)\n ifvalidNoAdorn=netDadv(fake.detach())\n loss_D_adv_real = self.criterionGAN_D(ifvalidAdorn,True)\n loss_D_adv_fake = self.criterionGAN_D(ifvalidNoAdorn,False)\n self.loss_D_adv=(loss_D_adv_fake+loss_D_adv_real)*0.5\n\n loss_D = self.loss_D_cls+self.loss_D_adv\n loss_D.backward()\n return loss_D", "def node_loss_construct(cfg, model_name='node_loss', **kwargs):\n losses = node_loss_dict()\n loss_cfg = cfg[model_name]\n name = loss_cfg.get('name', 'type')\n if not name in losses:\n raise Exception(\"Unknown node loss name provided:\", name)\n\n return losses[name](loss_cfg, **kwargs)", "def D_loss_basic(self, netD, real, fake):\n # Real\n D_real = netD(real)\n D_real_loss = self.GANLoss(D_real, True, True)\n # fake\n D_fake = netD(fake)\n D_fake_loss = self.GANLoss(D_fake, False, True)\n # loss for discriminator\n D_loss = (D_real_loss + D_fake_loss) * 0.5\n # gradient penalty for wgan-gp\n if self.gan_mode == 'wgangp':\n gradient_penalty, gradients = base_function.cal_gradient_penalty(netD, real, fake)\n D_loss +=gradient_penalty\n\n D_loss = D_loss * self.loss_d_weight\n D_loss.backward()\n\n return D_loss", "def compute_loss(self):\n self.prototypes = self.compute_prototypes()\n self.test_logits = self.compute_logits()\n loss = tf.nn.sparse_softmax_cross_entropy_with_logits(\n labels=self.episode.test_labels, logits=self.test_logits)\n cross_entropy_loss = tf.reduce_mean(loss)\n regularization = tf.reduce_sum(\n tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES))\n loss = cross_entropy_loss + self.weight_decay * regularization\n return loss", "def neural_network(X, Y, Xs_test, Ys_test):\n \n X_n = (X - np.mean(X, axis = 0)) / np.std(X, axis = 0)\n Y_n = (Y - np.mean(Y, axis = 0)) / np.std(Y, axis = 0)\n \n model = Model(X_n.shape[1])\n model.addLayer(DenseLayer(100, ReLUActivation()))\n model.addLayer(DenseLayer(100, ReLUActivation()))\n model.addLayer(DenseLayer(Y.shape[1],LinearActivation()))\n model.initialize(QuadraticCost())\n\n mses = []\n for i, X_test in enumerate(Xs_test):\n Y_test = Ys_test[i]\n Y_pred = model.predict((X_test - np.mean(X, axis = 0)) / np.std(X, axis = 0)) \n Y_pred = Y_pred * np.std(Y, axis = 0) + np.mean(Y, axis = 0)\n mse = np.mean(np.sqrt(np.sum((Y_pred - Y_test) ** 2, axis = 
1)) ) \n mses.append(mse)\n\n return mses", "def compute_loss(self, **kwargs):\n raise NotImplementedError", "def funcs(dataset, network, batch_size=BATCH_SIZE, learning_rate=LEARNING_RATE, sparsity=0.02, beta=0.01, momentum=MOMENTUM):\n\n # symbolic variables \n X_batch = T.matrix()\n y_batch = T.matrix()\n\n layers = lasagne.layers.get_all_layers(network)\n num_layers = len(layers)\n\n code_layer = layers[num_layers/2]\n activations_2_layer = layers[num_layers/2 - 1]\n activations_1_layer = layers[num_layers/2 - 2]\n\n # code output \n code_output = lasagne.layers.get_output(code_layer, X_batch, deterministic=True)\n\n l = T.sub(1,code_output)\n ll = T.mul(code_output,l)\n L = T.mul(4,ll)\n L = L.mean()\n\n rho_hat = T.mean(code_output,axis=1)\n # L = T.sum(sparsity * T.log(sparsity/rho_hat) + (1 - sparsity) * T.log((1 - sparsity)/(1 - rho_hat)))\n\n # reg = 0.0001*lasagne.regularization.l2(network)\n # this is the cost of the network when fed throught the noisey network\n train_output = lasagne.layers.get_output(network, X_batch)\n cost = lasagne.objectives.mse(train_output, y_batch) \n cost = cost.mean() + beta * L\n\n all_params = lasagne.layers.get_all_params(network)\n updates = lasagne.updates.nesterov_momentum(cost, all_params, learning_rate, momentum)\n\n \n\n # code and activation outputs\n \n activations_1_output = lasagne.layers.get_output(activations_1_layer, X_batch, deterministic=True)\n activations_2_output = lasagne.layers.get_output(activations_2_layer, X_batch, deterministic=True)\n\n train = theano.function(inputs=[X_batch, y_batch], outputs=cost, updates=updates, allow_input_downcast=True)\n code = theano.function(inputs=[X_batch], outputs=code_output, allow_input_downcast=True)\n activations_1 = theano.function(inputs=[X_batch], outputs=activations_1_output, allow_input_downcast=True)\n activations_2 = theano.function(inputs=[X_batch], outputs=activations_2_output, allow_input_downcast=True)\n\n return dict(\n train=train,\n code=code,\n activations_1=activations_1,\n activations_2=activations_2\n )", "def _initLoss(self):\n\n return torch.nn.MSELoss()", "def get_loss(self, x, y):\n \"*** YOUR CODE HERE question 3 ***\"\n return nn.SoftmaxLoss(self.run(x), y)", "def nll_loss(\n input,\n target,\n weight=None,\n size_average=None,\n ignore_index=None,\n reduce=None,\n reduction='mean',\n):\n if size_average is not None or reduce is not None:\n reduction = _reduction.legacy_get_string(size_average, reduce)\n else:\n reduction = reduction\n return FunctionLib.apply(\n 'NLLLoss', input.device, [input, target],\n axis=1, ignore_index=ignore_index, reduction=reduction.upper())", "def svm_loss(x, y):\n N = x.shape[0]\n correct_class_scores = x[np.arange(N), y]\n margins = np.maximum(0, x - correct_class_scores[:, np.newaxis] + 1.0)\n margins[np.arange(N), y] = 0\n loss = np.sum(margins) / N\n num_pos = np.sum(margins > 0, axis=1)\n dx = np.zeros_like(x)\n dx[margins > 0] = 1\n dx[np.arange(N), y] -= num_pos\n dx /= N\n return loss, dx", "def loss_op(pred, y, index, loss_func):\n loss = 0\n for node_type in pred:\n idx = index[node_type]\n loss += loss_func(pred[node_type][idx], y[node_type])\n return loss", "def loss_fun(para):\n\n return -data_processing(my_cir(para))", "def loss_fn(gr_truth, pred):\n return 100 * dice_loss(pred, gr_truth) + softmax_weighted_loss(pred, gr_truth)", "def eval_loss(self, input_dataset, target_dataset):\n\t\t#######################################################################\n\t\t# ** START OF YOUR CODE 
**\n\t\t#######################################################################\n\t\tprediction = self.network.forward(input_dataset)\n\t\tloss = self._loss_layer.forward(prediction, target_dataset)\n\t\t\n\t\treturn loss\n\t\t#######################################################################\n\t\t# ** END OF YOUR CODE **\n\t\t#######################################################################", "def build_loss(self):\n\n opt = tf.train.AdamOptimizer(self.learning_rate)\n mse = tf.losses.mean_squared_error(self.label[-1], self.outputs[-1])\n loss = tf.losses.get_total_loss()\n\n return mse, loss", "def build_densenet(l_in, input_var=None, first_output=64, growth_rate=32, num_blocks=4, dropout=0):\n \n\n nb_layers = [6, 12, 32, 32] # For DenseNet-169\n nb_layers = [6, 12, 24, 16] # For DenseNet-121\n # initial convolution\n network = Conv2DLayer(l_in, first_output, filter_size=7, stride=2, pad='same',\n W=lasagne.init.HeNormal(gain='relu'),\n b=None, nonlinearity=None, name='pre_conv')\n network = BatchNormLayer(network, name='pre_bn', beta=None, gamma=None)\n network = ScaleLayer(network, name='pre_scale')\n network = BiasLayer(network, name='pre_shift')\n network = dnn.MaxPool2DDNNLayer(network, pool_size=3, stride=2) \n # note: The authors' implementation does *not* have a dropout after the\n # initial convolution. This was missing in the paper, but important.\n # if dropout:\n # network = DropoutLayer(network, dropout)\n # dense blocks with transitions in between\n\n for b in range(num_blocks):\n network = dense_block(network, nb_layers[b], growth_rate, dropout,\n name_prefix='block%d' % (b + 1))\n if b < num_blocks - 1:\n network = transition(network, dropout,\n name_prefix='block%d_trs' % (b + 1))\n # post processing until prediction\n network = ScaleLayer(network, name='post_scale')\n network = BiasLayer(network, name='post_shift')\n network = NonlinearityLayer(network, nonlinearity=rectify, name='post_relu')\n\n return network", "def train(self, examples):\n optimizer = optim.Adam(self.nnet.parameters(),\n lr=1e-7,\n weight_decay=1e-7\n )\n average_loss = 0\n total_batch_num = 0\n for epoch in range(self.epoch_num):\n epoch_loss = 0\n batch_idx = 0\n while batch_idx < int(len(examples)/args.batch_size):\n ids = np.random.randint(len(examples), size=args.batch_size)\n state, policy, v = list(zip(*[examples[i] for i in ids]))\n\n state = torch.Tensor(np.array(state)).contiguous().cuda()\n target_policy = torch.Tensor(\n np.array(policy)).contiguous().cuda()\n target_v = torch.Tensor(np.array(v)).contiguous().cuda()\n\n # predict\n self.nnet.eval()\n out_policy, out_v = self.nnet(state)\n self.nnet.train()\n\n total_loss = self.loss(\n target_policy, out_policy, target_v, out_v)\n '''\n print(\"state:\\n {}\".format(state[3]))\n print(\"policy:\\n {}\".format(target_policy[3]))\n print(\"nn_policy:\\n {}\".format(out_policy[3]))\n '''\n\n average_loss += abs(np.sum(total_loss.cpu().data.numpy()))\n epoch_loss += abs(np.sum(total_loss.cpu().data.numpy()))\n # print(\"loss in batch {} is {}\".format(batch_idx, total_loss.cpu().data.numpy()))\n\n # compute gradient and do SGD step\n optimizer.zero_grad()\n total_loss.sum().backward()\n optimizer.step()\n\n batch_idx += 1\n total_batch_num += 1\n print('epoch: {}, loss: {}'.format(epoch, epoch_loss/batch_idx))\n self.nnet.eval()\n return average_loss / total_batch_num", "def __D_loss(self, D, real, fake):\n loss = 0.5 * (tf.reduce_mean(tf.squared_difference(D(real), 1.0)) + \\\n tf.reduce_mean(tf.square(D(fake))))\n\n return 
loss", "def _create_selu(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n node.attribute.extend([\n helper.make_attribute('alpha', op.alpha),\n helper.make_attribute('gamma', op.gamma),\n ])\n return node", "def compute_loss(self, *args, **kwargs):\n raise NotImplementedError", "def compute_loss(self,\n pred_seg: Dict[str, torch.Tensor],\n target: torch.Tensor,\n ) -> Dict[str, torch.Tensor]:\n target[target > 0] = 1\n\n loss = self._compute_loss(pred_seg[\"seg_logits\"], target)\n\n preds_decoder_level = [pred_seg[f\"dsv_logits_{dl}\"] for dl in self.decoder_levels]\n targets_interpolated = self.interpolator(preds_decoder_level, target)\n\n for pred, target in zip(preds_decoder_level, targets_interpolated):\n loss = loss + self.dsv_weight * self._compute_loss(pred, target)\n\n return {\"seg_loss\": loss / (len(self.decoder_levels) + 1)}", "def test_compute_densitity_network(self):\n tf.reset_default_graph()\n\n # Build an identical network, with a different scope.\n with tf.variable_scope(name_or_scope=\"cnn\"):\n # Declare input\n input_placeholder = tf.placeholder(\n tf.float32,\n shape=(None, 28, 28, 1)\n )\n\n # Do the parsing\n sequence_to_net(self.net_nsc, input_placeholder)\n\n # Computed beforehand\n target_density = 1.2756756756756757 # Before: 1.281767955801105\n # Assert the value\n self.assertEqual(\n compute_network_density(tf.get_default_graph(), \"cnn\"),\n target_density\n )", "def loss(self, input_val_dict):\n\n sess = tf.get_default_session()\n feed_dict = self.create_feed_dict(input_val_dict)\n loss = sess.run(self._loss, feed_dict=feed_dict)\n return loss", "def validate_nip(model, data, save_dir=False, epoch=0, show_ref=False, loss_type='L2'):\n\n ssims = []\n psnrs = []\n losss = []\n\n # If requested, plot a figure with output/target pairs\n if save_dir is not None:\n images_x = np.minimum(data.count_validation, 10 if not show_ref else 5)\n images_y = np.ceil(data.count_validation / images_x)\n fig = Figure(figsize=(20, 20 / images_x * images_y * (1 if not show_ref else 0.5)))\n \n developed_out = np.zeros_like(data['validation']['y'], dtype=np.float32)\n\n for b in range(data.count_validation):\n example_x, example_y = data.next_validation_batch(b, 1)\n developed = model.process(example_x).numpy().clip(0, 1)\n developed_out[b, :, :, :] = developed\n developed = developed[:, :, :, :].squeeze()\n reference = example_y.squeeze()\n\n # Compute stats\n ssim = metrics.ssim(reference, developed).mean()\n psnr = metrics.psnr(reference, developed).mean()\n \n if loss_type == 'L2':\n loss = np.mean(np.power(reference - developed, 2.0))\n elif loss_type == 'L1':\n loss = np.mean(np.abs(reference - developed))\n else:\n raise ValueError('Invalid loss! 
Use either L1 or L2.')\n \n ssims.append(ssim)\n psnrs.append(psnr)\n losss.append(loss)\n \n # Add images to the plot\n if save_dir is not None:\n ax = fig.add_subplot(images_y, images_x, b+1)\n plots.image(\n np.concatenate((reference, developed), axis=1) if show_ref else developed,\n '{:.1f} dB / {:.2f}'.format(psnr, ssim),\n axes=ax\n )\n\n if save_dir is not None:\n if not os.path.exists(save_dir):\n os.makedirs(save_dir)\n fig.savefig('{}/nip_validation_{:05d}.jpg'.format(save_dir, epoch), bbox_inches='tight', dpi=100, quality=90)\n del fig\n \n return ssims, psnrs, losss", "def loss(loss_name):\n \n def contrastive_loss(y_true, y_pred, margin = 1):\n \"\"\"Implementation of the triplet loss function\n\n\n Contrastive loss = 0.5 * mean( (1-true_value) * square(distance) + true_value * square( max(margin-distance, 0) ))\n\n Args:\n y_true (int): true label, positive pair (same class) -> 0, \n negative pair (different class) -> 1\n \n y_pred (list): python list containing two objects in a pair of tensors:\n left : the encodings for one image data in a pair\n right : the encodings for the other image data in a pair\n margin (float, optional): m > 0 determines how far the embeddings of \n a negative pair should be pushed apart. Defaults to 1.\n\n\n Returns:\n loss (float): real number, value of the loss\n \"\"\"\n\n left = y_pred[0]\n right = y_pred[1]\n\n distance = tf.math.sqrt(tf.math.reduce_sum(tf.math.square(left - right), axis=-1))\n\n loss_positive = tf.math.square(distance)\n loss_negative = tf.math.square(tf.maximum(0., margin - distance))\n \n loss = y_true * loss_negative + (1 - y_true) * loss_positive\n loss = 0.5 * tf.math.reduce_mean(loss)\n\n return loss\n\n def triplet_loss(y_true, y_pred, margin = 1):\n \"\"\"Implementation of the triplet loss function\n\n Arguments:\n y_true : true labels, required when you define a loss in Keras, \n not applied in this function.\n\n y_pred (list): python list containing three objects:\n anchor : the encodings for the anchor data\n positive : the encodings for the positive data (similar to anchor)\n negative : the encodings for the negative data (different from anchor)\n \n margin (float, optional): m > 0 determines how far the embeddings of \n a negative data should be pushed apart. 
Defaults to 1.\n\n Returns:\n loss (float): real number, value of the loss\n \"\"\"\n\n anchor = y_pred[0]\n positive = y_pred[1]\n negative = y_pred[2]\n\n # squared distance between the anchor and the positive\n pos_dist = tf.math.reduce_sum(tf.math.square(anchor - positive), axis=-1)\n\n # squared distance between the anchor and the negative\n neg_dist = tf.math.reduce_sum(tf.math.square(anchor - negative), axis=-1)\n\n # compute loss\n basic_loss = margin + pos_dist - neg_dist\n loss = tf.math.maximum(basic_loss,0.0)\n loss = tf.math.reduce_mean(loss)\n return loss\n\n \n if loss_name == 'contrastive_loss':\n return contrastive_loss\n \n if loss_name == 'triplet_loss':\n return triplet_loss", "def loss(self, X, y=None):\n\n # In dev testing, the loss fnc stops at \"scores\" , unfollowed by \"softmax\" probability prediction.\n # In real testing, \"self.predict()\" needs to be implemented in Solver() class.\n \n if y is None:\n for bn_param in self.bn_params:\n bn_param[\"mode\"] = \"test\"\n\n\n W1, b1 = self.params['W1'], self.params['b1']\n gamma1, beta1 = self.params[\"sbnGamma1\"], self.params[\"sbnBeta1\"]\n bn_param1 = self.bn_params[0]\n\n W2, b2 = self.params['W2'], self.params['b2']\n gamma2, beta2 = self.params[\"sbnGamma2\"], self.params[\"sbnBeta2\"]\n bn_param2 = self.bn_params[1]\n\n W3, b3 = self.params['W3'], self.params['b3']\n gamma3, beta3 = self.params[\"bnGamma3\"], self.params[\"bnBeta3\"]\n bn_param3 = self.bn_params[2]\n\n W4, b4 = self.params['W4'], self.params['b4']\n \n # pass conv_param to the forward pass for the convolutional layer\n conv_param = self.conv_param\n\n # pass pool_param to the forward pass for the max-pooling layer\n pool_param = self.maxpool_params\n\n ############################################################################\n # TODO: Implement the forward pass for the three-layer convolutional net, #\n # computing the class scores for X and storing them in the scores #\n # variable. #\n ############################################################################\n \n scores = None \n cache = {}\n # def conv_sbn_relu_forward(x, w, b, gamma, beta, conv_param, bn_param): return out, cache;\n out, cache[\"layer1\"] = layer_utils.conv_sbn_relu_forward(X, W1, b1, gamma1, beta1, conv_param, bn_param1) \n out, cache[\"layer2\"] = layer_utils.conv_sbn_relu_forward(out, W2, b2, gamma2, beta2, conv_param, bn_param2)\n\n # def max_pool_forward_fast(x, pool_param): return out, cache;\n out, cache[\"maxpool\"] = fast_layers.max_pool_forward_fast(out, pool_param)\n\n # def affine_bn_relu_forward(x, w, b, gamma, beta, bn_param): return out, cache;\n \n out, cache[\"layer3\"] = layer_utils.affine_bn_relu_forward(out, W3, b3, gamma3, beta3, bn_param3)\n\n # def affine_forward(x, w, b): return out, cache;\n scores, cache[\"layer4\"] = layers.affine_forward(out, W4, b4)\n\n\n ############################################################################\n # END OF YOUR CODE #\n ############################################################################\n \n if y is None:\n return scores\n \n ############################################################################\n # TODO: Implement the backward pass for the three-layer convolutional net, #\n # storing the loss and gradients in the loss and grads variables. Compute #\n # data loss using softmax, and make sure that grads[k] holds the gradients #\n # for self.params[k]. Don't forget to add L2 regularization! 
#\n ############################################################################\n \n loss, grads = 0, {}\n\n # def softmax_loss(x, y): return loss, dscore;\n loss, dscores = layers.softmax_loss(scores, y)\n loss += 0.5 * self.reg * (np.sum(W1 * W1) + np.sum(W2 * W2) + np.sum(W3 * W3) + np.sum(W4 * W4))\n\n # def affine_backward(dout, cache): return dx, dw, db;\n dout, dW4, db4 = layers.affine_backward(dscores, cache[\"layer4\"]) \n\n # def affine_bn_relu_backward(dout, cache): return dx, dw, db, dgamma, dbeta;\n dout, dW3, db3, dgamma3, dbeta3 = layer_utils.affine_bn_relu_backward(dout, cache[\"layer3\"])\n\n # print cache[\"layer3\"]\n\n # def max_pool_backward_fast(dout, cache): return max_pool_backward_im2col(dout, real_cache);\n # def max_pool_backward_im2col(dout, cache): return dx;\n dout = fast_layers.max_pool_backward_fast(dout, cache[\"maxpool\"])\n\n # def conv_sbn_relu_backward(dout, cache): return dx, dw, db, dgamma, dbeta;\n dout, dW2, db2, dgamma2, dbeta2 = layer_utils.conv_sbn_relu_backward(dout, cache[\"layer2\"])\n _, dW1, db1, dgamma1, dbeta1 = layer_utils.conv_sbn_relu_backward(dout, cache[\"layer1\"])\n\n # reg\n grads['W4'], grads['b4'] = dW4 + self.reg * W4, db4\n \n grads['W3'], grads['b3'] = dW3 + self.reg * W3, db3\n grads[\"bnGamma3\"], grads[\"bnBeta3\"] = dgamma3, dbeta3\n\n grads['W2'], grads['b2'] = dW2 + self.reg * W2, db2\n grads[\"sbnGamma2\"], grads[\"sbnBeta2\"] = dgamma2, dbeta2\n\n grads['W1'], grads['b1'] = dW1 + self.reg * W1, db1\n grads[\"sbnGamma1\"], grads[\"sbnBeta1\"] = dgamma1, dbeta1\n\n ############################################################################\n # END OF YOUR CODE #\n ############################################################################\n \n return loss, grads", "def _str_to_loss(self, loss_str: str) -> KernelLoss:\n if loss_str == \"svc_loss\":\n loss_obj = SVCLoss()\n else:\n raise ValueError(f\"Unknown loss {loss_str}!\")\n\n return loss_obj", "def neural_network(X, Y, Xs_test, Ys_test):\n\n X_n = (X - np.mean(X, axis = 0)) / np.std(X, axis = 0)\n Y_n = (Y - np.mean(Y, axis = 0)) / np.std(Y, axis = 0)\n \n model = Model(X_n.shape[1])\n model.addLayer(DenseLayer(100, ReLUActivation()))\n model.addLayer(DenseLayer(100, ReLUActivation()))\n model.addLayer(DenseLayer(Y.shape[1],LinearActivation()))\n model.initialize(QuadraticCost())\n\n mses = []\n for i, X_test in enumerate(Xs_test):\n Y_test = Ys_test[i]\n Y_pred = model.predict((X_test - np.mean(X, axis = 0)) / np.std(X, axis = 0)) \n Y_pred = Y_pred * np.std(Y, axis = 0) + np.mean(Y, axis = 0)\n mse = np.mean(np.sqrt(np.sum((Y_pred - Y_test) ** 2, axis = 1)) ) \n mses.append(mse)\n\n return mses", "def setup_loss(self):\n with vs.variable_scope(\"loss\"):\n self.loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(labels=self.label_placeholder, logits=self.label_predictions))", "def get_network(encoded, is_training, scale=1., weight_decay=0.00004):\n with tf.variable_scope(\"Dense\"):\n batch_size = encoded.get_shape()[0].value\n end_points = {}\n bn_params = {\"is_training\": is_training,\n 'epsilon': 1e-3\n }\n pc = tf.reshape(encoded, [batch_size, 1, 1, -1])\n net = slim.conv2d(pc,\n # 400,\n max(int(round(400 * scale)), 32),\n [1, 1],\n padding='SAME',\n stride=1,\n normalizer_fn=slim.batch_norm,\n normalizer_params=bn_params,\n biases_initializer=tf.zeros_initializer(),\n weights_regularizer=slim.l2_regularizer(weight_decay),\n scope='fc1',\n activation_fn=tf.nn.relu6)\n net = slim.dropout(net, keep_prob=0.8, is_training=is_training, 
scope='dp1')\n net = slim.conv2d(net,\n # 400,\n max(int(round(400 * scale)), 32),\n [1, 1],\n padding='SAME',\n stride=1,\n normalizer_fn=slim.batch_norm,\n normalizer_params=bn_params,\n biases_initializer=tf.zeros_initializer(),\n weights_regularizer=slim.l2_regularizer(weight_decay),\n scope='fc2',\n activation_fn=tf.nn.relu6)\n net = slim.dropout(net, keep_prob=0.4, is_training=is_training, scope='dp2')\n net = slim.conv2d(net,\n 40, [1, 1],\n padding='SAME',\n stride=1,\n # normalizer_fn=slim.batch_norm,\n # normalizer_params=bn_params,\n biases_initializer=tf.zeros_initializer(),\n weights_regularizer=slim.l2_regularizer(weight_decay),\n scope='fc3',\n # activation_fn=tf.nn.relu6,\n activation_fn=None,\n )\n # print(net)\n net = tf.reshape(net, [batch_size, -1])\n return net, end_points", "def get_loss(self, Loss, results, inputs, device):\n return" ]
[ "0.63613164", "0.6158965", "0.6017301", "0.5997169", "0.590424", "0.5783352", "0.57639736", "0.5728302", "0.5715514", "0.5708102", "0.5672599", "0.56407714", "0.5635773", "0.5630035", "0.56168777", "0.5610483", "0.5603815", "0.55986285", "0.5573956", "0.5568347", "0.55591923", "0.5530415", "0.5523093", "0.55166364", "0.55079204", "0.5489856", "0.5487464", "0.54823416", "0.5481739", "0.5477149", "0.5475032", "0.54722047", "0.5462614", "0.5458318", "0.5455893", "0.5453518", "0.54455644", "0.5436783", "0.54337656", "0.5432876", "0.543142", "0.54277635", "0.5414054", "0.5408276", "0.5407549", "0.5406568", "0.5399659", "0.53760463", "0.53555065", "0.5354162", "0.5350671", "0.53491354", "0.53481376", "0.53457594", "0.534236", "0.53422904", "0.53382516", "0.5329332", "0.5327596", "0.5322959", "0.53182423", "0.5316764", "0.5310346", "0.5307372", "0.53038174", "0.52997786", "0.5297393", "0.528959", "0.52891934", "0.52886117", "0.5286926", "0.5285015", "0.527316", "0.52717376", "0.52711856", "0.5258975", "0.5250301", "0.52471787", "0.5246374", "0.5242632", "0.5241922", "0.5241525", "0.52337694", "0.5229304", "0.5222062", "0.52171093", "0.5199925", "0.519926", "0.51976323", "0.5195158", "0.5190193", "0.5187556", "0.5187408", "0.51849514", "0.51800585", "0.5178273", "0.51780427", "0.51765126", "0.51720554", "0.51703835" ]
0.6577575
0
create autoencoder Theano update for network given in argument
def create_autoencoder(nnet): floatX = Cfg.floatX B = Cfg.ae_B C = Cfg.ae_C ndim = nnet.data._X_train.ndim if ndim == 2: inputs = T.matrix('inputs') elif ndim == 4: inputs = T.tensor4('inputs') final_layer = nnet.all_layers[-1] # Backpropagation trainable_params = lasagne.layers.get_all_params(final_layer, trainable=True) prediction = lasagne.layers.get_output(final_layer, inputs=inputs, deterministic=False) # use l2 or binary crossentropy loss (features are scaled to [0,1]) if Cfg.ae_loss == "l2": loss = lasagne.objectives.squared_error(prediction, inputs) if Cfg.ae_loss == "ce": loss = lasagne.objectives.binary_crossentropy(prediction, inputs) scores = T.sum(loss, axis=range(1, ndim), dtype='floatX') loss = T.mean(scores) # Regularization if Cfg.ae_weight_decay: l2_penalty = (floatX(0.5) / C) * regularize_network_params(final_layer, l2) else: l2_penalty = T.cast(0, dtype='floatX') # Network activation sparsity regularization if Cfg.ae_sparsity_penalty: sparsity_penalty = ((1 / B) * get_sparsity_penalty(nnet, inputs, Cfg.ae_sparsity, mode=Cfg.ae_sparsity_mode, deterministic=False)) else: sparsity_penalty = T.cast(0, dtype='floatX') train_obj = loss + l2_penalty + sparsity_penalty updates = get_updates(nnet, train_obj, trainable_params, solver=nnet.ae_solver) nnet.ae_backprop = theano.function([inputs], [loss, l2_penalty, sparsity_penalty, scores], updates=updates) # Forwardpropagation test_prediction = lasagne.layers.get_output(final_layer, inputs=inputs, deterministic=True) # use l2 or binary crossentropy loss (features are scaled to [0,1]) if Cfg.ae_loss == "l2": test_loss = lasagne.objectives.squared_error(test_prediction, inputs) if Cfg.ae_loss == "ce": test_loss = lasagne.objectives.binary_crossentropy(test_prediction, inputs) test_scores = T.sum(test_loss, axis=range(1, ndim), dtype='floatX') test_loss = T.mean(test_scores) # Network activation sparsity regularization (with determinisitc=True) if Cfg.ae_sparsity_penalty: test_sparsity_penalty = ((1 / B) * get_sparsity_penalty(nnet, inputs, Cfg.ae_sparsity, mode=Cfg.ae_sparsity_mode, deterministic=True)) else: test_sparsity_penalty = T.cast(0, dtype='floatX') nnet.ae_forward = theano.function([inputs], [test_loss, l2_penalty, test_sparsity_penalty, test_scores, test_prediction])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def build_full_conv_autoencoder():\n input_img = Input(shape=(84, 84, 3))\n\n x = Convolution2D(48, 8, 8, activation='relu', border_mode='same', name='c1')(input_img)\n x = MaxPooling2D((2, 2), border_mode='same')(x)\n x = Convolution2D(32, 4, 4, activation='relu', border_mode='same', name='c2')(x)\n x = MaxPooling2D((2, 2), border_mode='same')(x)\n x = Convolution2D(32, 3, 3, activation='relu', border_mode='same', name='c3')(x)\n encoded = MaxPooling2D((3, 3), border_mode='same')(x)\n\n x = Convolution2D(32, 3, 3, activation='relu', border_mode='same', name='c4')(encoded)\n x = UpSampling2D((3, 3))(x)\n x = Convolution2D(32, 4, 4, activation='relu', border_mode='same', name='c5')(x)\n x = UpSampling2D((2, 2))(x)\n x = Convolution2D(48, 8, 8, activation='relu', border_mode='same', name='c6')(x)\n x = UpSampling2D((2, 2))(x)\n decoded = Convolution2D(3, 4, 4, activation='sigmoid', border_mode='same', name='c7')(x)\n\n autoencoder = Model(input_img, decoded)\n autoencoder.compile(optimizer='adam', metrics=['mse'], loss='mse')\n autoencoder.summary()\n return autoencoder", "def autoencoder(dims, act='relu', init='glorot_uniform'):\n n_stacks = len(dims) - 1\n # input\n input_img = Input(shape=(dims[0],), name='input')\n x = input_img\n # internal layers in encoder\n for i in range(n_stacks-1):\n x = Dense(dims[i + 1], activation=act, kernel_initializer=init, name='encoder_%d' % i)(x)\n\n # hidden layer\n encoded = Dense(dims[-1], kernel_initializer=init, name='encoder_%d' % (n_stacks - 1))(x) # hidden layer, features are extracted from here\n\n x = encoded\n # internal layers in decoder\n for i in range(n_stacks-1, 0, -1):\n x = Dense(dims[i], activation=act, kernel_initializer=init, name='decoder_%d' % i)(x)\n\n # output\n x = Dense(dims[0], kernel_initializer=init, name='decoder_0')(x)\n decoded = x\n return Model(inputs=input_img, outputs=decoded, name='AE'), Model(inputs=input_img, outputs=encoded, name='encoder')", "def autoencoder_model(optimizer, learning_rate, \n filter_block1, kernel_size_block1, \n filter_block2, kernel_size_block2, \n filter_block3, kernel_size_block3, \n filter_block4, kernel_size_block4, \n activation_str, padding):\n # Input Tensors - fully conv\n input_img = Input(shape=(None, None, 1))\n # Encoder Part\n x = Conv2D(filters=filter_block1, kernel_size=kernel_size_block1, padding=padding)(input_img) # 420x540x32\n x = Activation('relu')(x)\n x = MaxPooling2D()(x) # 210x270x32\n encoded = Conv2D(filters=filter_block2, kernel_size=kernel_size_block2, padding=padding)(x) # 105x135x32\n # Decoder Part\n x = Conv2D(filters=filter_block3, kernel_size=kernel_size_block3, padding=padding)(encoded) # 210x270x32\n x = Activation('relu')(x)\n x = UpSampling2D()(x) # 420x540x32\n decoded = Conv2D(filters=filter_block4, kernel_size=kernel_size_block4, activation='sigmoid', padding=padding)(x) # 420x540x1\n\n # Build the model\n autoencoder = Model(inputs=input_img, outputs=decoded)\n opt = optimizer(learning_rate=learning_rate)\n autoencoder.compile(loss=\"binary_crossentropy\", optimizer=opt)\n autoencoder.summary()\n return autoencoder", "def autoencoder(X, inp_dims=2048):\n drop = tf.keras.layers.Dropout(rate=0.2)\n FC1 = tf.layers.Dense(units=inp_dims // 2, activation=\"tanh\", name='fc1')\n FC2 = tf.layers.Dense(units=inp_dims // 4, activation=\"tanh\", name='fc2')\n FC3 = tf.layers.Dense(units=inp_dims // 8, activation=None, name='fc3')\n Act = tf.keras.layers.Activation(activation=\"tanh\")\n # FC4 = tf.layers.Dense(units=inp_dims // 
8,activation=\"tanh\",name='fc4')\n FC5 = tf.layers.Dense(units=inp_dims // 4, activation=\"tanh\", name='fc5')\n FC6 = tf.layers.Dense(units=inp_dims // 2, activation=None, name='fc6')\n FC7 = tf.layers.Dense(units=inp_dims, activation=None, name='fc7')\n X = FC1(drop(X))\n X = FC2(drop(X))\n X = FC3(X)\n fea = X\n X_up = Act(X)\n X_up = FC5(X_up)\n X_up = FC6(drop(X_up))\n pred = FC7(drop(X_up))\n return pred, fea", "def autoencoder(dims, act='relu', init='glorot_uniform'):\n x = tf.keras.layers.Input(shape=(1,), dtype=tf.string)\n\n h = tf.keras.layers.Lambda(UniversalEmbedding, output_shape=(512,))(x)\n\n return Model(inputs=x, outputs=h, name='encoder')", "def create_update(nnet):\n\n if nnet.data._X_val.ndim == 2:\n inputs = T.matrix('inputs')\n elif nnet.data._X_val.ndim == 4:\n inputs = T.tensor4('inputs')\n\n targets = T.ivector('targets')\n\n # compile theano functions\n if Cfg.softmax_loss:\n compile_update_softmax(nnet, inputs, targets)\n elif Cfg.ocsvm_loss:\n if Cfg.rho_fixed:\n compile_update_ocsvm_rho_fixed(nnet, inputs, targets)\n else:\n compile_update_ocsvm(nnet, inputs, targets)\n elif Cfg.svdd_loss:\n compile_update_svdd(nnet, inputs, targets)\n elif Cfg.reconstruction_loss:\n create_autoencoder(nnet)\n else:\n compile_update_default(nnet, inputs, targets)", "def build_autoencoder(self):\n # first build the encoder model\n inputs = Input(shape=(self.state_dim, ), name='state')\n feature_size = 32\n x = Dense(256, activation='relu')(inputs)\n x = Dense(128, activation='relu')(x)\n feature = Dense(feature_size, name='feature_vector')(x)\n\n # instantiate encoder model\n self.encoder = Model(inputs, feature, name='encoder')\n self.encoder.summary()\n plot_model(self.encoder,\n to_file='encoder.png', \n show_shapes=True)\n\n # build the decoder model\n feature_inputs = Input(shape=(feature_size,), \n name='decoder_input')\n x = Dense(128, activation='relu')(feature_inputs)\n x = Dense(256, activation='relu')(x)\n outputs = Dense(self.state_dim, activation='linear')(x)\n\n # instantiate decoder model\n self.decoder = Model(feature_inputs, \n outputs, \n name='decoder')\n self.decoder.summary()\n plot_model(self.decoder, \n to_file='decoder.png', \n show_shapes=True)\n\n # autoencoder = encoder + decoder\n # instantiate autoencoder model\n self.autoencoder = Model(inputs, \n self.decoder(self.encoder(inputs)),\n name='autoencoder')\n self.autoencoder.summary()\n plot_model(self.autoencoder, \n to_file='autoencoder.png', \n show_shapes=True)\n\n # Mean Square Error (MSE) loss function, Adam optimizer\n self.autoencoder.compile(loss='mse', optimizer='adam')", "def build_autoencoder(input_dim):\r\n input_layer = Input(shape=(input_dim, 1))\r\n enc = Conv1D(filters=16, kernel_size=2, padding='same', activation='relu')(input_layer)\r\n enc = MaxPooling1D(pool_size=2, padding='same')(enc)\r\n enc = Conv1D(filters=32, kernel_size=2, padding='same', activation='relu')(enc)\r\n enc = MaxPooling1D(pool_size=2, padding='same')(enc)\r\n enc = Conv1D(filters=64, kernel_size=2, padding='same', activation='relu')(enc)\r\n enc = MaxPooling1D(pool_size=2, padding='same')(enc)\r\n enc = Flatten()(enc)\r\n enc = Dense(64)(enc)\r\n\r\n dec = Dense(200704)(enc)\r\n dec = Reshape((3136, 64))(dec)\r\n dec = Conv1D(filters=64, kernel_size=2, padding='same', activation='relu')(dec)\r\n dec = UpSampling1D(2)(dec)\r\n dec = Conv1D(filters=32, kernel_size=2, padding='same', activation='relu')(dec)\r\n dec = UpSampling1D(2)(dec)\r\n dec = Conv1D(filters=16, kernel_size=2, padding='same', 
activation='relu')(dec)\r\n dec = UpSampling1D(2)(dec)\r\n dec = Conv1D(filters=1, kernel_size=2, padding='same', activation='relu')(dec)\r\n\r\n autoencoder = Model(input_layer, dec)\r\n autoencoder.compile(optimizer='adam', loss='mean_squared_error', metrics=['accuracy'])\r\n autoencoder.summary()\r\n encoder = Model(input_layer, enc)\r\n return autoencoder, encoder", "def autoencoder(dimensions=[784, 512, 256, 64]):\n # %% input to the network\n x = tf.placeholder(tf.float32, [None, dimensions[0]], name='x')\n current_input = x\n\n # %% Build the encoder\n encoder = []\n for layer_i, n_output in enumerate(dimensions[1:]):\n n_input = int(current_input.get_shape()[1])\n W = tf.Variable(\n tf.random_uniform([n_input, n_output],\n -1.0 / math.sqrt(n_input),\n 1.0 / math.sqrt(n_input)))\n b = tf.Variable(tf.zeros([n_output]))\n encoder.append(W)\n output = tf.nn.tanh(tf.matmul(current_input, W) + b)\n current_input = output\n\n # Latent representation (embedding, neural coding)\n z = current_input\n encoder.reverse()\n\n # Build the decoder using the same weights\n for layer_i, n_output in enumerate(dimensions[:-1][::-1]):\n W = tf.transpose(encoder[layer_i])\n b = tf.Variable(tf.zeros([n_output]))\n output = tf.nn.tanh(tf.matmul(current_input, W) + b)\n current_input = output\n\n # Now have the reconstruction through the network\n y = current_input\n\n # Cost function measures pixel-wise difference\n cost = tf.reduce_sum(tf.square(y - x))\n return {'x': x, 'z': z, 'y': y, 'cost': cost}", "def autoencoder3(ds, compression_factor=16, input_noise=0.2, dropout_p=0.1, activ='tanh', final_activ='tanh'):\n # compression_factor=20\n print('DS shape: {}'.format(ds.shape))\n in_dims = np.prod(ds.shape[1:])\n encoding_dim = int(in_dims // compression_factor)\n in_shape = ds[0].shape\n print('Input Dims: {}, input shape: {}, encoding dims: {}'.format(in_dims, in_shape, encoding_dim))\n\n # this is our input placeholder\n input_img = Input(shape=(in_dims,))\n encoded = GaussianNoise(input_noise)(input_img)\n\n encoded = Dense(encoding_dim * 4, activation=activ, activity_regularizer=regularizers.activity_l1(10e-5))(encoded)\n # encoded = Dense(encoding_dim*4, activation='sigmoid')(input_img)\n encoded = BatchNormalization()(encoded)\n encoded = Dropout(dropout_p)(encoded) # batch norm before dropout\n # encoded = Dense(encoding_dim*3, activation=activ)(encoded)\n # encoded = Dropout(dropout_p)(encoded)\n encoded = Dense(encoding_dim * 2, activation=activ)(encoded)\n encoded = Dropout(dropout_p)(encoded)\n\n encoded = Dense(encoding_dim, activation=activ)(encoded)\n # Middle Noise\n encoded = GaussianNoise(0.02)(encoded)\n\n # DECODED LAYER\n # \"decoded\" is the lossy reconstruction of the input\n decoded = Dense(encoding_dim * 2, activation=activ)(encoded)\n # decoded = Dropout(dropout_p)(decoded)\n decoded = Dense(encoding_dim * 4, activation=activ)(decoded)\n decoded = Dropout(dropout_p)(decoded)\n decoded = Dense(in_dims, activation=final_activ)(decoded)\n\n # MODEL\n autoencoder = Model(input=input_img, output=decoded)\n\n # SEPERATE ENCODER MODEL\n encoder = Model(input=input_img, output=encoded)\n\n # create a placeholder for an encoded (32-dimensional) input\n encoded_input = Input(shape=(encoding_dim,))\n\n # retrieve the last layer of the autoencoder model\n decoder_layer0 = autoencoder.layers[-4]\n decoder_layer1 = autoencoder.layers[-3]\n decoder_layer2 = autoencoder.layers[-2]\n decoder_layer3 = autoencoder.layers[-1]\n # todo: make this into a dedicated unrolling function\n\n # create the 
decoder model - unrolling the model as we go\n decoder = Model(input=encoded_input, output=decoder_layer3(decoder_layer2(\n decoder_layer1(decoder_layer0(encoded_input)))))\n\n # model.add(GaussianNoise(0.1), input_shape=(n_input_len,))\n autoencoder.compile(optimizer='adadelta', loss='binary_crossentropy')\n\n autoencoder.model_name = 'Autoencoder 1'\n return autoencoder, encoder, decoder", "def _define_encoder(self):\n self.encoder = nn.Sequential(nn.Conv2d(in_channels=3, out_channels=32, kernel_size=4, stride=2, padding=1), # B, 32, 32, 32\n nn.SELU(),\n nn.Conv2d(32, 32, 4, 2, 1), # B, 32, 16, 16\n nn.SELU(),\n nn.Conv2d(32, 64, 4, 2, 1), # B, 64, 8, 8\n nn.SELU(),\n nn.Conv2d(64, 64, 4, 2, 1), # B, 64, 4, 4\n nn.SELU(),\n nn.Conv2d(64, 256, 4, 1), # B, 256, 1, 1\n nn.SELU(),\n View((-1, 256 * 1 * 1)), # B, 256\n nn.Linear(256, self.encoding_shape * 2), # B, z_dim*2\n )", "def build_conv_combo_autoencoder():\n input_img = Input(shape=(84, 84, 1))\n\n x = Convolution2D(48, 4, 4, activation='relu', border_mode='same', name='c1')(input_img)\n x = MaxPooling2D((2, 2), border_mode='same')(x)\n x = Convolution2D(48, 4, 4, activation='relu', border_mode='same', name='c2')(x)\n x = MaxPooling2D((3, 3), border_mode='same')(x)\n x = Flatten()(x)\n encoded = Dense(512, activation='relu')(x)\n\n encoded_input = Input((512,))\n d1 = Dense(9408, activation='relu')(encoded_input)\n d2 = Reshape((14, 14, 48))(d1)\n d3 = Convolution2D(48, 4, 4, activation='relu', border_mode='same', name='c5')(d2)\n d4 = UpSampling2D((3, 3))(d3)\n d5 = Convolution2D(48, 4, 4, activation='relu', border_mode='same', name='c6')(d4)\n d6 = UpSampling2D((2, 2))(d5)\n decoded = Convolution2D(1, 4, 4, activation='relu', border_mode='same', name='c9')(d6)\n\n encoder = Model(input=input_img, output=encoded, name='conv_encoder')\n decoder = Model(input=encoded_input, output=decoded, name='conv_decoder')\n\n autoencoder = Sequential(name='full_conv_autoencoder')\n autoencoder.add(encoder)\n autoencoder.add(decoder)\n\n encoder.compile(optimizer='adam', loss='mse')\n encoder.summary()\n decoder.compile(optimizer='adam', loss='mse')\n decoder.summary()\n autoencoder.compile(optimizer='adam', metrics=['mse'], loss='mse')\n autoencoder.summary()\n return autoencoder, encoder, decoder", "def SemiAutoencoder(ds, compression_factor=16, input_noise=0.2, dropout_p=0.1, activ='tanh', final_activ='tanh'):\n # compression_factor=20\n print('DS shape: {}'.format(ds.shape))\n in_dims = np.prod(ds.shape[1:])\n encoding_dim = int(in_dims // compression_factor)\n in_shape = ds[0].shape\n print('Input Dims: {}, input shape: {}, encoding dims: {}'.format(in_dims, in_shape, encoding_dim))\n\n # this is our input placeholder\n input_img = Input(shape=(in_dims,))\n encoded = GaussianNoise(input_noise)(input_img)\n\n encoded = Dense(encoding_dim * 4, activation=activ, activity_regularizer=regularizers.activity_l1(10e-5))(encoded)\n # encoded = Dense(encoding_dim*4, activation='sigmoid')(input_img)\n encoded = BatchNormalization()(encoded)\n encoded = Dropout(dropout_p)(encoded) # batch norm before dropout\n # encoded = Dense(encoding_dim*3, activation=activ)(encoded)\n # encoded = Dropout(dropout_p)(encoded)\n encoded = Dense(encoding_dim * 2, activation=activ)(encoded)\n encoded = Dropout(dropout_p)(encoded)\n\n encoded = Dense(encoding_dim, activation=activ)(encoded)\n # Middle Noise\n encoded = GaussianNoise(0.02)(encoded)\n\n # DECODED LAYER\n # \"decoded\" is the lossy reconstruction of the input\n decoded = Dense(encoding_dim * 2, 
activation=activ)(encoded)\n # decoded = Dropout(dropout_p)(decoded)\n decoded = Dense(encoding_dim * 4, activation=activ)(decoded)\n decoded = Dropout(dropout_p)(decoded)\n decoded = Dense(in_dims, activation=final_activ)(decoded)\n\n # MODEL\n autoencoder = Model(input=input_img, output=decoded)\n\n # SEPERATE ENCODER MODEL\n encoder = Model(input=input_img, output=encoded)\n\n # create a placeholder for an encoded (32-dimensional) input\n encoded_input = Input(shape=(encoding_dim,))\n\n # retrieve the last layer of the autoencoder model\n decoder_layer0 = autoencoder.layers[-4]\n decoder_layer1 = autoencoder.layers[-3]\n decoder_layer2 = autoencoder.layers[-2]\n decoder_layer3 = autoencoder.layers[-1]\n # todo: make this into a dedicated unrolling function\n\n # create the decoder model - unrolling the model as we go\n decoder = Model(input=encoded_input, output=decoder_layer3(decoder_layer2(\n decoder_layer1(decoder_layer0(encoded_input)))))\n\n # model.add(GaussianNoise(0.1), input_shape=(n_input_len,))\n autoencoder.compile(optimizer='adadelta', loss='binary_crossentropy')\n\n autoencoder.model_name = 'Autoencoder 1'\n return autoencoder, encoder, decoder", "def train_autoencoder(data, n_iters=10, batch_size=100):\n tqdm.write(f'Training a fully-convolutional autoencoder for {n_iters} iterations.')\n (trainx, trainy), (valx, valy), (testx, testy) = data\n train_size, val_size, test_size = trainx.shape[0], valx.shape[0], testx.shape[0]\n train_batches = (train_size - 1) // batch_size + 1\n val_batches = (val_size - 1) // batch_size + 1\n test_batches = (test_size - 1) // batch_size + 1\n\n model = Network()\n model.add_layer(ConvLayer(10, (2, 2), (2, 2), 1)) \\\n .add_layer(ConvLayer(10, (2, 2), (2, 2), 1)) \\\n .add_layer(ConvLayer(15, (1, 1), (2, 2), 1)) \\\n .add_layer(TransposedConvLayer(10, (1, 1), (2, 2), 1)) \\\n .add_layer(TransposedConvLayer(10, (2, 2), (2, 2), 1)) \\\n .add_layer(TransposedConvLayer(1, (2, 2), (2, 2), 1)) \\\n .add_layer(SSELayer())\n for i in range(1, n_iters + 1):\n train_order = np.random.permutation(train_size)\n bar = trange(train_batches, file=sys.stdout)\n for j in bar:\n cost = model.forward(trainx[train_order[j * batch_size: (j + 1) * batch_size]],\n trainx[train_order[j * batch_size: (j + 1) * batch_size]])\n bar.set_description(f'Curr squared error: {cost}')\n model.backward()\n model.adam_trainstep()\n errors = []\n for j in range(val_batches):\n errors.append(model.forward(valx[j * batch_size:(j + 1) * batch_size],\n valx[j * batch_size:(j + 1) * batch_size]))\n tqdm.write(f'Validation squared error: {np.mean(errors)}')\n tqdm.write('-------------------------------------------------------')\n\n errors = []\n for i in range(test_batches):\n errors.append(model.forward(testx[i * batch_size:(i + 1) * batch_size],\n testx[i * batch_size:(i + 1) * batch_size]))\n tqdm.write(f'Test squared error: {np.mean(errors)}')\n tqdm.write('-------------------------------------------------------')", "def _define_encoder(self):\n self.encoder = nn.Sequential(View((-1, 64 * 64 * 3)),\n nn.Linear(64 * 64 * 3, 5120, bias=False), nn.SELU(),\n nn.BatchNorm1d(5120),\n nn.Linear(5120, 2560, bias=False), nn.SELU(),\n nn.BatchNorm1d(2560),\n nn.Linear(2560, 512, bias=False), nn.SELU(),\n nn.BatchNorm1d(512),\n nn.Linear(512, 128, bias=False), nn.SELU(),\n nn.BatchNorm1d(128),\n nn.Linear(128, self.encoding_shape, bias=False), nn.SELU(),\n )", "def encoder_decoder_archi_gan(inputs, is_train):\n\n encoder_layers = []\n\n encoded = inputs\n\n 
encoder_layers.append(encoded)\n\n for i in range(config.encoder_layers):\n encoded = encoder_conv_block_gan(encoded, i, is_train)\n encoder_layers.append(encoded)\n \n encoder_layers.reverse()\n\n\n\n decoded = encoder_layers[0]\n\n for i in range(config.encoder_layers):\n decoded = decoder_conv_block_gan(decoded, encoder_layers[i+1], i, is_train)\n\n return decoded", "def run_autoencoder2(experiment,\n X_train, y_train, X_valid, y_valid, X_test, y_test,\n model_path, prev_model_path,\n code_size=600, prev_code_size=1000):\n\n if os.path.isfile(model_path) or \\\n os.path.isfile(model_path + \".meta\"):\n return\n\n # Convert training, validation and test set to the new representation\n prev_model = ae(X_train.shape[1], prev_code_size,\n corruption=0.0, # Disable corruption for conversion\n enc=tf.nn.tanh, dec=None)\n\n init = tf.global_variables_initializer()\n with tf.Session() as sess:\n sess.run(init)\n saver = tf.train.Saver(prev_model[\"params\"], write_version=tf.train.SaverDef.V2)\n if os.path.isfile(prev_model_path):\n saver.restore(sess, prev_model_path)\n X_train = sess.run(prev_model[\"encode\"], feed_dict={prev_model[\"input\"]: X_train})\n X_valid = sess.run(prev_model[\"encode\"], feed_dict={prev_model[\"input\"]: X_valid})\n X_test = sess.run(prev_model[\"encode\"], feed_dict={prev_model[\"input\"]: X_test})\n del prev_model\n\n reset()\n\n # Hyperparameters\n learning_rate = 0.002\n corruption = 0.68\n ae_enc = tf.nn.tanh\n ae_dec = None\n\n training_iters = 100\n batch_size = 50\n n_classes = 2\n\n # Load model\n model = ae(prev_code_size, code_size, corruption=corruption, enc=ae_enc, dec=ae_dec)\n\n # Use GD for optimization of model cost\n optimizer = tf.train.AdamOptimizer(learning_rate, beta1=0.9).minimize(model[\"cost\"])\n\n # Initialize Tensorflow session\n init = tf.global_variables_initializer()\n with tf.Session() as sess:\n sess.run(init)\n\n # Define model saver\n saver = tf.train.Saver(model[\"params\"], write_version=tf.train.SaverDef.V2)\n\n # Initialize with an absurd cost for model selection\n prev_costs = np.array([9999999999] * 3)\n\n # Iterate Epochs\n for epoch in range(training_iters):\n\n # randomly shuffle data\n index = np.arange(X_train.shape[0])\n random.shuffle(index)\n\n X_train = X_train[index,]\n y_train = y_train[index]\n\n # Break training set into batches\n batches = range(len(X_train) // batch_size)\n costs = np.zeros((len(batches), 3))\n\n for ib in batches:\n # Compute start and end of batch from training set data array\n from_i = ib * batch_size\n to_i = (ib + 1) * batch_size\n\n # Select current batch\n batch_xs, batch_ys = X_train[from_i:to_i], y_train[from_i:to_i]\n\n # Run optimization and retrieve training cost\n _, cost_train = sess.run(\n [optimizer, model[\"cost\"]],\n feed_dict={\n model[\"input\"]: batch_xs\n }\n )\n\n # Compute validation cost\n cost_valid = sess.run(\n model[\"cost\"],\n feed_dict={\n model[\"input\"]: X_valid\n }\n )\n\n # Compute test cost\n cost_test = sess.run(\n model[\"cost\"],\n feed_dict={\n model[\"input\"]: X_test\n }\n )\n\n costs[ib] = [cost_train, cost_valid, cost_test]\n\n # Compute the average costs from all batches\n costs = costs.mean(axis=0)\n cost_train, cost_valid, cost_test = costs\n\n # Pretty print training info\n print(format_config(\n \"Exp={experiment}, Model=ae2, Iter={epoch:5d}, Cost={cost_train:.6f} {cost_valid:.6f} {cost_test:.6f}\",\n {\n \"experiment\": experiment,\n \"epoch\": epoch,\n \"cost_train\": cost_train,\n \"cost_valid\": cost_valid,\n \"cost_test\": cost_test,\n 
}\n ))\n\n # Save better model if optimization achieves a lower cost\n if cost_valid < prev_costs[1]:\n print(\"Saving better model\")\n saver.save(sess, model_path)\n prev_costs = costs\n else:\n print", "def deep_autoencoder(X_train_input, X_test_input, encoding_dim = 20):\r\n input_dim = X_train_input.shape[1]\r\n \r\n autoencoder = Sequential()\r\n \r\n # Encoder Layers\r\n autoencoder.add(Dense(4 * encoding_dim, input_shape=(input_dim,), activation='relu'))\r\n autoencoder.add(Dense(2 * encoding_dim, activation='relu'))\r\n autoencoder.add(Dense(encoding_dim, activation='relu'))\r\n \r\n # Decoder Layers\r\n autoencoder.add(Dense(2 * encoding_dim, activation='relu'))\r\n autoencoder.add(Dense(4 * encoding_dim, activation='relu'))\r\n autoencoder.add(Dense(input_dim, activation='sigmoid'))\r\n \r\n autoencoder.compile(optimizer='adam', loss='binary_crossentropy')\r\n autoencoder.fit(X_train_input, X_train_input,\r\n epochs=50,\r\n batch_size=256,\r\n validation_data=(X_test_input, X_test_input))\r\n \r\n input_img = Input(shape=(input_dim,))\r\n encoder_layer1 = autoencoder.layers[0]\r\n encoder_layer2 = autoencoder.layers[1]\r\n encoder_layer3 = autoencoder.layers[2]\r\n encoder = Model(input_img, encoder_layer3(encoder_layer2(encoder_layer1(input_img))))\r\n \r\n X_train_output = encoder.predict(X_train_input)\r\n X_test_output = encoder.predict(X_test_input)\r\n \r\n return X_train_output, X_test_output", "def compile_update_softmax(nnet, inputs, targets):\n\n floatX = Cfg.floatX\n C = Cfg.C\n\n final_layer = nnet.all_layers[-1]\n trainable_params = lasagne.layers.get_all_params(final_layer,\n trainable=True)\n\n # Regularization\n if Cfg.weight_decay:\n l2_penalty = (floatX(0.5) / C) * get_l2_penalty(nnet, Cfg.include_bias)\n else:\n l2_penalty = T.cast(0, dtype='floatX')\n\n # Backpropagation\n prediction = lasagne.layers.get_output(final_layer, inputs=inputs,\n deterministic=False)\n if Cfg.ad_experiment:\n train_loss = T.mean(l_objectives.binary_crossentropy(\n prediction.flatten(), targets),\n dtype='floatX')\n train_acc = T.mean(l_objectives.binary_accuracy(prediction.flatten(),\n targets),\n dtype='floatX')\n else:\n train_loss = T.mean(l_objectives.categorical_crossentropy(prediction,\n targets),\n dtype='floatX')\n train_acc = T.mean(T.eq(T.argmax(prediction, axis=1), targets),\n dtype='floatX')\n\n\n train_obj = T.cast(train_loss + l2_penalty, dtype='floatX')\n updates = get_updates(nnet, train_obj, trainable_params, solver=nnet.solver)\n nnet.backprop = theano.function([inputs, targets],\n [train_obj, train_acc],\n updates=updates)\n\n # Forwardpropagation\n test_prediction = lasagne.layers.get_output(final_layer, inputs=inputs,\n deterministic=True)\n if Cfg.ad_experiment:\n test_loss = T.mean(l_objectives.binary_crossentropy(\n test_prediction.flatten(), targets), dtype='floatX')\n test_acc = T.mean(l_objectives.binary_accuracy(\n test_prediction.flatten(), targets), dtype='floatX')\n else:\n test_loss = T.mean(l_objectives.categorical_crossentropy(\n test_prediction, targets), dtype='floatX')\n test_acc = T.mean(T.eq(T.argmax(test_prediction, axis=1), targets),\n dtype='floatX')\n test_obj = T.cast(test_loss + l2_penalty, dtype='floatX')\n nnet.forward = theano.function([inputs, targets],\n [test_obj, test_acc, test_prediction,\n l2_penalty, test_loss])", "def auto_encoder(data: np.ndarray) -> np.ndarray:\n input_img = Input(shape=(784,))\n encoded = Dense(128, activation='relu')(input_img)\n encoded = Dense(64, activation='relu')(encoded)\n encoded = Dense(32, 
activation='relu')(encoded)\n\n decoded = Dense(64, activation='relu')(encoded)\n decoded = Dense(128, activation='relu')(decoded)\n decoded = Dense(784, activation='sigmoid')(decoded)\n\n autoencoder = Model(input_img, decoded)\n autoencoder.compile(optimizer='adadelta', loss='binary_crossentropy')\n\n autoencoder.fit(x_train, x_train,\n epochs=100,\n batch_size=256,\n shuffle=True,\n validation_data=(x_test, x_test))", "def define_reparameterization_network(self) -> None:\n self.input_tokens = nn.Parameter(torch.arange(self.prefix_token_num).long(), requires_grad=False) # to allow automatic devicing\n self.wte = nn.Embedding(self.prefix_token_num, self.embed_dim)\n self.control_trans = nn.Sequential(\n nn.Linear(self.embed_dim, self.mid_dim),\n nn.Tanh(),\n nn.Linear(self.mid_dim, self.total_parameters_num//self.prefix_token_num)\n )", "def encoder(enc_input, attn_bias, n_layer, n_head,\n d_key, d_value, d_model, d_inner_hid, pos_enc,\n preporstprocess_dropout, attention_dropout,\n relu_dropout, preprocess_cmd='n',\n postprocess_cmd='da'):\n for i in range(n_layer):\n enc_output = encoder_layer(enc_input, attn_bias, n_head,\n d_key, d_value, d_model,d_inner_hid, pos_enc,\n prepostprocess_dropout, attention_dropout,relu_dropout,\n preprocess_cmd, postprocess_cmd\n )\n enc_input = enc_output\n enc_output = pre_process_layer(enc_output,\n preprocess_cmd, preporstprocess_dropout)\n return enc_output", "def __init__(self, n_inpt, n_hidden, hidden_transfer='identity',\n out_transfer='identity', loss='squared', tied_weights=True,\n batch_size=None,\n optimizer='lbfgs', max_iter=1000, verbose=False):\n super(AutoEncoder, self).__init__(\n n_inpt, n_hidden, hidden_transfer, out_transfer, loss, tied_weights)\n self.batch_size = batch_size\n self.optimizer = optimizer\n self.f_transform = None\n self.f_reconstruct = None\n self.parameters.data[:] = np.random.standard_normal(\n self.parameters.data.shape).astype(theano.config.floatX)\n self.max_iter = max_iter\n self.verbose = verbose", "def autoencoder(self, data):\n with tf.variable_scope(\"autoencoder\"):\n latent = self.encoder(data)\n _, output = self.decoder(latent)\n\n return output, latent", "def train_autoencoder_and_embed(adv_examples_path=None):\n import tensorflow as tf\n import tensorflow.examples.tutorials.mnist.input_data as input_data\n import matplotlib.pyplot as plt\n\n # load MNIST as before\n mnist = input_data.read_data_sets('MNIST_data', one_hot=True)\n mean_img = np.mean(mnist.train.images, axis=0)\n ae = autoencoder(dimensions=[784, 256, 64])\n\n learning_rate = 0.001\n optimizer = tf.train.AdamOptimizer(learning_rate).minimize(ae['cost'])\n\n # We create a session to use the graph\n sess = tf.Session()\n sess.run(tf.global_variables_initializer())\n\n # Fit all training data\n batch_size = 50\n n_epochs = 30\n for epoch_i in range(n_epochs):\n for batch_i in range(mnist.train.num_examples // batch_size):\n batch_xs, _ = mnist.train.next_batch(batch_size)\n train = np.array([img - mean_img for img in batch_xs])\n sess.run(optimizer, feed_dict={ae['x']: train})\n print(epoch_i, sess.run(ae['cost'], feed_dict={ae['x']: train}))\n\n def load_adv_tiff_examples(adv_path):\n data_root = pathlib.Path(adv_path)\n all_image_paths = list(data_root.glob('*.tiff'))\n all_image_paths = [str(path) for path in all_image_paths]\n\n all_adv_images = []\n for p in all_image_paths:\n img = Image.open(p)\n img = np.asarray(img)\n img = img + 0.5\n img = np.ndarray.reshape(img, 28 * 28)\n all_adv_images.append(img)\n return all_adv_images\n\n # 
Get embeddings.\n # If you have too much to get and that it does not fit in memory, you may\n # need to use a batch size or to force to use the CPU rather than the GPU.\n test = [img - mean_img for img in mnist.test.images]\n if adv_examples_path:\n adv = load_adv_tiff_examples(adv_examples_path)\n adv = [img - mean_img for img in adv]\n test.extend(adv)\n\n embedded_data = sess.run(\n ae['z'],\n feed_dict={ae['x']: test}\n )\n return embedded_data, sess", "def _configure_network(self):\r\n def repeat_vector(args):\r\n [layer_to_repeat, sequence_layer] = args\r\n return RepeatVector(K.shape(sequence_layer)[1])(layer_to_repeat)\r\n\r\n encoder_input = Input(shape=(None, self._input_cells))\r\n encoder_output = LSTM(self._latent_space)(encoder_input)\r\n\r\n # Before feeding the decoder, the encoded data must be repeated as many times as time steps in the input data,\r\n # but the decoder does not know beforehand how many timesteps are fed into the autoencoder.\r\n # Check https://github.com/keras-team/keras/issues/7949 for the solution to this. Basically we take it\r\n # dynamically from the input shape with a Lambda layer for the repeat vector.\r\n # The input shape may vary per sample.\r\n\r\n decoder_input = Lambda(repeat_vector, output_shape=(None, self._latent_space))([encoder_output, encoder_input])\r\n\r\n decoder_output = LSTM(self._input_cells, return_sequences=True)(decoder_input)\r\n\r\n self._autoencoder = Model(encoder_input, decoder_output)\r\n self._encoder = Model(encoder_input, encoder_output)\r\n\r\n self._autoencoder.compile(optimizer=\"Adam\", loss=\"mse\", metrics=[\"accuracy\"])", "def autoencoder(input_dims, filters, latent_dims):\n e_inputs = keras.Input(input_dims)\n d_inputs = keras.Input(latent_dims)\n\n encoder = e_inputs\n for f in filters:\n encoder = keras.layers.Conv2D(\n f, (3, 3), activation='relu', padding='same')(encoder)\n encoder = keras.layers.MaxPooling2D((2, 2), padding='same')(encoder)\n\n decoder = d_inputs\n for i in reversed(range(1, len(filters))):\n decoder = keras.layers.Conv2D(\n filters[i], (3, 3), activation='relu', padding='same')(decoder)\n decoder = keras.layers.UpSampling2D((2, 2))(decoder)\n\n decoder = keras.layers.Conv2D(\n filters[0], (3, 3), activation='relu', padding='valid')(decoder)\n decoder = keras.layers.UpSampling2D((2, 2))(decoder)\n decoder = keras.layers.Conv2D(input_dims[-1], (3, 3),\n activation='sigmoid',\n padding='same')(decoder)\n\n encoder = keras.Model(e_inputs, encoder)\n decoder = keras.Model(d_inputs, decoder)\n\n auto = keras.Model(e_inputs, decoder(encoder(e_inputs)))\n auto.compile(optimizer=\"adam\", loss=\"binary_crossentropy\")\n\n return encoder, decoder, auto", "def train_autoencoder(sett, tr, ts, data_axis=0, make_predictions=False, dataset=\"CF\"):\n if data_axis == 0:\n train_size = sett[\"nusers\"]\n feature_size = sett[\"nitems\"]\n else:\n train_size = sett[\"nitems\"]\n feature_size = sett[\"nusers\"]\n\n tr, m_tr = prepare_data(tr, data_axis)\n ts, m_ts = prepare_data(ts, data_axis)\n if (data_axis == 1):\n tr = tr.T\n m_tr = m_tr.T\n ts = ts.T\n m_ts = m_ts.T\n print(\"%s Prepared data. 
Data axis is %d\" % (util.get_time(), data_axis))\n\n\n batch = tf.Variable(0) # global step of the optimizer\n # Decay once per epoch, using an exponential schedule starting at 0.01.\n learning_rate = tf.train.exponential_decay(\n sett[\"learning_rate\"], # Base learning rate.\n batch * sett[\"batch_size\"], # Current index into the dataset.\n train_size, # Decay step.\n sett[\"learning_rate_decay\"], # Decay rate.\n staircase=True)\n tf.summary.scalar('learning_rate', learning_rate, collections=[\"autoencoder\"])\n optimizer = tf.train.AdamOptimizer(learning_rate)\n\n model = DenoisingAutoencoder(name=\"autoencoder\",\n n_input=feature_size,\n n_hidden=sett[\"hidden_size\"],\n dropout_prob=sett[\"dropout_prob\"],\n gaussian_prob=sett[\"gaussian_prob\"],\n gaussian_std=sett[\"gaussian_std\"],\n sap_prob=sett[\"sap_prob\"],\n alpha_weight=sett[\"alpha\"],\n beta_weight=sett[\"beta\"],\n regl_weight=sett[\"regularization\"],\n optimizer=optimizer,\n rseed=381328,\n batch=batch)\n model.init_saver([batch], os.path.join(sett[\"log_dir\"], \"model.ckpt\"))\n\n batch_size = sett[\"batch_size\"]\n train_indices = range(train_size)\n\n with tf.Session() as s:\n init = tf.global_variables_initializer()\n s.run(init)\n summary_writer = tf.summary.FileWriter(sett[\"log_dir\"], graph=s.graph)\n\n for epoch in range(sett[\"num_epochs\"]):\n print(\"%s Epoch %d\" % (util.get_time(), epoch))\n # Randomize order of data samples at each epoch\n perm_indices = np.random.permutation(train_indices)\n # Index of data sample in this epoch\n run_index = 0\n\n for ibatch in range(train_size // batch_size):\n data_offset = (ibatch * batch_size) % (train_size - batch_size)\n batch_indices = perm_indices[data_offset:(data_offset+batch_size)]\n # Data for this batch\n batch_X = tr[batch_indices,:]\n batch_missing = m_tr[batch_indices,:]\n\n run_index += batch_size\n\n if run_index % sett[\"report_every\"] == 0:\n # print update and save summary for tensorboard\n cost, trerr, tserr, summary = model.fit_summary(s, tr, m_tr, ts, m_ts)\n\n print(\"%s step %d -- loss=%f -- train error=%f -- test error=%f\" %\n (util.get_time(), run_index, cost, trerr, tserr))\n\n summary_writer.add_summary(summary, epoch*train_size + run_index)\n summary_writer.flush()\n sys.stdout.flush()\n else:\n # Perform training\n cost = model.fit(s, batch_X, batch_missing)\n\n # Make predictions and write them to file.\n if make_predictions:\n print(\"%s Making final predictions\" % (util.get_time()))\n preds = model.predictions(s, tr)\n ts_pred = untransform_data(preds, m_ts)\n tr_pred = untransform_data(preds, m_tr)\n if data_axis == 1:\n ts_pred = ts_pred.T\n m_ts = m_ts.T\n tr_pred = tr_pred.T\n m_tr = m_tr.T\n util.write_predict(lambda u, i: ts_pred[u, i], np.invert(m_ts), sett[\"prediction_file\"] + \"_test.csv\")\n util.write_predict(lambda u, i: tr_pred[u, i], np.invert(m_tr), sett[\"prediction_file\"] + \"_train.csv\")\n print(\"%s Predictions written to %s\" % (util.get_time(), sett[\"prediction_file\"]))\n\n return (cost, trerr, tserr)", "def RunAutoEncoder(net, criterion, optimizer, lr_scheduler, train_dl, train_len, test_dl, test_len, N_EPOCHS, outputPath, SAVE_FILE,\\\n DO_PROJ_middle, run_model, criterion_classification, LOSS_LAMBDA, feature_name, TYPE_PROJ, ETA, ETA_STAR=100, AXIS=0 ):\n device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n epoch_loss, epoch_acc, epoch_reconstruction, epoch_classification, train_time = [], [], [], [], []\n epoch_val_loss, epoch_val_acc, epoch_val_reconstruction, 
epoch_val_classification = [], [], [], []\n best_test = 0 \n for e in range(N_EPOCHS):\n t1 = time.perf_counter()\n\n running_loss, running_accuracy = 0, 0 \n running_classification , running_reconstruction = 0,0\n net.train()\n for i,batch in enumerate(tqdm(train_dl)):\n x = batch[0]\n labels = batch[1]\n \n if torch.cuda.is_available():\n x = x.cuda()\n labels = labels.cuda() \n \n encoder_out, decoder_out = net(x)\n \n # Compute the loss \n loss_classification = criterion_classification(encoder_out,labels.long())\n if type(criterion) == torch.nn.modules.loss.KLDivLoss:\n loss_reconstruction = LOSS_LAMBDA * criterion(x.log(), decoder_out)\n else:\n loss_reconstruction = LOSS_LAMBDA * criterion(decoder_out, x)\n loss = loss_classification + loss_reconstruction\n \n optimizer.zero_grad()\n loss.backward()\n \n # Set the gradient as 0\n if run_model =='MaskGrad':\n for index,param in enumerate(list(net.parameters())):\n if index<len(list(net.parameters()))/2-2 and index%2==0:\n param.grad[ DO_PROJ_middle[int(index/2)] ] =0 \n optimizer.step() \n \n with torch.no_grad():\n running_loss += loss.item()\n running_reconstruction += loss_reconstruction.item()\n running_classification += loss_classification.item()\n running_accuracy += (encoder_out.max(1)[1] == labels).sum().item() \n \n if e == N_EPOCHS-1 :\n# labels = encoder_out.max(1)[1].float()\n if i == 0:\n data_decoded = torch.cat((decoder_out,labels.view(-1,1)), dim = 1)\n data_encoder = torch.cat((encoder_out,labels.view(-1,1)), dim = 1)\n else:\n tmp1 = torch.cat((decoder_out,labels.view(-1,1)), dim = 1)\n data_decoded = torch.cat((data_decoded,tmp1),dim= 0)\n \n tmp2 = torch.cat((encoder_out,labels.view(-1,1)), dim = 1)\n data_encoder = torch.cat((data_encoder,tmp2 ),dim= 0)\n\n t2 = time.perf_counter()\n train_time.append(t2-t1)\n print(\"Total loss:\", running_loss / float(train_len ),'loss_reconstruction: ', running_reconstruction/ train_len ,\\\n 'loss_classification: ',running_classification/ train_len ) \n epoch_loss.append(running_loss / train_len )\n epoch_reconstruction.append( running_reconstruction / train_len )\n epoch_classification.append( running_classification / train_len )\n epoch_acc.append(running_accuracy / train_len)\n \n \n # Do projection at last epoch (GRADIENT_MASK)\n if run_model=='ProjectionLastEpoch' and e==(N_EPOCHS-1):\n net_parameters = list(net.parameters())\n for index,param in enumerate(net_parameters):\n if DO_PROJ_middle == False and \\\n index!= len(net_parameters)/2-2: # Do no projection at middle layer\n param.data = Projection(param.data, TYPE_PROJ, ETA, ETA_STAR, AXIS, device).to(device)\n \n #testing our model\n running_loss, running_accuracy = 0, 0 \n running_classification , running_reconstruction = 0,0\n net.eval()\n \n for i,batch in enumerate(tqdm(test_dl)):\n with torch.no_grad():\n x = batch[0]\n labels = batch[1]\n if torch.cuda.is_available():\n x = x.cuda()\n labels = labels.cuda()\n encoder_out, decoder_out = net(x)\n \n # Compute the loss \n loss_classification = criterion_classification(encoder_out,labels.long())\n if type(criterion) == torch.nn.modules.loss.KLDivLoss:\n loss_reconstruction = LOSS_LAMBDA * criterion(x.log(), decoder_out)\n else:\n loss_reconstruction = LOSS_LAMBDA * criterion(decoder_out, x)\n loss = loss_classification + loss_reconstruction\n running_loss += loss.item()\n running_reconstruction += loss_reconstruction.item()\n running_classification += loss_classification.item()\n running_accuracy += (encoder_out.max(1)[1] == labels).sum().item() \n print(\"test 
accuracy : \", running_accuracy / test_len, \"Total loss:\", running_loss / float(test_len ),'loss_reconstruction: ', running_reconstruction/ test_len ,\\\n 'loss_classification: ',running_classification/ test_len )\n if running_accuracy > best_test :\n best_net_it = e\n best_test = running_accuracy\n torch.save(net.state_dict(), str(outputPath)+\"/best_net\")\n epoch_val_loss.append(running_loss / test_len )\n epoch_val_reconstruction.append( running_reconstruction / test_len )\n epoch_val_classification.append( running_classification / test_len )\n epoch_val_acc.append(running_accuracy / test_len) \n \n print('Epoch du best net = ', best_net_it) \n if SAVE_FILE and str(run_model)!= 'ProjectionLastEpoch':\n # Save encoder data\n Lung_encoder = data_encoder.cpu().detach().numpy()\n colunms = [x for x in range(Lung_encoder.shape[1]-1)] +['label']\n res =pd.DataFrame(Lung_encoder,columns= colunms)\n #res.to_csv('{}encoder_tiro_{}.csv'.format(outputPath, str(run_model)),sep=';')\n # Save decoder data\n Lung_decoded = data_decoded.cpu().detach().numpy()\n Label = ['Label']+list(Lung_decoded[:,-1].astype(int)+1)\n Name = ['Name'] + [x+2 for x in range(train_len)]\n Label = np.vstack( (np.array(Name),np.array(Label)) )\n Lung = np.delete(Lung_decoded, -1, axis =1 )\n Lung = np.hstack( (feature_name.reshape(-1,1), Lung.T) )\n Lung = np.vstack((Label, Lung))\n res = pd.DataFrame(Lung)\n #res.to_csv('{}decoded_{}.csv'.format(outputPath, str(run_model)),sep=';',index=0, header=0) \n print(\"-----------------------\")\n print(\"Saved file to \",str(outputPath))\n print(\"-----------------------\")\n #Plot \n if str(run_model)!= 'ProjectionLastEpoch':\n #plt.figure()\n #plt.plot( epoch_acc )\n #plt.plot( epoch_val_acc )\n #plt.title('Total accuracy classification')\n #plt.show()\n print('{} epochs trained for {}s , {} s/epoch'.format(N_EPOCHS, sum(train_time), np.mean(train_time)))\n return data_encoder, data_decoded, epoch_loss , best_test, net", "def update_networks(self):\n\t\t# layer 1 update\n\t\tself.W1_tv = tf.assign(self.W1_tv, self.W1_av)\n\t\tself.b1_tv = tf.assign(self.b1_tv, self.b1_av)\n\n\t\t# layer 2 update\n\t\tself.W2_tv = tf.assign(self.W2_tv, self.W2_av)\n\t\tself.b2_tv = tf.assign(self.b2_tv, self.b2_av)\n\n\t\t# layer 3 update\n\t\tself.W3_tv = tf.assign(self.W3_tv, self.W3_av)\n\t\tself.b3_tv = tf.assign(self.b3_tv, self.b3_av)", "def __init__(self, attribute_size, output_size, n_hidden_layers=2, n_hidden_neurons=30):\n self.n_hidden_layers = n_hidden_layers\n self.n_hidden_neurons = n_hidden_neurons\n self.attribute_size = attribute_size\n self.output_size = output_size\n\n X = T.fmatrix()\n Y = T.fmatrix()\n\n self.w_h = nnet.init_weights((self.attribute_size, self.n_hidden_neurons))\n self.w_h2 = nnet.init_weights((self.n_hidden_neurons, self.n_hidden_neurons))\n self.w_o = nnet.init_weights((self.n_hidden_neurons, self.output_size))\n\n if self.n_hidden_layers == 2:\n\n noise_py_x = nnet.model_reg(X, self.w_h, self.w_h2, self.w_o, 0, 0)\n py_x = nnet.model_reg(X, self.w_h, self.w_h2, self.w_o, 0, 0)\n\n cost = nnet.rmse(noise_py_x, Y)\n params = [self.w_h, self.w_h2, self.w_o]\n updates = nnet.RMSprop(cost, params, lr=0.001)\n\n self.train = theano.function(inputs=[X, Y], outputs=cost, updates=updates, allow_input_downcast=True)\n self.predict_ = theano.function(inputs=[X], outputs=py_x, allow_input_downcast=True)\n\n elif self.n_hidden_layers == 3:\n\n self.w_h3 = nnet.init_weights((self.n_hidden_neurons, self.n_hidden_neurons))\n\n noise_py_x = nnet.model_reg3(X, self.w_h, 
self.w_h2, self.w_h3, self.w_o, 0, 0)\n py_x = nnet.model_reg3(X, self.w_h, self.w_h2, self.w_h3, self.w_o, 0, 0)\n\n cost = nnet.rmse(noise_py_x, Y)\n params = [self.w_h, self.w_h2, self.w_h3, self.w_o]\n updates = nnet.RMSprop(cost, params, lr=0.001)\n\n self.train = theano.function(inputs=[X, Y], outputs=cost, updates=updates, allow_input_downcast=True)\n self.predict_ = theano.function(inputs=[X], outputs=py_x, allow_input_downcast=True)", "def build_encoder(img_shape):\n input_img = Input(shape=(img_shape)) \n x = Conv2D(16, (3, 3), activation='tanh', padding='same')(input_img)\n x = AveragePooling2D((2, 2), padding='same')(x)\n x = Conv2D(8, (3, 3), padding='same')(x)\n x = LeakyReLU()(x)\n x = AveragePooling2D((2, 2), padding='same')(x)\n x = Conv2D(8, (3, 3), padding='same')(x)\n x = LeakyReLU()(x)\n x = Dropout(0.5)(x)\n x = AveragePooling2D((2, 2), padding='same')(x)\n x = Conv2D(1, (3, 3), padding='same')(x)\n x = LeakyReLU()(x)\n x = AveragePooling2D((3, 3), padding='same')(x)\n x = Flatten()(x)\n encoded = Dense(540, activation='tanh')(x)\n Encoder=Model(input_img,encoded,name='encoder')\n return input_img,encoded,Encoder", "def __init__(self, momentum: float = .5):\n super(VanillaEncoder, self).__init__()\n self.conv1 = PointNetConv2Layer(64, momentum)\n self.conv2 = PointNetConv2Layer(64, momentum)\n self.conv3 = PointNetConv2Layer(64, momentum)\n self.conv4 = PointNetConv2Layer(128, momentum)\n self.conv5 = PointNetConv2Layer(1024, momentum)", "def run_autoencoder1(experiment,\n X_train, y_train, X_valid, y_valid, X_test, y_test,\n model_path, code_size=1000):\n\n # Hyperparameters\n learning_rate = 0.0001\n sparse = True # Add sparsity penalty\n sparse_p = 0.2\n sparse_coeff = 0.5\n corruption = 0.75 # Data corruption ratio for denoising\n ae_enc = tf.nn.tanh # Tangent hyperbolic\n ae_dec = None # Linear activation\n\n training_iters = 100\n batch_size = 64\n n_classes = 2\n\n if os.path.isfile(model_path) or \\\n os.path.isfile(model_path + \".meta\"):\n return\n\n # Create model and add sparsity penalty (if requested)\n model = ae(X_train.shape[1], code_size, corruption=corruption, enc=ae_enc, dec=ae_dec)\n if sparse:\n model[\"cost\"] += sparsity_penalty(model[\"encode\"], sparse_p, sparse_coeff)\n\n # Use GD for optimization of model cost\n optimizer = tf.train.AdamOptimizer(learning_rate, beta1=0.8).minimize(model[\"cost\"])\n\n # Initialize Tensorflow session\n init = tf.global_variables_initializer()\n with tf.Session() as sess:\n sess.run(init)\n\n # Define model saver\n saver = tf.train.Saver(model[\"params\"], write_version=tf.train.SaverDef.V2)\n\n # Initialize with an absurd cost for model selection\n prev_costs = np.array([9999999999] * 3)\n\n for epoch in range(training_iters):\n\n # randomly shuffle data\n index = np.arange(X_train.shape[0])\n random.shuffle(index)\n\n X_train = X_train[index,]\n y_train = y_train[index]\n\n # Break training set into batches\n batches = range(len(X_train) // batch_size)\n costs = np.zeros((len(batches), 3))\n\n for ib in batches:\n # Compute start and end of batch from training set data array\n from_i = ib * batch_size\n to_i = (ib + 1) * batch_size\n\n # Select current batch\n batch_xs, batch_ys = X_train[from_i:to_i], y_train[from_i:to_i]\n\n # Run optimization and retrieve training cost\n _, cost_train = sess.run(\n [optimizer, model[\"cost\"]],\n feed_dict={\n model[\"input\"]: batch_xs\n }\n )\n\n # Compute validation cost\n cost_valid = sess.run(\n model[\"cost\"],\n feed_dict={\n model[\"input\"]: X_valid\n }\n )\n\n 
# Compute test cost\n cost_test = sess.run(\n model[\"cost\"],\n feed_dict={\n model[\"input\"]: X_test\n }\n )\n\n costs[ib] = [cost_train, cost_valid, cost_test]\n\n # Compute the average costs from all batches\n costs = costs.mean(axis=0)\n cost_train, cost_valid, cost_test = costs\n\n # Pretty print training info\n print(format_config(\n \"Exp={experiment}, Model=ae1, Iter={epoch:5d}, Cost={cost_train:.6f} {cost_valid:.6f} {cost_test:.6f}\",\n {\n \"experiment\": experiment,\n \"epoch\": epoch,\n \"cost_train\": cost_train,\n \"cost_valid\": cost_valid,\n \"cost_test\": cost_test,\n }\n ))\n\n # Save better model if optimization achieves a lower cost\n if cost_valid < prev_costs[1]:\n print(\"Saving better model\")\n saver.save(sess, model_path)\n prev_costs = costs\n else:\n print", "def _build_encoder(self, hparams, is_training):\n with tf.variable_scope(\"trajectory_encoder\"):\n with tf.name_scope(\"source_placeholder\"):\n input_phs = list_ops.list_placeholder(self.num_gpu, (None, self.input_length, self.input_dims), tf.float32)\n for ph in input_phs:\n tf.add_to_collection('placeholder', ph)\n \n if hparams.encoder_type == \"rnn\":\n net = input_phs\n with tf.variable_scope(\"projection\"):\n net = self._build_input_projection(hparams, net, is_training)\n\n with tf.name_scope(\"batch_time_transpose\"):\n net = list_ops.list_transpose(net, perm=[1, 0, 2])\n\n with tf.variable_scope(\"rnn\"):\n net, state = self._build_rnn_encoder(hparams, net, is_training)\n\n if hparams.relu_reconfiguration:\n with tf.variable_scope(\"reconfiguration\"):\n net = list_ops.list_dense_with_bn(net,\n hparams.cnn_input_projector_filters[-1],\n is_training,\n self.bn_decay,\n seed=self.random_seed)\n\n elif hparams.encoder_type == \"cnn\":\n net = self._build_cnn_encoder(hparams, input_phs, is_training)\n state = None\n \n else:\n raise ValueError(\"Unknown encoder type {:s}.\".format(hparams.encoder_type))\n\n return net, state", "def RunAutoEncoder_unsupervised(net, criterion, optimizer, lr_scheduler, train_dl, train_len, N_EPOCHS, outputPath, SAVE_FILE,\\\n DO_PROJ_middle, run_model, criterion_classification, LOSS_LAMBDA , feature_name, TYPE_PROJ, ETA, ETA_STAR=100, AXIS=0 ):\n \n device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n epoch_loss, epoch_acc, epoch_reconstruction, epoch_classification, train_time = [], [], [], [], []\n epoch_val_loss, epoch_val_acc, epoch_val_reconstruction, epoch_val_classification = [], [], [], [] \n for e in range(N_EPOCHS):\n t1 = time.perf_counter()\n print('EPOCH:',e)\n running_loss, running_accuracy = 0, 0 \n running_classification , running_reconstruction = 0,0\n net.train()\n \n for i,batch in enumerate(tqdm(train_dl)):\n x = batch[0]\n labels = batch[1]\n \n if torch.cuda.is_available():\n x = x.cuda()\n labels = labels.cuda() \n \n encoder_out, decoder_out = net(x)\n \n # Compute the loss \n loss_classification = criterion_classification(encoder_out,labels.long())\n \n if type(criterion) == torch.nn.modules.loss.KLDivLoss:\n loss_reconstruction = LOSS_LAMBDA * criterion(x.log(), decoder_out)\n \n else:\n loss_reconstruction = LOSS_LAMBDA * criterion(decoder_out, x)\n loss = loss_classification + loss_reconstruction\n \n optimizer.zero_grad()\n loss.backward()\n \n # Set the gradient as 0\n if run_model =='MaskGrad':\n for index,param in enumerate(list(net.parameters())):\n if index<len(list(net.parameters()))/2-2 and index%2==0:\n param.grad[ DO_PROJ_middle[int(index/2)] ] =0 \n optimizer.step() \n \n with torch.no_grad():\n 
running_loss += loss.item()\n running_reconstruction += loss_reconstruction.item()\n running_classification += loss_classification.item()\n running_accuracy += (encoder_out.max(1)[1] == labels).sum().item() \n \n if e == N_EPOCHS-1 :\n\n if i == 0:\n data_decoded = torch.cat((decoder_out,labels.view(-1,1)), dim = 1)\n data_encoder = torch.cat((encoder_out,labels.view(-1,1)), dim = 1)\n else:\n tmp1 = torch.cat((decoder_out,labels.view(-1,1)), dim = 1)\n data_decoded = torch.cat((data_decoded,tmp1),dim= 0)\n \n tmp2 = torch.cat((encoder_out,labels.view(-1,1)), dim = 1)\n data_encoder = torch.cat((data_encoder,tmp2 ),dim= 0)\n\n t2 = time.perf_counter()\n train_time.append(t2-t1)\n print(\"Total loss:\", running_loss / float(train_len ),'loss_reconstruction: ', running_reconstruction/ train_len ,\\\n 'loss_classification: ',running_classification/ train_len ) \n epoch_loss.append(running_loss / train_len )\n epoch_reconstruction.append( running_reconstruction / train_len )\n epoch_classification.append( running_classification / train_len )\n epoch_acc.append(running_accuracy / train_len)\n \n \n # Do projection at last epoch (GRADIENT_MASK)\n if run_model=='ProjectionLastEpoch' and e==(N_EPOCHS-1):\n net_parameters = list(net.parameters())\n for index,param in enumerate(net_parameters):\n if DO_PROJ_middle == False and \\\n index!= len(net_parameters)/2-2: # Do no projection at middle layer\n param.data = Projection(param.data).to(device)\n \n\n \n\n if SAVE_FILE and str(run_model)!= 'ProjectionLastEpoch':\n # Save encoder data\n Lung_encoder = data_encoder.cpu().detach().numpy()\n colunms = [x for x in range(Lung_encoder.shape[1]-1)] +['label']\n res =pd.DataFrame(Lung_encoder,columns= colunms)\n res.to_csv('{}encoder_tiro_{}.csv'.format(outputPath, str(run_model)),sep=';')\n # Save decoder data\n Lung_decoded = data_decoded.cpu().detach().numpy()\n Label = ['Label']+list(Lung_decoded[:,-1].astype(int)+1)\n Name = ['Name'] + [x+2 for x in range(train_len)]\n Label = np.vstack( (np.array(Name),np.array(Label)) )\n Lung = np.delete(Lung_decoded, -1, axis =1 )\n Lung = np.hstack( (feature_name.reshape(-1,1), Lung.T) )\n Lung = np.vstack((Label, Lung))\n res = pd.DataFrame(Lung)\n res.to_csv('{}decoded_{}.csv'.format(outputPath, str(run_model)),sep=';',index=0, header=0) \n print(\"-----------------------\")\n print(\"Saved file to \",str(outputPath))\n print(\"-----------------------\")\n #Plot \n if str(run_model)!= 'ProjectionLastEpoch':\n plt.figure()\n plt.plot( epoch_loss )\n plt.plot( epoch_val_loss )\n plt.title('Total Loss ')\n plt.figure()\n plt.plot( epoch_reconstruction, label ='λ*reconstruction' )\n plt.plot( epoch_classification, label = 'classification' )\n plt.plot( epoch_val_reconstruction, label ='λ*reconstruction test' )\n plt.plot( epoch_val_classification, label = 'classification test' )\n plt.legend()\n plt.title('Loss ')\n plt.figure()\n plt.plot( epoch_acc )\n plt.plot( epoch_val_acc )\n plt.title('Total accuracy classification')\n print('{} epochs trained for {}s , {} s/epoch'.format(N_EPOCHS, sum(train_time), np.mean(train_time)))\n return data_encoder, data_decoded, epoch_loss , net, sum(epoch_acc)/N_EPOCHS", "def arch(self, nn) -> 'final node of the tensor flow graph (y_conv)':\n\n print(self)\n\n # first conv. 
layer \n # 5x5 filter, 1 input channel, 32 output channels\n W_conv1 = nn.weight_variable([5, 5, 1, 32])\n b_conv1 = nn.bias_variable([32])\n stride1 = 1\n h_conv1 = tf.nn.relu(nn.conv2d(nn.x_image, W_conv1, stride1, 'VALID') + b_conv1) \n # outputs a 24x24x32 image\n \n # first pooling layer (2x2) \n h_pool1 = nn.max_pool_2x2(h_conv1) \n # outputs a 12x12x32 image\n\n # second conv. layer \n # 3x3 filter, 32 input channel, 32 output channels\n W_conv2 = nn.weight_variable([3, 3, 32, 32])\n b_conv2 = nn.bias_variable([32])\n stride2 = 1\n h_conv2 = tf.nn.relu(nn.conv2d(h_pool1, W_conv2, stride2, 'VALID') + b_conv2)\n # outputs a 10x10x32 image\n\n # third conv. layer\n # 3x3 filter, 32 input channel, 32 output channels\n W_conv3 = nn.weight_variable([3, 3, 32, 32])\n b_conv3 = nn.bias_variable([32])\n stride3 = 1\n h_conv3 = tf.nn.relu(nn.conv2d(h_conv2, W_conv3, stride3, 'VALID') + b_conv3)\n # outputs a 8x8x32 image\n\n # reshape (flatten) output\n h_conv3_flat = tf.reshape(h_conv3, [-1, 8*8*32])\n\n # first fully connected layer\n W_fc1 = nn.weight_variable([8 * 8 * 32, 1024])\n b_fc1 = nn.bias_variable([1024])\n h_fc1 = tf.nn.relu(tf.matmul(h_conv3_flat, W_fc1) + b_fc1)\n\n # dropout\n h_fc1_drop = tf.nn.dropout(h_fc1, nn.keep_prob)\n\n # second (final) fully connected layer (softmax)\n W_fc2 = nn.weight_variable([1024, 10])\n b_fc2 = nn.bias_variable([10])\n y_conv=tf.nn.softmax(tf.matmul(h_fc1_drop, W_fc2) + b_fc2)\n\n return y_conv", "def build_encoder_bi(tparams, options):\n\t# word embedding (source)\n\tembedding = tensor.tensor3('embedding', dtype='float32')\n\tembeddingr = embedding[::-1]\n\tx_mask = tensor.matrix('x_mask', dtype='float32')\n\txr_mask = x_mask[::-1]\n\n\t# encoder\n\tproj = get_layer(options['encoder'])[1](tparams, embedding, options,\n\t\t\t\t\t\t\t\t\t\t\tprefix='encoder',\n\t\t\t\t\t\t\t\t\t\t\tmask=x_mask)\n\tprojr = get_layer(options['encoder'])[1](tparams, embeddingr, options,\n\t\t\t\t\t\t\t\t\t\t\t prefix='encoder_r',\n\t\t\t\t\t\t\t\t\t\t\t mask=xr_mask)\n\n\tctx = tensor.concatenate([proj[0][-1], projr[0][-1]], axis=1)\n\n\treturn embedding, x_mask, ctx", "def __init__(self, input_dim, hidden_dim_1, hidden_dim_2, num_nodes):\n super(VariationalGraphAutoEncoder, self).__init__()\n\n # Define the graph convolutional layers\n self.conv_shared = DenseSAGEConv(in_feats=input_dim, out_feats=hidden_dim_1)\n self.conv_mean = DenseSAGEConv(in_feats=hidden_dim_1, out_feats=hidden_dim_2)\n self.conv_log_std = DenseSAGEConv(in_feats=hidden_dim_1, out_feats=hidden_dim_2)\n self.conv_non_prob = DenseSAGEConv(in_feats=hidden_dim_1, out_feats=hidden_dim_2)\n\n # The output activation function\n self.output_func = nn.Sigmoid()\n\n # Drop out layers\n self.conv_dropout_1 = nn.Dropout(p=0.3)\n\n # Other attributes\n self.num_nodes = num_nodes\n self.hidden_dim_2 = hidden_dim_2\n self.h_mean = None\n self.h_log_std = None\n\n self.z = None", "def network_modified(input):\n\n up6 = upsample_and_concat( conv5, conv4, 256, 512 , 'up_conv1' )\n conv6=slim.conv2d(up6, 256,[3,3], rate=1, activation_fn=lrelu,scope='g_conv6_1')\n conv6=slim.conv2d(conv6,256,[3,3], rate=1, activation_fn=lrelu,scope='g_conv6_2')\n\n up7 = upsample_and_concat( conv6, conv3, 128, 256 , 'up_conv2' )\n conv7=slim.conv2d(up7, 128,[3,3], rate=1, activation_fn=lrelu,scope='g_conv7_1')\n conv7=slim.conv2d(conv7,128,[3,3], rate=1, activation_fn=lrelu,scope='g_conv7_2')\n\n up8 = upsample_and_concat( conv7, conv2, 64, 128 , 'up_conv3')\n conv8=slim.conv2d(up8, 64,[3,3], rate=1, 
activation_fn=lrelu,scope='g_conv8_1')\n conv8=slim.conv2d(conv8,64,[3,3], rate=1, activation_fn=lrelu,scope='g_conv8_2')\n\n up9 = upsample_and_concat( conv8, conv1, 32, 64 , 'up_conv4')\n conv9=slim.conv2d(up9, 32,[3,3], rate=1, activation_fn=lrelu,scope='g_conv9_1')\n conv9=slim.conv2d(conv9,32,[3,3], rate=1, activation_fn=lrelu,scope='g_conv9_2')\n\n conv10=slim.conv2d(conv9,12,[1,1], rate=1, activation_fn=None, scope='g_conv10')\n out = tf.depth_to_space(conv10,2)\n return out", "def __init__(self, n_inpt, n_hidden, hidden_transfer='sigmoid',\n out_transfer='identity', reconstruct_loss='squared',\n c_jacobian=1, tied_weights=True, batch_size=None,\n optimizer='lbfgs', max_iter=1000, verbose=False):\n super(ContractiveAutoEncoder, self).__init__(\n n_inpt, n_hidden, hidden_transfer, out_transfer,\n reconstruct_loss, c_jacobian,\n tied_weights)\n self.batch_size = batch_size\n self.optimizer = optimizer\n self.f_transform = None\n self.f_reconstruct = None\n self.parameters.data[:] = np.random.standard_normal(\n self.parameters.data.shape).astype(theano.config.floatX)\n self.max_iter = max_iter\n self.verbose = verbose", "def __init__(self, rng, input, filter_shape, image_shape, poolsize=(2, 2), non_linear=\"tanh\"):\n\n assert image_shape[1] == filter_shape[1]\n self.input = input\n self.filter_shape = filter_shape\n self.image_shape = image_shape\n self.poolsize = poolsize\n self.non_linear = non_linear\n # there are \"num input feature maps * filter height * filter width\"\n # inputs to each hidden unit\n fan_in = np.prod(filter_shape[1:])\n # each unit in the lower layer receives a gradient from:\n # \"num output feature maps * filter height * filter width\" /\n # pooling size\n fan_out = (filter_shape[0] * np.prod(filter_shape[2:]) /np.prod(poolsize))\n # initialize weights with random weights\n if self.non_linear == \"none\" or self.non_linear == \"relu\":\n self.W = theano.shared(np.asarray(rng.uniform(low=-0.01,high=0.01,size=filter_shape), \n dtype=theano.config.floatX),borrow=True,name=\"W_conv\")\n # self.W = T.as_tensor_variable(np.asarray(rng.uniform(low=-0.01,high=0.01,size=filter_shape), \n # dtype=theano.config.floatX),name=\"W_conv\")\n else:\n W_bound = np.sqrt(6. 
/ (fan_in + fan_out))\n self.W = theano.shared(np.asarray(rng.uniform(low=-W_bound, high=W_bound, size=filter_shape),\n dtype = theano.config.floatX),borrow=True,name=\"W_conv\") \n # self.W = T.as_tensor_variable(np.asarray(rng.uniform(low=-W_bound, high=W_bound, size=filter_shape),\n # dtype = theano.config.floatX),name=\"W_conv\") \n b_values = np.zeros((filter_shape[0],), dtype=theano.config.floatX)\n self.b = theano.shared(value=b_values, borrow=True, name=\"b_conv\")\n # self.b = T.as_tensor_variable(b_values, name=\"b_conv\")\n \n # convolve input feature maps with filters\n conv_out = conv.conv2d(input=input, filters=self.W,filter_shape=self.filter_shape, image_shape=self.image_shape)\n if self.non_linear == \"tanh\":\n conv_out_tanh = T.tanh(conv_out + self.b.dimshuffle('x', 0, 'x', 'x'))\n self.output = downsample.max_pool_2d(input=conv_out_tanh, ds=self.poolsize, ignore_border=True)\n elif self.non_linear == \"relu\":\n conv_out_tanh = ReLU(conv_out + self.b.dimshuffle('x', 0, 'x', 'x'))\n self.output = downsample.max_pool_2d(input=conv_out_tanh, ds=self.poolsize, ignore_border=True)\n else:\n pooled_out = downsample.max_pool_2d(input=conv_out, ds=self.poolsize, ignore_border=True)\n self.output = pooled_out + self.b.dimshuffle('x', 0, 'x', 'x')\n self.params = [self.W, self.b]", "def funcs(dataset, network, batch_size=BATCH_SIZE, learning_rate=LEARNING_RATE, sparsity=0.02, beta=0.01, momentum=MOMENTUM):\n\n # symbolic variables \n X_batch = T.matrix()\n y_batch = T.matrix()\n\n layers = lasagne.layers.get_all_layers(network)\n num_layers = len(layers)\n\n code_layer = layers[num_layers/2]\n activations_2_layer = layers[num_layers/2 - 1]\n activations_1_layer = layers[num_layers/2 - 2]\n\n # code output \n code_output = lasagne.layers.get_output(code_layer, X_batch, deterministic=True)\n\n l = T.sub(1,code_output)\n ll = T.mul(code_output,l)\n L = T.mul(4,ll)\n L = L.mean()\n\n rho_hat = T.mean(code_output,axis=1)\n # L = T.sum(sparsity * T.log(sparsity/rho_hat) + (1 - sparsity) * T.log((1 - sparsity)/(1 - rho_hat)))\n\n # reg = 0.0001*lasagne.regularization.l2(network)\n # this is the cost of the network when fed throught the noisey network\n train_output = lasagne.layers.get_output(network, X_batch)\n cost = lasagne.objectives.mse(train_output, y_batch) \n cost = cost.mean() + beta * L\n\n all_params = lasagne.layers.get_all_params(network)\n updates = lasagne.updates.nesterov_momentum(cost, all_params, learning_rate, momentum)\n\n \n\n # code and activation outputs\n \n activations_1_output = lasagne.layers.get_output(activations_1_layer, X_batch, deterministic=True)\n activations_2_output = lasagne.layers.get_output(activations_2_layer, X_batch, deterministic=True)\n\n train = theano.function(inputs=[X_batch, y_batch], outputs=cost, updates=updates, allow_input_downcast=True)\n code = theano.function(inputs=[X_batch], outputs=code_output, allow_input_downcast=True)\n activations_1 = theano.function(inputs=[X_batch], outputs=activations_1_output, allow_input_downcast=True)\n activations_2 = theano.function(inputs=[X_batch], outputs=activations_2_output, allow_input_downcast=True)\n\n return dict(\n train=train,\n code=code,\n activations_1=activations_1,\n activations_2=activations_2\n )", "def arch(self, nn) -> 'final node of the tensor flow graph (y_conv)':\n\n print(self)\n\n # first conv. 
layer \n # 5x5 filter, 1 input channel, 32 output channels\n W_conv1 = nn.weight_variable([5, 5, 1, 32])\n b_conv1 = nn.bias_variable([32])\n stride1 = 1\n h_conv1 = tf.nn.relu(nn.conv2d(nn.x_image, W_conv1, stride1) + b_conv1)\n \n # first pooling layer (2x2) \n h_pool1 = nn.max_pool_2x2(h_conv1)\n\n # second conv. layer \n # 5x5 filter, 32 input channel, 64 output channels\n W_conv2 = nn.weight_variable([5, 5, 32, 64])\n b_conv2 = nn.bias_variable([64])\n stride2 = 1\n h_conv2 = tf.nn.relu(nn.conv2d(h_pool1, W_conv2, stride2) + b_conv2)\n\n # second pooling layer (2x2) \n h_pool2 = nn.max_pool_2x2(h_conv2)\n\n # reshape (flatten) output\n h_pool2_flat = tf.reshape(h_pool2, [-1, 7*7*64])\n\n # first fully connected layer\n W_fc1 = nn.weight_variable([7 * 7 * 64, 1024])\n b_fc1 = nn.bias_variable([1024])\n h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)\n\n # dropout\n h_fc1_drop = tf.nn.dropout(h_fc1, nn.keep_prob)\n\n # second (final) fully connected layer (softmax)\n W_fc2 = nn.weight_variable([1024, 10])\n b_fc2 = nn.bias_variable([10])\n y_conv=tf.nn.softmax(tf.matmul(h_fc1_drop, W_fc2) + b_fc2)\n\n return y_conv", "def update_network(self, a, batch_size):\n for layer in self.layers:\n layer.weights_update(a, self.alpha, self.l2_lambda, batch_size)\n a = layer.a", "def model(inputs, target_images, is_training):\n # if isinstance(inputs, tuple):\n assert mask_augs >= 0. and mask_augs <= 1., \"mask_augs must be in [0, 1]\"\n if FLAGS.use_td_loss and isinstance(inputs, tuple):\n # print('#'*80)\n # print(inputs)\n assert metric is not None, \"Metric function is None\"\n inputs, augs = inputs\n B = inputs.get_shape().as_list()[0]\n A = augs.get_shape().as_list()[1]\n if mask_augs > 0:\n mask = tf.cast(tf.greater(tf.random.uniform(shape=[B, A], minval=0., maxval=1.), 0.5), augs.dtype) # noqa\n bias = mask * -1\n augs = (augs * mask) + bias # Randomly mask out augs for difficulty and code those dims as -1\n with tf.variable_scope('encoder'): # variable_scope name_scope\n features, block_activities = encoder(inputs, is_training=is_training)\n print(\"Features: \")\n print(features)\n print(\"---\")\n # Global average pool of B 7 7 2048 -> B 2048\n if data_format == 'channels_last':\n outputs = tf.reduce_mean(features, [1, 2])\n else:\n outputs = tf.reduce_mean(features, [2, 3])\n outputs = tf.identity(outputs, 'final_avg_pool')\n print(\"Outputs: \")\n print(outputs)\n print(\"---\")\n # B 2048\n\n h_w = features.get_shape().as_list()[1]\n # print(h_w)\n\n augs = tf.tile(augs[:,None,None,:], tf.constant([1,h_w,h_w,1]))\n print(\"Augs: \")\n print(augs)\n print(\"---\")\n features = tf.concat([features, augs], axis=-1)\n \n with tf.variable_scope('decoder'):\n recon_images = decoder(\n features,\n block_activities,\n is_training=is_training,\n skip=skip)\n print(\"Reconstructed images and target images: \")\n print(recon_images)\n print(target_images)\n print(\"---\")\n with tf.variable_scope('metric'):\n # Squash both recon and target images\n recon_images_squash = tf.tanh(recon_images)\n target_images = (target_images * 2) - 1\n Bt = target_images.get_shape().as_list()[0]\n Br = recon_images_squash.get_shape().as_list()[0]\n if Bt == Br:\n # Attractive + repulsive loss\n pass\n elif Bt * 2 == Br:\n # Attractive-only loss\n target_images = tf.concat([target_images, target_images], 0)\n\n # Differentiable perceptual metric. 
First reconstruction.\n # both_images = tf.concat([recon_images, target_images], -1) # B H W 6\n all_images = tf.concat([recon_images_squash, target_images], 0) # Stack these in batch dim\n metric_all_images = metric(all_images, is_training=is_training)\n # B = metric_all_images.get_shape().as_list()[0]\n metric_all_images = tf.reshape(metric_all_images, [B, -1])\n metric_hidden_r, metric_hidden_t = tf.split(metric_all_images, 2, 0) # Split these in batch dim\n\n # Prep recon_images for visualization\n # recon_images = tf.clip_by_value(recon_images, clip_value_min=-5, clip_value_max=5)\n # recon_images = (recon_images + 5) / 10\n\n recon_mean, recon_std = tf.nn.moments(recon_images, axes=[1, 2], keep_dims=True)\n recon_images = (recon_images - recon_mean) / recon_std\n recon_images = tf.clip_by_value(recon_images, clip_value_min=-5, clip_value_max=5)\n recon_images = (recon_images + 5) / 10\n # recon_images = recon_images_squash\n if greyscale_viz:\n recon_images = tf.image.rgb_to_grayscale(recon_images)\n recon_images = tf.concat([recon_images, recon_images, recon_images], -1)\n print(\"Embedding output: \")\n print(metric_hidden_t)\n print(\"---\")\n return outputs, recon_images, metric_hidden_r, metric_hidden_t\n\n else:\n # augs = None\n \n with tf.variable_scope('encoder'): # variable_scope name_scope\n features, block_activities = encoder(inputs, is_training)\n \n if data_format == 'channels_last':\n print(\"Features:\")\n print(features)\n outputs = tf.reduce_mean(features, [1, 2])\n else:\n outputs = tf.reduce_mean(features, [2, 3])\n outputs = tf.identity(outputs, 'final_avg_pool')\n \n # filter_trainable_variables(trainable_variables, after_block=5)\n # add_to_collection(trainable_variables, 'trainable_variables_inblock_')\n\n return outputs", "def convolutional_32_encoder(self, x, reuse=True): \n\t\tenc_layers = self.enc_layers\n\t\tregularizer = tf.contrib.layers.l2_regularizer(scale=self.reg_param)\n\t\twith tf.variable_scope('model', reuse=reuse):\n\t\t\twith tf.variable_scope('encoder', reuse=reuse):\n\t\t\t\tconv1 = tf.layers.conv2d(x, 128, 2, strides=(2,2), padding=\"VALID\", activation=tf.nn.relu, kernel_regularizer=regularizer, reuse=reuse, name='conv1')\n\t\t\t\tconv1 = tf.layers.batch_normalization(conv1)\n\n\t\t\t\tconv2 = tf.layers.conv2d(conv1, 256, 2, strides=(2,2), padding=\"VALID\", activation=tf.nn.relu, kernel_regularizer=regularizer, reuse=reuse, name='conv2')\n\t\t\t\tconv2 = tf.layers.batch_normalization(conv2)\n\n\t\t\t\tconv3 = tf.layers.conv2d(conv2, 512, 2, strides=(2,2), padding=\"VALID\", activation=tf.nn.relu, kernel_regularizer=regularizer, reuse=reuse, name='conv3')\n\t\t\t\tconv3 = tf.layers.batch_normalization(conv3)\n\n\t\t\t\tflattened = tf.contrib.layers.flatten(conv3)\n\t\t\t\tz_mean = tf.layers.dense(flattened, enc_layers[-1], activation=None, use_bias=False, kernel_regularizer=regularizer, reuse=reuse, name='fc-final')\n\t\treturn z_mean", "def autoencoder_train(discriminator_loss, generator_loss, reconstruction_loss, global_step):\n # Variables that affect learning rate.\n decay_steps = NUM_ITERATIONS_PER_DECAY\n\n # Decay the learning rate exponentially based on the number of steps.\n lr = tf.train.exponential_decay(\n INITIAL_LEARNING_RATE,\n global_step,\n decay_steps,\n LEARNING_RATE_DECAY_FACTOR,\n staircase=True\n )\n\n tf.scalar_summary('learning_rate', lr)\n\n # Generate moving averages of all losses and associated summaries.\n loss_averages_op = _add_loss_summaries(\n [discriminator_loss, generator_loss, reconstruction_loss]\n )\n 
# Get total weight decay\n total_weight_loss = tf.add_n(tf.get_collection(\"losses\"), name=\"total_weight_loss\")\n\n # Get losses for each optimizer\n G_loss = generator_loss + total_weight_loss\n R_loss = reconstruction_loss + total_weight_loss\n D_loss = discriminator_loss + total_weight_loss\n\n # separate out the G and D variables\n trainable_vars = tf.trainable_variables()\n D_vars = [var for var in trainable_vars if \"discriminator\" in var.name]\n G_vars = [var for var in trainable_vars if not \"discriminator\" in var.name]\n\n # Compute gradients.\n with tf.control_dependencies([loss_averages_op]):\n # optimizer for Discriminator\n D_opt = tf.train.AdamOptimizer(lr, beta1=.5, name=\"D_optimizer\")\n D_grads = D_opt.compute_gradients(D_loss, D_vars)\n\n # optimizer for Reconstruction and generator\n R_opt = tf.train.AdamOptimizer(lr, name=\"R_optimizer\")\n R_grads = R_opt.compute_gradients(R_loss+G_loss, G_vars)\n\n\n # Apply gradients.\n R_apply_gradient_op = R_opt.apply_gradients(R_grads, global_step=global_step)\n D_apply_gradient_op = D_opt.apply_gradients(D_grads, global_step=global_step)\n\n\n # Add histograms for trainable variables.\n for var in trainable_vars:\n tf.histogram_summary(var.op.name, var)\n\n # Add histograms for gradients for each optimizer\n for grads, name in [(D_grads, '/D_gradients'), (R_grads, '/R_gradients')]:\n for grad, var in grads:\n if grad is not None:\n tf.histogram_summary(var.op.name + name, grad)\n\n # Track the moving averages of the batch norm variables.\n variable_averages = tf.train.ExponentialMovingAverage(\n MOVING_AVERAGE_DECAY, global_step)\n\n # average the batch norm variables\n variables_to_average = list(\n set(\n [v for v in tf.all_variables() if \"_mean\" in v.name or \"_variance\" in v.name]\n )\n )\n variables_averages_op = variable_averages.apply(variables_to_average)\n\n # generate training op for reconstruction\n with tf.control_dependencies([R_apply_gradient_op, variables_averages_op]):\n R_train_op = tf.no_op(name='R_train')\n # generate training op for discriminator\n with tf.control_dependencies([D_apply_gradient_op, variables_averages_op]):\n D_train_op = tf.no_op(name='D_train')\n\n #return G_train_op, R_train_op, D_train_op\n return R_train_op, D_train_op", "def train(self, network, training_examples, iterations, unsupervised=False):\n if unsupervised:\n # For now this means we are training a sparse autoencoder.\n # Therefore, we need to keep a running estimate of the\n # \"sparsity\" of a node, where we try to keep the activation\n # of the node stay close to a small value near 0 known as\n # rho (Greek lower case p) or the 'sparsity parameter',\n # which we will set to 0.05.\n # This forces the network to learn the smallest set of features\n # necessary to accurately build a close estimate of the original\n # input vector\n # In this case, we set the input vector equal to the target vector,\n # and usually set a smaller value for the number of hidden nodes\n # Then perform normal backpropagation, and during that, for each\n # hidden node, also update the rho_estimate, and then update the\n # threshold value\n rho = 0.05\n rho_estimates = [0] * len(network.layers[0].neurons) # set to 0 for each node\n beta = 0.2 # the learning rate for updating the threshold terms\n for iteration_counter in range(iterations):\n random.shuffle(training_examples)\n # for each row of data\n for training_example in training_examples:\n input_vector = training_example[0]\n target_output_vector = training_example[1]\n # prime the network 
on this row of input data\n # -this will cause output (activation) values to be\n # set for each neuron\n network.forward(input_vector)\n\n # Note: next_layer_deltas is a vector of the single\n # delta values for each node in the next\n # (forward) layer\n next_layer_deltas = []\n next_layer_weights = []\n isOutputLayer = True\n for layer in reversed(network.layers): # iterate backwards\n this_layer_deltas = [] # values from current layer\n this_layer_weights = []\n for j, neuron in enumerate(layer.neurons):\n derivative = neuron.activation_function.derivative\n # The output layer neurons are treated slightly\n # different than the hidden neurons\n if isOutputLayer:\n if neuron.activation_function.name == \"logistic\":\n # derivative simplifies down to just\n # subtracting the target from the\n # hypothesis\n delta = neuron.output - target_output_vector[j]\n else: # Tanh or Linear\n delta = (neuron.output-target_output_vector[j])*derivative(neuron.output)\n else: # for the hidden layer neurons\n # Need to sum the products of the delta of\n # a neuron in the next (forward) layer and the\n # weight associated with the connection between\n # this hidden layer neuron and that neuron.\n # This will basically determine how much this\n # neuron contributed to the error of the neuron\n # it is connected to\n # Note: next_layer_deltas is a vector of the \n # single delta values for each node in the next\n # (forward) layer\n sum_value = 0.0\n for next_delta, weights in zip(next_layer_deltas,\n next_layer_weights):\n sum_value += weights[j] * next_delta\n delta = (derivative(neuron.output) *\n sum_value)\n # now store the delta and the list of weights\n # for this neuron into these storage lists for the\n # whole layer\n this_layer_deltas.append(delta)\n this_layer_weights.append(neuron.weights)\n # Now, compute the gradient (partial deriv of cost\n # func, J, w/ respect to parameter ij) for each\n # weight_ij (parameter_ij) associated with\n # this neuron\n for ij, input_ij in enumerate(neuron.input_vector):\n # compute gradient (partial deriv of cost J w/\n # respect to parameter ij)\n # Note: index ij means from a previous\n # layer node i to this layer node j\n # Then Gradient Descent: multiply by the learning\n # rate, and subtract from the current value\n # Note: Subtract in order to minimize error, since\n # partial derivs point in direction of gradient\n # AScent\n gradient_ij = delta * input_ij\n neuron.weights[ij] -= self.learning_rate * gradient_ij\n # Now, compute the gradient (partial deriv of cost\n # func, J, with respect to parameter ij) for the\n # threshold value (parameter_0j), by using a \"1\" as\n # the threshold \"input value\"\n # -Note: index 0j means from a previous\n # layer threshold node 0 (threshold always has\n # index i=0) to this layer node j\n # -can also think of it as the threshold being\n # internal to this neuron\n gradient_0j = delta * 1\n neuron.threshold -= self.learning_rate * gradient_0j\n if unsupervised and not isOutputLayer:\n rho_estimates[j] = (0.999*rho_estimates[j] +\n 0.001*neuron.output)\n neuron.threshold -= (self.learning_rate * beta *\n (rho_estimates[j] - rho))\n # Once this layer is done, store the gradients and weights\n # from the current layer for the next layer iteration\n # (moving backwards)\n next_layer_deltas = this_layer_deltas\n next_layer_weights = this_layer_weights\n isOutputLayer = False\n # Note: this is after the while loop\n self.iterations = iteration_counter", "def cifar10_convolutional_encoder(self, x, reuse=True): 
\n\t\tenc_layers = self.enc_layers\n\t\tregularizer = tf.contrib.layers.l2_regularizer(scale=self.reg_param)\n\t\twith tf.variable_scope('model', reuse=reuse):\n\t\t\twith tf.variable_scope('encoder', reuse=reuse):\n\t\t\t\tconv1 = tf.layers.conv2d(x, 64, (3,3), padding=\"SAME\", activation=None, kernel_regularizer=regularizer, reuse=reuse, name='conv1')\n\t\t\t\tbn1 = tf.layers.batch_normalization(conv1)\n\t\t\t\trelu1 = tf.nn.relu(bn1)\n\t\t\t\tconv1_out = tf.layers.max_pooling2d(relu1, (2,2), (2,2), padding='same')\n\t\t\t\t# 2nd convolutional layer\n\t\t\t\tconv2 = tf.layers.conv2d(conv1_out, 32, (3,3), padding=\"SAME\", activation=None, kernel_regularizer=regularizer, reuse=reuse, name='conv2')\n\t\t\t\tbn2 = tf.layers.batch_normalization(conv2)\n\t\t\t\trelu2 = tf.nn.relu(bn2)\n\t\t\t\tconv2_out = tf.layers.max_pooling2d(relu2, (2,2), (2,2), padding='same')\n\t\t\t\t# 3rd convolutional layer\n\t\t\t\tconv3 = tf.layers.conv2d(conv2_out, 16, (3,3), padding=\"SAME\", activation=None, kernel_regularizer=regularizer, reuse=reuse, name='conv3')\n\t\t\t\tbn3 = tf.layers.batch_normalization(conv3)\n\t\t\t\trelu3 = tf.nn.relu(bn3)\n\t\t\t\tconv3_out = tf.layers.max_pooling2d(relu3, (2,2), (2,2), padding='same')\n\t\t\t\tflattened = tf.reshape(conv3_out, (-1, 4*4*16))\n\t\t\t\tz_mean = tf.layers.dense(flattened, enc_layers[-1], activation=None, use_bias=False, kernel_regularizer=regularizer, reuse=reuse, name='fc-final')\n\t\treturn z_mean", "def update_net(optimizer):\n assert kl_train_dataset.bp_mode\n frames_gen, frame_cnt, rel_props, prop_ticks, prop_scaling = kl_train_dataset[index]\n\n optimizer.zero_grad()\n \n num_crop = 1\n length = 3\n if args.modality == 'Flow':\n length = 10\n elif args.modality == 'RGBDiff':\n length = 18\n \n for frames in frames_gen:\n # frames.shape == [frame_batch_size * num_crop * 3, 224, 224]\n assert len(frames) == length * frame_cnt\n input_var = torch.autograd.Variable(frames.view(-1, length, frames.size(-2), frames.size(-1)).cuda())\n base_out = net(input_var, None, None, None, None)\n assert base_out.size(0) == frame_cnt and base_out.size(1) == base_out_dim\n step_features = base_out.mean(dim=0).unsqueeze(0)\n gate, glcu_task_pred = net.glcu(step_features)\n gate = gate.repeat(1, frame_cnt).view(frame_cnt, base_out_dim)\n assert glcu_task_pred.size(0) == 1\n glcu_task_pred = F.softmax(glcu_task_pred.squeeze(), dim=0)\n if net.additive_glcu:\n base_out = base_out + gate\n else:\n base_out = base_out * gate\n\n output = net.test_fc(base_out)\n assert output.size(0) == frame_cnt and output.size(1) == output_dim\n act_scores, comp_scores, reg_scores = reorg_stpp.forward(output, prop_ticks, prop_scaling, bp_mode=True)\n\n # Task Head\n combined_scores = F.softmax(act_scores[:, 1:], dim=1) * torch.exp(comp_scores)\n combined_scores = combined_scores.mean(dim=0).unsqueeze(0)\n task_pred = net.task_head(combined_scores)\n assert task_pred.size(0) == 1\n task_pred = F.softmax(net.task_head(combined_scores).squeeze(), dim=0)\n\n loss = KL(task_pred, glcu_task_pred)\n loss.backward()\n torch.cuda.empty_cache() # To empty the cache from previous iterations\n break\n\n optimizer.step()\n optimizer.zero_grad()\n torch.cuda.empty_cache()\n\n return float(loss.data), frame_cnt", "def main(tetrode_number=TETRODE_NUMBER,num_hidden_units=300,num_hidden_units_2=200,num_code_units=50):\n print(\"Loading the data...\")\n dataset = load_data(tetrode_number)\n print(\"Done!\")\n\n print(\"Tetrode number: {}, Num outputs: {}\".format(tetrode_number,dataset['output_dim']))\n\n 
print(dataset['input_shape'])\n print(dataset['output_dim'])\n \n print(\"Making the model...\")\n network = model(dataset['input_shape'],dataset['output_dim'],num_hidden_units,num_hidden_units_2,num_code_units,(4,1))\n print(\"Done!\")\n\n print(\"Setting up the training functions...\")\n training = funcs(dataset,network)\n print(\"Done!\")\n\n accuracies = []\n trainvalidation = []\n\n print(\"Begining to train the network...\")\n epochsDone = 0\n autoencoderSameLabels = []\n try:\n for i in range(NUM_EPOCHS):\n costs = []\n valid_costs = []\n\n for start, end in zip(range(0, dataset['num_examples_train'], BATCH_SIZE), range(BATCH_SIZE, dataset['num_examples_train'], BATCH_SIZE)):\n cost = training['train'](dataset['X_train'][start:end],dataset['y_train'][start:end])\n costs.append(cost)\n \n for start, end in zip(range(0, dataset['num_examples_valid'], BATCH_SIZE), range(BATCH_SIZE, dataset['num_examples_valid'], BATCH_SIZE)):\n cost = training['valid'](dataset['X_valid'][start:end],dataset['y_valid'][start:end])\n valid_costs.append(cost)\n\n\n meanValidCost = np.mean(np.asarray(valid_costs),dtype=np.float32) \n meanTrainCost = np.mean(np.asarray(costs,dtype=np.float32))\n accuracy = training['accuracy'](dataset['X_test'],dataset['y_test'])\n\n print(\"Epoch: {}, Accuracy: {}, Training cost / validation cost: {}\".format(i+1,accuracy,meanTrainCost/meanValidCost))\n\n if(np.isnan(meanTrainCost/meanValidCost)):\n print(\"Nan value\")\n break\n\n\n # this is the test to see if the autoencoder is learning how to \n if i%10==0:\n acs = []\n for j in range(dataset['caswells_dim']):\n # print(dataset['labeled_test'][j].shape)\n codes = training['code'](dataset['labeled_test'][j])\n np.mean(np.argmax(dataset['y_test'], axis=1) == np.argmax(training['predict'](dataset['X_test']), axis=1))\n format_codes = []\n for code in codes:\n # if(j==0):\n format_codes.append(np.argmax(code))\n\n prev = sorted(format_codes)[0]\n # print(sorted(format_codes))\n k = 0\n same = [1]\n for code in sorted(format_codes)[1:]:\n if(code == prev):\n same[k] = same[k] + 1\n else:\n k+=1\n same.append(1)\n prev = code\n\n same = np.asarray(same)\n # print(same,np.argmax(same),same[np.argmax(same)],np.sum(same))\n label_acc = same[np.argmax(same)]*1.0/np.sum(same)\n acs.append(label_acc)\n print(\"Label: {}, Num examples: {}, Same label with autoencoder: {} \".format(j,dataset['labeled_test'][j].shape[0],label_acc))\n acs = np.asarray(acs)\n autoencoderSameLabels.append(np.mean(acs))\n print(\"Average agreement: {}\".format(np.mean(acs)))\n\n\n if i%50 == 0:\n ran = randint(0,dataset['num_examples_test']-20)\n now = datetime.datetime.now()\n for j in range(10):\n testing = [dataset['X_test'][ran]]\n # print(testing[0].shape)\n output = dataset['y_test'][ran].reshape((1, 200))[0]\n print(output)\n\n # print(np.arange(dataset['output_dim']))\n # print(output)\n prediction = training['predict'](testing)[0].reshape((1, 200))[0]\n print(prediction)\n # print(prediction)\n # print(testing[0][0])\n \n code = training['code'](testing).reshape((1, 50))\n\n # print(code)\n \n # plotting the figure\n\n fig = plt.figure(1)\n sub1 = fig.add_subplot(311)\n sub2 = fig.add_subplot(312)\n sub3 = fig.add_subplot(313)\n\n # add titles\n\n sub1.set_title('Desired output')\n sub2.set_title('Net output')\n sub3.set_title('Code layer output')\n\n # adding x labels\n\n sub1.set_xlabel('Time')\n sub2.set_xlabel('Time')\n sub3.set_xlabel('Code label')\n\n # adding y labels\n\n sub1.set_ylabel('Amplitude')\n sub2.set_ylabel('Amplitude')\n 
sub3.set_ylabel('Probability')\n\n # Plotting data\n\n # print(testing[0][0])\n # inp = []\n # for z in range(4):\n # inp += list(testing[0][0][z])\n\n\n sub1.plot(output)\n # sub1.bar(x_axis, output, width=1)\n sub1.grid(True)\n\n sub2.plot(prediction)\n sub2.grid(True)\n\n x_axis = list(np.arange(len(code[0])))\n\n # sub3.plot(code[0])\n sub3.bar(x_axis, code[0], width=1)\n # plt.show()\n\n fig.tight_layout()\n\n # plt.plot(var2)\n # fig.tight_layout()\n plt.savefig('../logs/convAuto/fig{}_{}_{}.png'.format(i,j,now), bbox_inches='tight')\n plt.close()\n \n ran += 1\n # break\n\n\n trainvalidation.append([meanTrainCost,meanValidCost])\n accuracies.append(accuracy)\n if(EARLY_STOPPING):\n if(len(accuracies) < STOPPING_RANGE):\n pass\n else:\n test = [k for k in accuracies if k < accuracy]\n if not test:\n print('Early stopping causing training to finish at epoch {}'.format(i+1))\n break\n del accuracies[0]\n accuracies.append(accuracy)\n\n epochsDone = epochsDone + 1\n\n except KeyboardInterrupt:\n pass\n\n # plt.plot(trainvalidation)\n # plt.show()\n\n if(LOG_EXPERIMENT):\n print(\"Logging the experiment details...\")\n log = dict(\n NET_TYPE = \"Conv auto encoder 2 hidden 1 code\",\n TETRODE_NUMBER = tetrode_number,\n BASENAME = BASENAME,\n NUM_EPOCHS = epochsDone,\n BATCH_SIZE = BATCH_SIZE,\n TRAIN_VALIDATION = trainvalidation,\n LEARNING_RATE = LEARNING_RATE,\n MOMENTUM = MOMENTUM,\n SAME_LABEL_AVERAGES = autoencoderSameLabels,\n ACCURACY = accuracies,\n NETWORK_LAYERS = [str(type(layer)) for layer in lasagne.layers.get_all_layers(network)],\n OUTPUT_DIM = dataset['output_dim'],\n # NETWORK_PARAMS = lasagne.layers.get_all_params_values(network)\n )\n now = datetime.datetime.now()\n filename = \"experiments/convAuto/{}_{}_{}_NUMLAYERS_{}_OUTPUTDIM_{}\".format(now,NUM_EPOCHS,NUM_HIDDEN_UNITS,len(log['NETWORK_LAYERS']),log['OUTPUT_DIM'])\n filename = re.sub(\"[^A-Za-z0-9_/,-:]\", \"\", filename)\n with open(filename,\"w\") as outfile:\n outfile.write(str(log))", "def forward(network: dict, x: np.array) -> np.array:\n W1, W2, W3 = network['W1'], network['W2'], network['W3']\n b1, b2, b3 = network['b1'], network['b2'], network['b3']\n z1 = _forward(x, W1, b1, 'sigmoid')\n z2 = _forward(z1, W2, b2, 'sigmoid')\n y = _forward(z2, W3, b3, 'identity')\n return y", "def __call__(self, encoder_inputs, attention_bias, inputs_padding):\n for n, layer in enumerate(self.layers):\n # Run inputs through the sublayers.\n self_attention_layer = layer[0]\n feed_forward_network = layer[1]\n\n with tf.variable_scope(\"encoder_layer_%d\" % n):\n with tf.variable_scope(\"self_attention\"):\n encoder_inputs = self_attention_layer(encoder_inputs, attention_bias)\n with tf.variable_scope(\"ffn\"):\n encoder_inputs = feed_forward_network(encoder_inputs, inputs_padding)\n\n with tf.variable_scope(\"encoder_output\"):\n output = self.output_normalization(encoder_inputs)\n\n return output # self.output_normalization(encoder_inputs)", "def __init__(self, rng, input, n_in = 0, n_out = 0, \n halfWinSize = 0, activation = T.nnet.relu, mask = None):\n self.input = input\n self.n_in = n_in\n self.n_out = n_out\n\tself.halfWinSize = halfWinSize\n\n windowSize = 2*halfWinSize + 1\n self.filter_size = windowSize\n\n # reshape input to shape (batchSize, n_in, nRows=1, nCols=seqLen) \n in4conv2D = input.dimshuffle(0, 1, 'x', 2)\n\n # initialize the filter\n w_shp = (n_out, n_in, 1, windowSize)\n\tif activation == T.nnet.relu:\n W_values = np.asarray(\n rng.normal(scale = np.sqrt(2. 
/ (n_in*windowSize + n_out)),\n size = w_shp), \n dtype = theano.config.floatX )\n\telse:\n W_values = np.asarray(\n rng.uniform(low = - np.sqrt(6. / (n_in*windowSize + n_out)), \n high = np.sqrt(6. / (n_in*windowSize + n_out)), \n size = w_shp),\n dtype=theano.config.floatX\n )\n if activation == theano.tensor.nnet.sigmoid:\n \tW_values *= 4\n\n self.W = theano.shared(value=W_values, name='ResConv1d_W', borrow=True)\n\n b_shp = (n_out,)\n self.b = theano.shared(\n np.asarray(rng.uniform(low = -.0, high = .0, size = b_shp), \n dtype=input.dtype), \n name ='ResConv1d_b', \n borrow=True)\n\n # conv_out and conv_out_bias have shape (batch_size, n_out, 1, nCols)\n conv_out = T.nnet.conv2d(in4conv2D, self.W, \n filter_shape=w_shp, border_mode='half')\n if activation is not None:\n conv_out_bias = activation(conv_out + \n self.b.dimshuffle('x', 0, 'x', 'x'))\n else:\n conv_out_bias = (conv_out + self.b.dimshuffle('x', 0, 'x', 'x'))\n\n\t## out2 has shape (batchSize, n_out, nCols)\n out2 = conv_out_bias.dimshuffle(0, 1, 3, 2)[:, :, :, 0]\n\n if mask is not None:\n ## since we did zero padding at left side of the input tensor\n ## we need to reset these positions to 0 again after convolution \n ## to avoid introducing noise\n ## mask has shape (batchSize, #positions_to_be_masked)\n\n ##take the subtensor of out2 that needs modification\n out2_sub = out2[:, :, :mask.shape[1] ]\n mask_new = mask.dimshuffle(0, 'x', 1)\n self.output = T.set_subtensor(out2_sub, T.mul(out2_sub, mask_new))\n else:\n self.output = out2\n\n\t##self.output has shape (batchSize, n_out, nCols)\n\n # parameters of the model\n self.params=[self.W, self.b]\n\n self.paramL1 = abs(self.W).sum() + abs(self.b).sum()\n self.paramL2 = (self.W**2).sum() + (self.b**2).sum()", "def encoder_net_func(num_layer, net_type='cnn'):\n ec_funct = []\n for i in range(num_layer):\n ec_funct.append(net_type)\n ec_funct.append('mlp')\n\n dc_funct = copy.deepcopy(ec_funct)\n dc_funct.reverse()\n\n return ec_funct, dc_funct", "def example():\n base_path = Path(TMPDIR)\n\n discriminator = Model(num_input=28 * 28)\n discriminator.add(Layer(512, activation=af.RELU))\n discriminator.add(Layer(1, activation=af.SIGMOID))\n\n generator_discriminator = Model(num_input=100)\n generator_discriminator.add(Layer(512, activation=af.LEAKY_RELU))\n generator_discriminator.add(Layer(28 * 28, activation=af.SIGMOID))\n generator_discriminator.add(Layer(512, activation=af.RELU)) # Needs to match discriminator\n generator_discriminator.add(Layer(1, activation=af.SIGMOID)) # Needs to match discriminator\n\n nn_discriminator = NeuralNetwork(discriminator, learning_rate=0.0002, cost_function=cf.CROSS_ENTROPY,\n\n optimizer=opt.ADAM,\n optimizer_settings=AdamOptimizer(beta1=0.5, beta2=0.999, epsilon=1e-8),\n batch_size=32)\n\n discriminator_weight_path = Path(DISCRIMINATOR_WEIGHTS_FILE_PATH)\n if discriminator_weight_path.exists():\n log.info(\"Discriminator weight file detected. 
Loading.\")\n nn_discriminator.load(discriminator_weight_path)\n\n nn_generator_discriminator = NeuralNetwork(generator_discriminator,\n use_layer_from=[{\"model\": nn_discriminator,\n \"layer_map\": [{\"from\": 1, \"to\": 3},\n {\"from\": 2, \"to\": 4}]}],\n\n learning_rate=0.0002, cost_function=cf.CROSS_ENTROPY, # Slower than D\n optimizer=opt.ADAM,\n optimizer_settings=AdamOptimizer(beta1=0.5, beta2=0.999, epsilon=1e-8),\n batch_size=32,\n weight_parameter=wparam(init_type=wparam.NORMAL, stddev=0.02))\n\n generator_weight_path = Path(GENERATOR_WEIGHTS_FILE_PATH)\n if generator_weight_path.exists():\n log.info(\"Generator weight file detected. Loading.\")\n nn_generator_discriminator.load(generator_weight_path)\n\n noise = np.random.normal(size=(NUM_IMAGES_TO_GENERATE, 100))\n\n print(\"Generating...\")\n test_images = nn_generator_discriminator.predict_intermediate(noise, 2)\n\n for p in range(test_images.shape[0]):\n img = test_images[p].reshape((28, 28)).copy()\n img *= 255.0\n img_pil = Image.fromarray(np.uint8(img))\n image_path = base_path / Path(\"%d.jpg\" % (p))\n img_pil.save(image_path)", "def setup_train(self, input_data, target):\n \n W_my = self.setup_print(self.W, \"intial W\")\n \n # The weights with the random adjustment are <batch_size, from, to>, so\n # we inflate W here, too.\n W_exp = tf.tile(tf.expand_dims(W_my, 0), [self.config['batch_size'], 1, 1]) # <batch_size, from, to>\n\n # 1. Actual output\n output = self.setup_forward(W_exp, input_data, prefix=\"org\") # <batch_size, (timesteps,) output>\n loss = self.setup_loss(output, target, prefix=\"org\") # <batch_size>\n loss = self.setup_print(loss, \"loss\")\n \n # 2. Test output in the environment\n # TODO Do the random test around the decayed weights\n # NOTE: W_adj_source keeps its value inside a single run\n # https://stackoverflow.com/questions/52213325/are-tensorflow-random-values-guaranteed-to-be-the-same-inside-a-single-run\n W_adj = self.W_adj_source # <batch_size, from, to>\n W_adj = self.setup_print(W_adj, \"W_adj\")\n \n output_adj = self.setup_forward(W_exp + W_adj, input_data, prefix=\"adj\")\n loss_adj = self.setup_loss(output_adj, target, prefix=\"adj\")\n loss_adj = self.setup_print(loss_adj, \"loss_adj\")\n # improvement is positive when we go from large error to small error\n improvement = loss - loss_adj # <batch_size>\n improvement = self.setup_print(improvement, \"improvement\")\n \n # Update the weights\n improvement = tf.expand_dims(tf.expand_dims(improvement, 1), 2) # <batch_size, 1, 1>\n weight_update = W_adj * improvement # <batch_size, from, to>\n weight_update = self.setup_print(weight_update, \"weight_update\")\n weight_update = tf.reduce_mean(weight_update, axis=0) # <from, to>\n \n weight_update = self.setup_print(weight_update, \"weight_update_reduced\")\n weight_update = self.W.assign_add(weight_update)\n \n # Get the average loss\n loss_avg = tf.reduce_mean(loss, axis=0)\n \n return weight_update, loss_avg", "def build_encoder(tparams, options):\n\t# word embedding (source)\n\tembedding = tensor.tensor3('embedding', dtype='float32')\n\tx_mask = tensor.matrix('x_mask', dtype='float32')\n\n\t# encoder\n\tproj = get_layer(options['encoder'])[1](tparams, embedding, options,\n\t\t\t\t\t\t\t\t\t\t\tprefix='encoder',\n\t\t\t\t\t\t\t\t\t\t\tmask=x_mask)\n\tctx = proj[0][-1]\n\n\treturn embedding, x_mask, ctx", "def __init__(self, E, U, height, width, filter_hs, conv_non_linear,\n hidden_units, batch_size, non_static, dropout_rates,subspace_size=None,\n activations=[Iden]):\n rng = 
np.random.RandomState(3435)\n feature_maps = hidden_units[0]\n self.batch_size = batch_size\n\n # define model architecture\n self.index = T.lscalar()\n self.x = T.matrix('x') \n self.y = T.ivector('y') \n self.Words = theano.shared(value=E, name=\"Words\") \n self.Users = None \n self.u = None\n self.subspace_size = subspace_size\n zero_vec_tensor = T.vector()\n self.zero_vec = np.zeros(width)\n # reset Words to 0?\n self.set_zero = theano.function([zero_vec_tensor],\n updates=[(self.Words, T.set_subtensor(self.Words[0,:],zero_vec_tensor))],\n allow_input_downcast=True)\n # inputs to the ConvNet go to all convolutional filters:\n layer0_input = self.Words[T.cast(self.x.flatten(), dtype=\"int32\")].reshape(\n (self.x.shape[0], 1, self.x.shape[1], self.Words.shape[1]))\n self.conv_layers = [] \n \n # outputs of convolutional filters\n layer1_inputs = []\n image_shape = (batch_size, 1, height, width)\n filter_w = width \n for filter_h in filter_hs: \n filter_shape = (feature_maps, 1, filter_h, filter_w)\n pool_size = (height-filter_h+1, width-filter_w+1)\n conv_layer = LeNetConvPoolLayer(rng, input=layer0_input,\n image_shape=image_shape,\n filter_shape=filter_shape,\n poolsize=pool_size,\n non_linear=conv_non_linear)\n layer1_input = conv_layer.output.flatten(2)\n self.conv_layers.append(conv_layer)\n layer1_inputs.append(layer1_input)\n # inputs to the MLP\n layer1_input = T.concatenate(layer1_inputs, 1)\n if U is not None:\n print \"Will use user embeddings\"\n self.u = T.ivector('u')\n self.Users = theano.shared(value=U, name=\"Users\")\n them_users = self.Users[self.u]\n if self.subspace_size:\n print \"and subspace\"\n # set_trace()\n self.subspace = HiddenLayer(rng, them_users, U.shape[1], subspace_size, Sigmoid)\n self.peep = theano.function([self.x, self.u],[self.subspace.output,layer1_input],allow_input_downcast=True)\n\n layer1_input = T.concatenate((layer1_input,T.nnet.sigmoid(self.subspace.output)),1)\n layer_sizes = [feature_maps*len(filter_hs)+subspace_size] \n # layer1_input = T.concatenate((layer1_input,them_users),1)\n # layer_sizes = [feature_maps*len(filter_hs)+U.shape[1]]\n\n else:\n layer1_input = T.concatenate((layer1_input,them_users),1)\n layer_sizes = [feature_maps*len(filter_hs)+U.shape[1]]\n\n else:\n print \"NO user embeddings\"\n layer_sizes = [feature_maps*len(filter_hs)]\n layer_sizes += hidden_units[1:]\n \n super(ConvNet, self).__init__(rng, input=layer1_input,\n layer_sizes=layer_sizes,\n activations=activations,\n dropout_rates=dropout_rates)\n\n # add parameters from convolutional layers\n for conv_layer in self.conv_layers:\n self.params += conv_layer.params\n if non_static:\n # if word vectors are allowed to change, add them as model parameters\n self.params += [self.Words]\n if U is not None:\n # if self.subspace_size is None:\n self.params += [self.Users]", "def build_sentence_encoders(tparams, options):\n opt_ret = dict()\n trng = RandomStreams(1234)\n\n #xs, masks, sents_all = [], [], []\n in_outs = []\n\n langs = options['langs']\n for lang in langs:\n # description string: #words x #samples\n # forward\n x = tensor.matrix('x_%s'%lang, dtype='int64')\n mask = tensor.matrix('x_mask_%s'%lang, dtype='float32')\n\n n_timesteps = x.shape[0]\n n_samples = x.shape[1]\n\n # Word embedding (forward)\n emb = tparams['Wemb_%s'%lang][x.flatten()].reshape([n_timesteps, n_samples, options['dim_word']])\n\n if options['bidirectional_enc']:\n # backward RNN\n x_r = x[::-1]\n mask_r = mask[::-1]\n emb_r = tparams['Wemb_%s'%lang][x_r.flatten()].reshape([n_timesteps, 
n_samples, options['dim_word']])\n\n if options['use_dropout']:\n retain_probability_emb = 1-options['dropout_embedding']\n retain_probability_hidden = 1-options['dropout_hidden']\n retain_probability_source = 1-options['dropout_source']\n rec_dropout = theano.shared(numpy.array([retain_probability_hidden]*2, dtype='float32'))\n rec_dropout_r = theano.shared(numpy.array([retain_probability_hidden]*2, dtype='float32'))\n emb_dropout = theano.shared(numpy.array([retain_probability_emb]*2, dtype='float32'))\n emb_dropout_r = theano.shared(numpy.array([retain_probability_emb]*2, dtype='float32'))\n source_dropout = theano.shared(numpy.float32(retain_probability_source))\n emb *= source_dropout\n if options['bidirectional_enc']:\n embr *= source_dropout\n else:\n rec_dropout = theano.shared(numpy.array([1.]*2, dtype='float32'))\n rec_dropout_r = theano.shared(numpy.array([1.]*2, dtype='float32'))\n emb_dropout = theano.shared(numpy.array([1.]*2, dtype='float32'))\n emb_dropout_r = theano.shared(numpy.array([1.]*2, dtype='float32'))\n\n # Encode sentences\n if options['encoder_%s'%lang] == 'bow':\n sents = (emb * mask[:,:,None]).sum(0)\n else:\n # iteratively push input from first hidden layer until the last\n for i in range(int(options['n_enc_hidden_layers'])):\n layer_name_prefix='encoder_%s_%i'%(lang,i)\n # if first layer input are wembs, otherwise input will be output of last hidden layer\n layer_below=emb if i==0 else layer_below[0]\n layer_below=get_layer(options['encoder_%s'%lang])[1](tparams,\n layer_below, options, None, prefix=layer_name_prefix, mask=mask,\n emb_dropout=emb_dropout, rec_dropout=rec_dropout)\n\n if i==int(options['n_enc_hidden_layers'])-1:\n # sentence embeddings (projections) are the output of the last hidden layer\n proj = layer_below\n\n if options['bidirectional_enc']:\n for i in range(int(options['n_enc_hidden_layers'])):\n layer_name_prefix='encoder_%s_r_%i'%(lang,i)\n # if first layer input are wembs, otherwise input will be output of last hidden layer\n layer_below=emb_r if i==0 else layer_below[0]\n layer_below=get_layer(options['encoder_%s'%lang])[1](tparams,\n layer_below, options, None, prefix=layer_name_prefix, mask=mask_r,\n emb_dropout=emb_dropout_r, rec_dropout=rec_dropout_r)\n\n if i==int(options['n_enc_hidden_layers'])-1:\n # sentence embeddings (projections) are the output of the last hidden layer\n proj_r = layer_below\n\n # use last hidden state of forward and backward RNNs\n sents = concatenate([proj[0][-1],proj_r[0][-1]], axis=proj[0].ndim-2)\n else:\n sents = proj[0][-1]\n\n if options['use_dropout']:\n sents *= shared_dropout_layer((n_samples, options['dim']), use_noise, trng, retain_probability_hidden)\n\n # project sentences into multimodal space\n sents_mm = get_layer('ff')[1](tparams, sents, options, prefix='ff_sentence_mm', activ='linear')\n if not 'attention_type' in options or options['attention_type'] == 'dot':\n sents_mm = l2norm(sents_mm)\n\n if options['use_dropout']:\n sents_mm *= shared_dropout_layer((n_samples, options['dim_multimodal']), use_noise, trng, retain_probability_hidden)\n\n # outputs per language\n in_outs.append(([x, mask], sents_mm))\n\n return trng, in_outs", "def autoencode(self, imgs):\n x = tf.layers.conv2d(imgs, filters=32, kernel_size=3, strides=1, padding='same', activation=tf.nn.relu)\n shape2 = x.shape\n x = tf.layers.max_pooling2d(x, pool_size=2, strides=2)\n x = tf.layers.conv2d(x, filters=64, kernel_size=3, strides=1, padding='same', activation=tf.nn.relu)\n shape1 = x.shape\n x = 
tf.layers.max_pooling2d(x, pool_size=2, strides=2)\n # shape0 = x.shape\n # x = tf.layers.conv2d(x, filters=128, kernel_size=3, strides=1, padding='same', activation=tf.nn.relu)\n # x = tf.layers.max_pooling2d(x, pool_size=2, strides=2)\n# shape_orig = x.shape\n# x = tf.layers.flatten(x)\n# shape_dense = x.shape\n# x = tf.layers.dense(x, units=512, activation=tf.nn.relu)\n# x = tf.layers.dense(x, units=shape_dense[-1], activation=tf.nn.relu)\n# x = tf.reshape(x, [-1, shape_orig[1], shape_orig[2], shape_orig[3]])\n # x = tf.layers.conv2d(x, filters=128, kernel_size=3, strides=1, padding='same', activation=tf.nn.relu)\n # x = tf.image.resize(x, size=(shape0[1], shape0[2]), method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)\n x = tf.layers.conv2d(x, filters=64, kernel_size=3, strides=1, padding='same', activation=tf.nn.relu)\n x = tf.image.resize(x, size=(shape1[1], shape1[2]), method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)\n x = tf.layers.conv2d(x, filters=32, kernel_size=3, strides=1, padding='same', activation=tf.nn.relu)\n x = tf.image.resize(x, size=(shape2[1], shape2[2]), method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)\n x = tf.layers.conv2d(x, filters=3, kernel_size=3, strides=1, padding='same', activation=tf.nn.relu)\n \n return x", "def apply_network(inputs):\n return apply_layer(tf.sigmoid(apply_layer(inputs, 64)), 1)", "def __init__(self, nh, nc, ne, de, cs, normal=True, longdependence=False,optimization=None):\n ### emb the embedding matrix for all the vocabulary\n self.emb = theano.shared(name='embeddings',\n value=0.2 * numpy.random.uniform(-1.0, 1.0,\n (ne+1, de))\n # add one for padding at the end\n .astype(theano.config.floatX))\n ### weight for input \n self.wxi = theano.shared(name='wxi',\n value=0.2 * numpy.random.uniform(-1.0, 1.0,\n (de * cs, nh))\n .astype(theano.config.floatX))\n self.wxf = theano.shared(name='wxf',\n value=0.2 * numpy.random.uniform(-1.0, 1.0,\n (de * cs, nh))\n .astype(theano.config.floatX))\n self.wxc = theano.shared(name='wxc',\n value=0.2 * numpy.random.uniform(-1.0, 1.0,\n (de * cs, nh))\n .astype(theano.config.floatX)) \n self.wxo = theano.shared(name='wxo',\n value=0.2 * numpy.random.uniform(-1.0, 1.0,\n (de * cs, nh))\n .astype(theano.config.floatX)) \n \n ### weight for t-1 hidden layer \n self.whi = theano.shared(name='whi',\n value=0.2 * numpy.random.uniform(-1.0, 1.0,\n (nh, nh))\n .astype(theano.config.floatX))\n self.whf = theano.shared(name='whf',\n value=0.2 * numpy.random.uniform(-1.0, 1.0,\n (nh, nh))\n .astype(theano.config.floatX)) \n self.whc = theano.shared(name='whc',\n value=0.2 * numpy.random.uniform(-1.0, 1.0,\n (nh, nh))\n .astype(theano.config.floatX))\n self.who = theano.shared(name='who',\n value=0.2 * numpy.random.uniform(-1.0, 1.0,\n (nh, nh))\n .astype(theano.config.floatX)) \n \n ### weight for memeory cell [diagonal matrix]\n ## Initialization requirement : initialize as the diagonal matrix. becuase this the proportion of\n ## the memory from history/current state for each neuron. 
\n self.wci = theano.shared(name='wci',\n value= 0.2 * numpy.diag(numpy.random.uniform(0.0, 1.0,\n nh))\n .astype(theano.config.floatX))\n self.wcf = theano.shared(name='wcf',\n value= 0.2 * numpy.diag(numpy.random.uniform(0.0, 1.0,\n nh))\n .astype(theano.config.floatX))\n self.wco = theano.shared(name='wco',\n value= 0.2 * numpy.diag(numpy.random.uniform(0.0, 1.0,\n nh))\n .astype(theano.config.floatX))\n \n ### weight for the output layer \n self.w = theano.shared(name='w',\n value=0.2 * numpy.random.uniform(-1.0, 1.0,\n (nh, nc))\n .astype(theano.config.floatX))\n \n ### bias\n self.bi = theano.shared(name='bi',\n value=numpy.zeros(nh,\n dtype=theano.config.floatX))\n self.bf = theano.shared(name='bf',\n value=numpy.zeros(nh,\n dtype=theano.config.floatX))\n self.bc = theano.shared(name='bc',\n value=numpy.zeros(nh,\n dtype=theano.config.floatX))\n self.bo = theano.shared(name='bo',\n value=numpy.zeros(nh,\n dtype=theano.config.floatX))\n self.b = theano.shared(name='b',\n value=numpy.zeros(nc,\n dtype=theano.config.floatX))\n \n ### Initialization for recurrence\n self.h0 = theano.shared(name='h0',\n value=numpy.zeros(nh,\n dtype=theano.config.floatX))\n self.c0 = theano.shared(name='c0',\n value=numpy.zeros(nh,\n dtype=theano.config.floatX))\n \n # bundle\n self.params = [self.wxi,self.wxf,self.wxc,self.wxo ,\\\n self.whi,self.whf,self.whc,self.who ,\\\n self.wco,\\\n self.bi,self.bf,self.bc,self.bo ,\\\n self.w,self.b, self.h0,self.c0]\n \n # word embeding: use vector of [de] to represent each wrod [trained parameter]\n idxs = T.imatrix()\n x = self.emb[idxs].reshape((idxs.shape[0], de*cs)) ## flatten the matrix in to (n_step,dimension * context widnow)\n y_sentence = T.ivector('y_sentence') # labels [ column ]\n \n ################################## Different recurrent Method #############################\n ########################### AR + Bi-direction LSTM + Attention + None #####################\n if longdependence=='AR': \n ## goal: give longer dependence on the otuput sequence, by directly using the previous hidden layer \n ## value on the output layer, in addition to the hidden layer. 
[Ar[1]-lstm + lag_Moving[3]]\n self.war0 = theano.shared(name='w0',\n value=numpy.diag(numpy.ones(nc))\n .astype(theano.config.floatX))\n self.war1 = theano.shared(name='w1',\n value=numpy.zeros((nc, nc))\n .astype(theano.config.floatX)) \n self.war2 = theano.shared(name='w2',\n value=numpy.zeros((nc, nc))\n .astype(theano.config.floatX))\n \n self.params = [self.wxi,self.wxf,self.wxc,self.wxo ,\\\n self.whi,self.whf,self.whc,self.who ,\\\n self.wco,\\\n self.bi,self.bf,self.bc,self.bo ,\\\n self.w,self.b, self.h0,self.c0,\\\n self.war0,self.war1,self.war2] \n \n def recurrence(x_t,h_tm1,h_tm2,c_tm1):\n i_t = T.nnet.sigmoid( T.dot(x_t, self.wxi) + T.dot(h_tm1, self.whi) + T.dot(c_tm1, self.wci) + self.bi )\n f_t = T.nnet.sigmoid( T.dot(x_t, self.wxf) + T.dot(h_tm1, self.whf) + T.dot(c_tm1, self.wcf) + self.bf )\n \n c_t = T.tanh(T.dot(x_t, self.wxc) + T.dot(h_tm1, self.whc) + self.bc)\n c_t = f_t * c_tm1+ i_t * c_t\n \n o_t = T.nnet.sigmoid( T.dot(x_t, self.wxo) + T.dot(h_tm1 , self.who) + T.dot(c_t, self.wco) + self.bo )\n \n h_t = o_t * T.tanh(c_t) \n ## change dimension from nh to nc\n p_t_0 = T.dot(h_t, self.w)\n p_t_1 = T.dot(h_tm1, self.w) \n p_t_2 = T.dot(h_tm2, self.w) \n\n ## compute output label dependency from history output\n q_t_0 = T.dot(p_t_0,self.war0) \n q_t_1 = T.dot(p_t_1,self.war1) \n q_t_2 = T.dot(p_t_2,self.war2) \n\n ## incorporate moving average\n s_t = self.b + q_t_0 + q_t_1 +q_t_2\n return [h_t,h_tm1,c_t,s_t]\n \n [h,_,c,s], _ = theano.scan(fn=recurrence,\n sequences=x,\n outputs_info=[self.h0,self.h0,self.c0, None], \n n_steps=x.shape[0]) \n s = T.nnet.softmax(s) \n p_y_given_x_sentence = s\n \n else:\n def recurrence(x_t, h_tm1,c_tm1):\n i_t = T.nnet.sigmoid( T.dot(x_t, self.wxi) + T.dot(h_tm1, self.whi) + T.dot(c_tm1, self.wci) + self.bi )\n f_t = T.nnet.sigmoid( T.dot(x_t, self.wxf) + T.dot(h_tm1, self.whf) + T.dot(c_tm1, self.wcf) + self.bf )\n \n c_t = T.tanh(T.dot(x_t, self.wxc) + T.dot(h_tm1, self.whc) + self.bc)\n c_t = f_t * c_tm1+ i_t * c_t\n \n o_t = T.nnet.sigmoid( T.dot(x_t, self.wxo) + T.dot(h_tm1, self.who) + T.dot(c_t, self.wco) + self.bo )\n \n h_t = o_t * T.tanh(c_t) \n s_t = T.dot(h_t, self.w) + self.b\n return [h_t, c_t ,s_t]\n\n #shape h[x.shape[0],nh],s[x.shape[0],1,nc]\n [h,c,s], _ = theano.scan(fn=recurrence,\n sequences=[x],\n outputs_info=[self.h0,self.c0, None], \n n_steps=x.shape[0])\n\n s = T.nnet.softmax(s)\n p_y_given_x_sentence = s\n\n ## get the highest probability\n y_pred = T.argmax(p_y_given_x_sentence, axis=1)\n\n # cost and gradients and learning rate\n lr = T.scalar('lr')\n\n sentence_nll = -T.mean(T.log(p_y_given_x_sentence)\n [T.arange(x.shape[0]), y_sentence])\n \n ## used for SGD\n sentence_gradients = T.grad(sentence_nll, self.params)\n if optimization ==None:\n sentence_updates = OrderedDict((p, p - lr*g)\n for p, g in\n zip(self.params, sentence_gradients))\n elif optimization =='Adagrad':\n sentence_updates = OrderedDict(adagrad(params=self.params, gparams=sentence_gradients, learning_rate = lr, epsilon = 1e-6))\n else:\n pass\n \n self.normalize = theano.function(inputs=[],\n updates={self.emb:\n self.emb /\n T.sqrt((self.emb**2)\n .sum(axis=1))\n \n .dimshuffle(0, 'x')})\n self.normal = normal\n\n ## add momentum and ada\n self.classify = theano.function(inputs=[idxs], outputs=y_pred)\n self.sentence_train = theano.function(inputs=[idxs, y_sentence, lr],\n outputs=sentence_nll,\n updates=sentence_updates)", "def _build_encoder(self, hparams):\n\t\tnum_layers = self.num_encoder_layers\n\t\tnum_redisual_layers = 
self.num_encoder_residual_layers\n\n\t\twith tf.variable_scope('encoder') as _:\n\t\t\tself.encoder_emb_inp = tf.nn.embedding_lookup(self.embedding_encoder, self.encoder_input_data)\n\n\t\t\tif hparams.encoder_type == 'uni':\n\t\t\t\t_info('num_layers = {} num_residual_layers = {}'.format(num_layers, num_redisual_layers))\n\t\t\t\t# 1. build a list of cells\n\t\t\t\tcell = self._build_encoder_cell(hparams, num_layers, num_redisual_layers)\n\t\t\t\t# 2. forward\n\t\t\t\t# encoder_outputs: [batch, time, hidden]\n\t\t\t\t# encoder_state: ([batch, hidden] for _ in range(layers))\n\t\t\t\tencoder_outputs, encoder_state = tf.nn.dynamic_rnn(\n\t\t\t\t\tcell,\n\t\t\t\t\tself.encoder_emb_inp,\n\t\t\t\t\tdtype=self.dtype,\n\t\t\t\t\tsequence_length=self.seq_length_encoder_input_data,\n\t\t\t\t\tswap_memory=True)\n\t\t\telif hparams.encoder_type == 'bi':\n\t\t\t\tif not num_layers % 2 == 0:\n\t\t\t\t\t_error('Bi-directional requires num_layers={} should be divided by 2'.format(num_layers))\n\t\t\t\t\traise ValueError\n\t\t\t\tnum_bi_layers = int(num_layers / 2)\n\t\t\t\tnum_bi_residual_layers = num_bi_layers - 1\n\t\t\t\t_info(' num_bi_layers={} num_bi_residual_layers={}'.format(num_bi_layers, num_bi_residual_layers))\n\n\t\t\t\tcell_fw = self._build_encoder_cell(hparams, num_bi_layers, num_bi_residual_layers)\n\t\t\t\tcell_bw = self._build_encoder_cell(hparams, num_bi_layers, num_bi_residual_layers)\n\n\t\t\t\t# bi_outputs: (fw, bw): fw: [batch, seq, hidden]\n\t\t\t\t# bi_state: (fw, bw): fw : [[batch, hidden] for _ in range(layers)]\n\t\t\t\tbi_outputs, bi_state = tf.nn.bidirectional_dynamic_rnn(\n\t\t\t\t\tcell_fw,\n\t\t\t\t\tcell_bw,\n\t\t\t\t\tself.encoder_emb_inp,\n\t\t\t\t\tdtype=self.dtype,\n\t\t\t\t\tsequence_length=self.seq_length_encoder_input_data,\n\t\t\t\t\tswap_memory=True)\n\n\t\t\t\tif num_bi_layers == 1:\n\t\t\t\t\tencoder_state = bi_state\n\t\t\t\telse:\n\t\t\t\t\tencoder_state = []\n\t\t\t\t\tfor layer_id in range(num_bi_layers):\n\t\t\t\t\t\tencoder_state.append(bi_state[0][layer_id])\t\t# fw state in layer id\n\t\t\t\t\t\tencoder_state.append(bi_state[1][layer_id])\t\t# bw state in layer id\n\t\t\t\t\tencoder_state = tuple(encoder_state)\n\t\t\t\tencoder_outputs = tf.concat(bi_outputs, -1)\t\t# [batch, seq, hidden * 2]\n\t\t\telse:\n\t\t\t\t_error('Unknow encoder type: {}'.format(hparams.encoder_type))\n\t\t\t\traise ValueError\n\t\t\n\t\treturn encoder_outputs, encoder_state", "def resnet_autoencoder_v1(encoder_depth, decoder_depth, width_multiplier, metric_channels, # noqa\n cifar_stem=False, data_format='channels_last',\n dropblock_keep_probs=None, dropblock_size=None,\n mask_augs=0., greyscale_viz=False, skip=True):\n encoder = resnet_encoder_v1(encoder_depth, \n width_multiplier,\n cifar_stem=cifar_stem, \n data_format=data_format,\n dropblock_keep_probs=dropblock_keep_probs, \n dropblock_size=dropblock_size)\n\n decoder = resnet_decoder_v1(decoder_depth=decoder_depth,\n encoder_depth=encoder_depth,\n width_multiplier=width_multiplier,\n cifar_stem=cifar_stem, \n data_format=data_format,\n dropblock_keep_probs=dropblock_keep_probs, \n dropblock_size=dropblock_size)\n\n metric = learned_metric_v1(data_format=data_format, metric_channels=metric_channels) \n \n return resnet_autoencoder_v1_generator(\n encoder=encoder,\n decoder=decoder,\n metric=metric,\n skip=skip,\n mask_augs=mask_augs,\n greyscale_viz=greyscale_viz,\n data_format=data_format)", "def train_conv_net(datasets,datasets_weights,\n U, U_Topical,\n img_w=300, \n filter_hs=[3,4,5],\n hidden_units=[100,2], \n 
dropout_rate=[0.5],\n shuffle_batch=True,\n n_epochs=25, \n batch_size=50, \n lr_decay = 0.95,\n conv_non_linear=\"relu\",\n use_valid_set=True,\n show_states=False,\n activations=[Iden],\n sqr_norm_lim=9,\n non_static=True): \n rng = np.random.RandomState(3435)\n img_h = len(datasets[0][0])-1 \n U_Topical.dtype = \"float32\"\n (num_topics,topic_dim) = U_Topical.shape\n word_w = img_w\n img_w = int(img_w + num_topics*topic_dim)\n filter_w = img_w \n feature_maps = hidden_units[0]\n filter_shapes = []\n pool_sizes = []\n for filter_h in filter_hs: \n filter_shapes.append((feature_maps, 1, filter_h, filter_w)) # 100 1 3 300\n pool_sizes.append((img_h-filter_h+1, img_w-filter_w+1)) # size of words samples one\n parameters = [(\"image shape\",img_h,img_w),(\"filter shape\",filter_shapes), (\"hidden_units\",hidden_units),\n (\"dropout\", dropout_rate), (\"batch_size\",batch_size),(\"non_static\", non_static),\n (\"learn_decay\",lr_decay), (\"conv_non_linear\", conv_non_linear), (\"non_static\", non_static)\n ,(\"sqr_norm_lim\",sqr_norm_lim),(\"shuffle_batch\",shuffle_batch)]\n #print parameters \n \n #define model architecture\n index = T.lscalar()\n x = T.matrix('x') \n y = T.ivector('y')\n x_topic = T.tensor3('x_topic')\n Words = theano.shared(value = U, name = \"Words\")\n Topics = theano.shared(value=U_Topical,name=\"Topics\")\n zero_vec_tensor = T.vector()\n zero_vec = np.zeros(word_w, dtype='float32')\n set_zero = theano.function([zero_vec_tensor], updates=[(Words, T.set_subtensor(Words[0,:], zero_vec_tensor))])\n layer0_input_words = Words[T.cast(x.flatten(),dtype=\"int32\")].reshape((x.shape[0],1,x.shape[1],Words.shape[1])) \n layer0_inputs_topics = []\n for i in range(num_topics):\n sin_topic = x_topic[:,:,i]\n Topic = Topics[i].reshape((1,Topics[i].shape[0]))\n weights = sin_topic.flatten()\n weights = weights.reshape((weights.shape[0],1))\n layer0_inputs_topics.append(T.dot(weights, Topic))\n layer0_input_topics = T.concatenate(layer0_inputs_topics,1)\n layer0_input_topics = layer0_input_topics.reshape((x_topic.shape[0],1,x_topic.shape[1],num_topics*topic_dim))\n layer0_input = T.concatenate([layer0_input_words,layer0_input_topics],3) \n conv_layers = []\n layer1_inputs = []\n for i in xrange(len(filter_hs)):\n filter_shape = filter_shapes[i]\n pool_size = pool_sizes[i]\n conv_layer = LeNetConvPoolLayer(rng, input=layer0_input,image_shape=(batch_size, 1, img_h, img_w),\n filter_shape=filter_shape, poolsize=pool_size, non_linear=conv_non_linear)\n layer1_input = conv_layer.output.flatten(2)\n conv_layers.append(conv_layer)\n layer1_inputs.append(layer1_input)\n layer1_input = T.concatenate(layer1_inputs,1)\n hidden_units[0] = feature_maps*len(filter_hs) \n classifier = MLPDropout(rng, input=layer1_input, layer_sizes=hidden_units, activations=activations, dropout_rates=dropout_rate)\n \n #define parameters of the model and update functions using adadelta\n params = classifier.params \n for conv_layer in conv_layers:\n params += conv_layer.params\n \n if non_static:\n #if word vectors are allowed to change, add them as model parameters\n params += [Words] #params are model parameters\n params += [Topics] #Topics embedding are adjusted\n cost = classifier.negative_log_likelihood(y) \n dropout_cost = classifier.dropout_negative_log_likelihood(y) \n grad_updates = sgd_updates_adadelta(params, dropout_cost, lr_decay, 1e-6, sqr_norm_lim)\n \n #shuffle dataset and assign to mini batches. 
if dataset size is not a multiple of mini batches, replicate \n #extra data (at random)\n np.random.seed(3435)\n if datasets[0].shape[0] % batch_size > 0:\n extra_data_num = batch_size - datasets[0].shape[0] % batch_size\n random_index = np.random.permutation(np.arange(datasets[0].shape[0])) \n random_index.astype('int32')\n train_set = datasets[0][random_index,:]\n train_set_weights = datasets_weights[0][random_index,:,:]\n extra_data = train_set[:extra_data_num]\n extra_data_weights = train_set_weights[:extra_data_num]\n new_data=np.append(datasets[0],extra_data,axis=0)\n new_data_weights = np.append(datasets_weights[0],extra_data_weights,axis = 0)\n else:\n new_data = datasets[0]\n new_data_weights = datasets_weights[0]\n random_index = np.random.permutation(np.arange(new_data.shape[0])) \n random_index.astype('int32')\n new_data = new_data[random_index]\n new_data_weights = new_data_weights[random_index]\n n_batches = new_data.shape[0]/batch_size\n n_train_batches = int(np.round(n_batches*0.9))\n \n test_set_x = np.asarray(datasets[1][:,:img_h] ,\"float32\")\n test_set_x_topic = np.asarray(datasets_weights[1][:,:img_h,:] ,\"float32\")\n test_set_y = np.asarray(datasets[1][:,-1],\"int32\")\n if use_valid_set:\n train_set = new_data[:n_train_batches*batch_size,:]\n train_set_weights = new_data_weights[:n_train_batches*batch_size,:,:]\n val_set = new_data[n_train_batches*batch_size:,:]\n val_set_weights = new_data_weights[n_train_batches*batch_size:,:,:] \n train_set_x, train_set_x_topic, train_set_y = shared_dataset((train_set[:,:img_h],train_set_weights,train_set[:,-1]))\n val_set_x, val_set_x_topic, val_set_y = shared_dataset((val_set[:,:img_h],val_set_weights,val_set[:,-1]))\n n_val_batches = n_batches - n_train_batches\n val_model = theano.function([index], classifier.errors(y),\n givens={\n x: val_set_x[index * batch_size: (index + 1) * batch_size],\n x_topic: val_set_x_topic[index * batch_size: (index + 1) * batch_size],\n y: val_set_y[index * batch_size: (index + 1) * batch_size]})\n else:\n train_set = new_data[:,:] \n train_set_x, train_set_x_topic, train_set_y = shared_dataset((train_set[:,:img_h],train_set_weights,train_set[:,-1])) \n \n #make theano functions to get train/val/test errors\n test_model = theano.function([index], classifier.errors(y),\n givens={\n x: train_set_x[index * batch_size: (index + 1) * batch_size],\n x_topic: train_set_x_topic[index * batch_size: (index + 1) * batch_size],\n y: train_set_y[index * batch_size: (index + 1) * batch_size]}) \n train_model = theano.function([index], cost, updates=grad_updates,\n givens={\n x: train_set_x[index*batch_size:(index+1)*batch_size],\n x_topic: train_set_x_topic[index * batch_size: (index + 1) * batch_size],\n y: train_set_y[index*batch_size:(index+1)*batch_size]}) \n test_pred_layers = []\n test_size = test_set_x.shape[0]\n \n \n\n test_layer0_input_words = Words[T.cast(x.flatten(),dtype=\"int32\")].reshape((test_size,1,img_h,Words.shape[1])) \n test_layer0_inputs_topics = []\n for i in range(num_topics):\n sin_topic = x_topic[:,:,i]\n Topic = Topics[i].reshape((1,Topics[i].shape[0]))\n weights = sin_topic.flatten()\n weights = weights.reshape((weights.shape[0],1))\n test_layer0_inputs_topics.append(T.dot(weights, Topic))\n test_layer0_input_topics = T.concatenate(test_layer0_inputs_topics,1)\n test_layer0_input_topics = test_layer0_input_topics.reshape((test_size,1,img_h,num_topics*topic_dim))\n test_layer0_input = T.concatenate([test_layer0_input_words,test_layer0_input_topics],3) \n\n\n\n for conv_layer in 
conv_layers:\n test_layer0_output = conv_layer.predict(test_layer0_input, test_size)\n test_pred_layers.append(test_layer0_output.flatten(2))\n test_layer1_input = T.concatenate(test_pred_layers, 1)\n test_y_pred = classifier.predict(test_layer1_input)\n\n test_error = T.mean(T.neq(test_y_pred, y))\n test_model_all = theano.function([x,x_topic,y], test_error) \n \n #start training over mini-batches\n print '... training'\n epoch = 0\n best_val_perf = 0\n val_perf = 0\n test_perf = 0 \n cost_epoch = 0 \n while (epoch < n_epochs): \n epoch = epoch + 1\n if shuffle_batch:\n for minibatch_index in np.random.permutation(range(n_train_batches)):\n cost_epoch = train_model(minibatch_index)\n set_zero(zero_vec)\n else:\n for minibatch_index in xrange(n_train_batches):\n cost_epoch = train_model(minibatch_index) \n set_zero(zero_vec)\n train_losses = [test_model(i) for i in xrange(n_train_batches)]\n train_perf = 1 - np.mean(train_losses)\n if use_valid_set:\n val_losses = [val_model(i) for i in xrange(n_val_batches)]\n val_perf = 1- np.mean(val_losses)\n\n if val_perf >= best_val_perf:\n params_conv = [] \n params_output = {}\n test_loss = test_model_all(test_set_x,test_set_x_topic, test_set_y) \n test_perf = 1- test_loss \n best_val_perf = val_perf \n for conv_layer in conv_layers:\n params_conv.append(conv_layer.get_params())\n params_output = classifier.get_params()\n word_vec = Words.get_value()\n Topic_vec = Topics.get_value()\n else :\n val_perf = 0 \n if show_states:\n print('epoch %i, train perf %f %%, val perf %f' % (epoch, train_perf * 100., val_perf*100.))\n \n if not use_valid_set:\n params_conv = [] \n params_output = {}\n test_loss = test_model_all(test_set_x,test_set_x_topic, test_set_y) \n test_perf = 1- test_loss \n \n for conv_layer in conv_layers:\n params_conv.append(conv_layer.get_params())\n params_output = classifier.get_params()\n word_vec = Words.get_value()\n Topic_vec = Topics.get_value() \n \n return test_perf, [params_conv, params_output, word_vec,Topic_vec]", "def build_encoder(self):\n with tf.variable_scope(\"encoder\") as scope:\n length1 = tf.to_int32(tf.reduce_sum(self.encode_mask1, 1), name=\"length1\")\n\n if self.config.bidirectional_encoder:\n if self.config.encoder_dim % 2:\n raise ValueError(\n \"encoder_dim must be even when using a bidirectional encoder.\")\n num_units = self.config.encoder_dim // 2\n cell_fw = self._initialize_gru_cell(num_units) # Forward encoder\n cell_bw = self._initialize_gru_cell(num_units) # Backward encoder\n _, states = tf.nn.bidirectional_dynamic_rnn(\n cell_fw=cell_fw,\n cell_bw=cell_bw,\n inputs=self.encode_emb1,\n sequence_length=length1,\n dtype=tf.float32,\n scope=scope)\n thought_vectors1 = tf.concat(states, 1, name=\"thought_vectors1\")\n else:\n cell = self._initialize_gru_cell(self.config.encoder_dim)\n _, state = tf.nn.dynamic_rnn(\n cell=cell,\n inputs=self.encode_emb1,\n sequence_length=length1,\n dtype=tf.float32,\n scope=scope)\n # Use an identity operation to name the Tensor in the Graph.\n thought_vectors1 = tf.identity(state, name=\"thought_vectors1\")\n \n scope.reuse_variables()\n\n length2 = tf.to_int32(tf.reduce_sum(self.encode_mask2, 1), name=\"length2\")\n\n if self.config.bidirectional_encoder:\n if self.config.encoder_dim % 2:\n raise ValueError(\n \"encoder_dim must be even when using a bidirectional encoder.\")\n num_units = self.config.encoder_dim // 2\n cell_fw = self._initialize_gru_cell(num_units) # Forward encoder\n cell_bw = self._initialize_gru_cell(num_units) # Backward encoder\n _, states = 
tf.nn.bidirectional_dynamic_rnn(\n cell_fw=cell_fw,\n cell_bw=cell_bw,\n inputs=self.encode_emb2,\n sequence_length=length2,\n dtype=tf.float32,\n scope=scope)\n thought_vectors2 = tf.concat(states, 1, name=\"thought_vectors2\")\n else:\n cell = self._initialize_gru_cell(self.config.encoder_dim)\n _, state = tf.nn.dynamic_rnn(\n cell=cell,\n inputs=self.encode_emb2,\n sequence_length=length2,\n dtype=tf.float32,\n scope=scope)\n # Use an identity operation to name the Tensor in the Graph.\n thought_vectors2 = tf.identity(state, name=\"thought_vectors2\")\n\n self.thought_vectors1 = thought_vectors1\n self.thought_vectors2 = thought_vectors2", "def encode(self, n_dimension=2, learning_rate=0.01, training_epochs=10, batch_size=400):\n X = tf.placeholder(tf.float32,[None, self.n_input])\n tf.set_random_seed(50)\n \n \n n_hidden_layer1 = int(math.pow(2, int(2*math.log(self.n_input,2)/3+math.log(n_dimension,2)/3)))\n n_hidden_layer2 = int(math.pow(2, int(math.log(self.n_input,2)/3+2*math.log(n_dimension,2)/3)))\n n_hidden_layer3 = n_dimension\n \n weights = {\n 'encoder_w1':tf.Variable(tf.random_normal([self.n_input, n_hidden_layer1])),\n 'encoder_w2':tf.Variable(tf.random_normal([n_hidden_layer1, n_hidden_layer2])),\n 'encoder_w3':tf.Variable(tf.random_normal([n_hidden_layer2, n_hidden_layer3])),\n \n 'decoder_w1':tf.Variable(tf.random_normal([n_hidden_layer3, n_hidden_layer2])),\n 'decoder_w2':tf.Variable(tf.random_normal([n_hidden_layer2, n_hidden_layer1])),\n 'decoder_w3':tf.Variable(tf.random_normal([n_hidden_layer1, self.n_input])),\n }\n \n biases = {\n 'encoder_b1':tf.Variable(tf.random_normal([n_hidden_layer1])),\n 'encoder_b2':tf.Variable(tf.random_normal([n_hidden_layer2])),\n 'encoder_b3':tf.Variable(tf.random_normal([n_hidden_layer3])),\n \n 'decoder_b1':tf.Variable(tf.random_normal([n_hidden_layer2])),\n 'decoder_b2':tf.Variable(tf.random_normal([n_hidden_layer1])),\n 'decoder_b3':tf.Variable(tf.random_normal([self.n_input])),\n }\n \n \n def encoder(x):\n layer_1 = tf.nn.sigmoid(tf.add(tf.matmul(x, weights['encoder_w1']), biases['encoder_b1']))\n layer_2 = tf.nn.sigmoid(tf.add(tf.matmul(layer_1, weights['encoder_w2']), biases['encoder_b2']))\n layer_3 = tf.nn.sigmoid(tf.add(tf.matmul(layer_2, weights['encoder_w3']), biases['encoder_b3']))\n \n return layer_3\n\n def decoder(x):\n layer_1 = tf.nn.sigmoid(tf.add(tf.matmul(x, weights['decoder_w1']), biases['decoder_b1']))\n layer_2 = tf.nn.sigmoid(tf.add(tf.matmul(layer_1, weights['decoder_w2']), biases['decoder_b2']))\n layer_3 = tf.nn.sigmoid(tf.add(tf.matmul(layer_2, weights['decoder_w3']), biases['decoder_b3']))\n \n return layer_3\n \n encoder_op = encoder(X)\n decoder_op = decoder(encoder_op)\n\n y_pred = decoder_op\n y_true = X\n\n cost = tf.reduce_mean(tf.pow(y_pred - y_true, 2))\n optimizer = tf.train.AdamOptimizer(learning_rate).minimize(cost)\n \n \n with tf.Session() as sess:\n init = tf.global_variables_initializer()\n sess.run(init)\n n_batch = int(self.data.shape[0]/batch_size)\n for epoch in tqdm(range(training_epochs)):\n for batch_idx in range(n_batch):\n start = batch_idx * batch_size\n stop = start + batch_size\n _, encoder_result = sess.run([optimizer, encoder_op], feed_dict={X: self.data[start:stop]})\n self.X_test = sess.run(encoder_op, feed_dict={X:self.data})\n self.X_cost = sess.run(cost, feed_dict={X:self.data})\n \n return self.X_test, self.X_cost", "def train_miniautoencoder(self, train_X, depth):\n # Initialize mininet\n network = ann.ANN(train_X,\n hidden_depths=[depth],\n eta=self.eta,\n 
lamb=self.lamb,\n batch_size=self.batch_size,\n activation_type=self.activation_type)\n # Fit net\n network.fit(self.epochs)\n # Feedforward data array and obtain encoded data array\n data_encoded = encode(train_X, network)\n # Store in class container\n self.data_container.append(data_encoded)\n self.weights_container.append(network.weights)\n self.bias_container.append(network.biases)", "def build(\n n_iter=500, encoding_dim=3, depth=2, nh=20, activation='linear',\n initial_learning_rate=1e-3, solver='Adam', batch_size=32,\n random_state=10, early_stopping=False, patience=10, lamda=1e-1,\n knob_cols=None, auto_refit=True, max_refit_attempts=10):\n assert knob_cols is not None\n\n encoder_hidden_layers = [int(nh / (2**i)) for i in range(depth - 1)]\n if len(encoder_hidden_layers) > 0:\n if 0 in encoder_hidden_layers or encoder_hidden_layers[-1] < encoding_dim:\n return None\n decoder_hidden_layers = encoder_hidden_layers[::-1]\n hidden_layer_sizes = encoder_hidden_layers + \\\n [encoding_dim] + decoder_hidden_layers\n activations = [activation] * 2 * depth\n ae = FancyAutoEncoder(\n n_iter, hidden_layer_sizes, activations, initial_learning_rate,\n solver=solver, batch_size=batch_size, random_state=random_state,\n early_stopping=early_stopping, patience=patience, lamda=lamda,\n knob_cols=knob_cols, auto_refit=auto_refit,\n max_refit_attempts=max_refit_attempts)\n return ae", "def apply_activation(intermediate_output, intermediate_activation):\n if intermediate_activation is None:\n return intermediate_output\n\n if intermediate_activation == 'gelu':\n intermediate_output = nn.gelu(intermediate_output)\n elif intermediate_activation == 'relu':\n intermediate_output = nn.relu(intermediate_output)\n elif intermediate_activation == 'sigmoid':\n intermediate_output = nn.sigmoid(intermediate_output)\n elif intermediate_activation == 'softmax':\n intermediate_output = nn.softmax(intermediate_output)\n elif intermediate_activation == 'celu':\n intermediate_output = nn.celu(intermediate_output)\n elif intermediate_activation == 'elu':\n intermediate_output = nn.elu(intermediate_output)\n elif intermediate_activation == 'log_sigmoid':\n intermediate_output = nn.log_sigmoid(intermediate_output)\n elif intermediate_activation == 'log_softmax':\n intermediate_output = nn.log_softmax(intermediate_output)\n elif intermediate_activation == 'soft_sign':\n intermediate_output = nn.soft_sign(intermediate_output)\n elif intermediate_activation == 'softplus':\n intermediate_output = nn.softplus(intermediate_output)\n elif intermediate_activation == 'swish':\n intermediate_output = nn.swish(intermediate_output)\n elif intermediate_activation == 'tanh':\n intermediate_output = jnp.tanh(intermediate_output)\n else:\n raise NotImplementedError('%s activation function is not yet supported.' 
%\n intermediate_activation)\n\n return intermediate_output", "def create_model_optimizer(net,alpha):\n optimizer = chainer.optimizers.Adam(alpha=alpha)\n optimizer.setup(net)\n return optimizer", "def forward(self, src, mask):\n bs = src.shape[0]\n src = src.permute(2, 0, 1)\n m = src \n enc_embed = self.enc_embed.weight.unsqueeze(1).repeat(1, bs, 1)\n for layer in self.encoder_layers:\n m = layer(m,\n pos=enc_embed,\n src_mask = mask\n )\n return m.permute(1, 2, 0), enc_embed.permute(1, 2, 0)", "def rebuild(data, mult=None):\n if data.__class__ is str:\n data = numpy.load(data).item()\n p = data['params']\n model = deepAE(n_ins=p['n_ins'], numpy_rng=None, theano_rng=None,\n hidden_layers_sizes=p['hidden_layers_sizes'],\n corruption_levels=p['corruption_levels'])\n model_weights = []\n for i in xrange(len(data['model_weights'])):\n model_weights.append(theano.shared(value=data['model_weights'][i][1],\n name=data['model_weights'][i][0],\n borrow=True))\n model.rebuild_layers(model_weights, mult=mult)\n return model", "def complex_encoder(self, x, reuse=True): \n\t\tenc_layers = self.enc_layers\n\t\tregularizer = tf.contrib.layers.l2_regularizer(scale=self.reg_param)\n\t\twith tf.variable_scope('model', reuse=reuse):\n\t\t\twith tf.variable_scope('encoder', reuse=reuse):\n\t\t\t\tconv1 = tf.layers.conv2d(x, 32, 4, strides=(2,2), padding=\"SAME\", activation=tf.nn.elu, kernel_regularizer=regularizer, reuse=reuse, name='conv1')\n\t\t\t\tconv2 = tf.layers.conv2d(conv1, 32, 4, strides=(2,2), padding=\"SAME\", activation=tf.nn.elu, kernel_regularizer=regularizer, reuse=reuse, name='conv2')\n\t\t\t\tconv3 = tf.layers.conv2d(conv2, 64, 4, strides=(2,2), padding=\"SAME\", activation=tf.nn.elu, kernel_regularizer=regularizer, reuse=reuse, name='conv3')\n\t\t\t\tconv4 = tf.layers.conv2d(conv3, 64, 4, strides=(2,2), padding=\"SAME\", activation=tf.nn.elu, kernel_regularizer=regularizer, reuse=reuse, name='conv4')\n\t\t\t\tconv5 = tf.layers.conv2d(conv4, 256, 4, padding=\"VALID\", activation=tf.nn.elu, kernel_regularizer=regularizer, reuse=reuse, name='conv5')\n\t\t\t\tflattened = tf.reshape(conv5, (-1, 256*1*1))\n\t\t\t\tz_mean = tf.layers.dense(flattened, enc_layers[-1], activation=None, use_bias=False, kernel_regularizer=regularizer, reuse=reuse, name='fc-final')\n\t\treturn z_mean", "def run_net(inputs, **parameter):\n\n #---- set numpy random state for each run----\n np.random.set_state(np_state)\n\n # -----parameter setting-------\n n_ex = 1600\n n_inh = int(n_ex/4)\n n_input = MNIST_shape[1]*coding_n\n n_read = n_ex+n_inh\n\n R = parameter['R']\n f_in = parameter['f_in']\n f_EE = parameter['f_EE']\n f_EI = parameter['f_EI']\n f_IE = parameter['f_IE']\n f_II = parameter['f_II']\n\n A_EE = 60*f_EE\n A_EI = 60*f_EI\n A_IE = 60*f_IE\n A_II = 60*f_II\n A_inE = 60*f_in\n A_inI = 60*f_in\n\n tau_ex = parameter['tau_ex']*coding_duration\n tau_inh = parameter['tau_inh']*coding_duration\n tau_read= 30\n\n p_inE = parameter['p_in']*0.1\n p_inI = parameter['p_in']*0.1\n\n #------definition of equation-------------\n neuron_in = '''\n I = stimulus(t,i) : 1\n '''\n\n neuron = '''\n tau : 1\n dv/dt = (I-v) / (tau*ms) : 1 (unless refractory)\n dg/dt = (-g)/(3*ms) : 1\n dh/dt = (-h)/(6*ms) : 1\n I = (g+h)+13.5: 1\n x : 1\n y : 1\n z : 1\n '''\n\n neuron_read = '''\n tau : 1\n dv/dt = (I-v) / (tau*ms) : 1\n dg/dt = (-g)/(3*ms) : 1 \n dh/dt = (-h)/(6*ms) : 1\n I = (g+h): 1\n '''\n\n synapse = '''\n w : 1\n '''\n\n on_pre_ex = '''\n g+=w\n '''\n\n on_pre_inh = '''\n h-=w\n '''\n\n # -----Neurons and Synapses 
setting-------\n Input = NeuronGroup(n_input, neuron_in, threshold='I > 0', method='euler', refractory=0 * ms,\n name = 'neurongroup_input')\n\n G_ex = NeuronGroup(n_ex, neuron, threshold='v > 15', reset='v = 13.5', method='euler', refractory=3 * ms,\n name ='neurongroup_ex')\n\n G_inh = NeuronGroup(n_inh, neuron, threshold='v > 15', reset='v = 13.5', method='euler', refractory=2 * ms,\n name ='neurongroup_in')\n\n G_readout = NeuronGroup(n_read, neuron_read, method='euler', name='neurongroup_read')\n\n S_inE = Synapses(Input, G_ex, synapse, on_pre = on_pre_ex ,method='euler', name='synapses_inE')\n\n S_inI = Synapses(Input, G_inh, synapse, on_pre = on_pre_ex ,method='euler', name='synapses_inI')\n\n S_EE = Synapses(G_ex, G_ex, synapse, on_pre = on_pre_ex ,method='euler', name='synapses_EE')\n\n S_EI = Synapses(G_ex, G_inh, synapse, on_pre = on_pre_ex ,method='euler', name='synapses_EI')\n\n S_IE = Synapses(G_inh, G_ex, synapse, on_pre = on_pre_inh ,method='euler', name='synapses_IE')\n\n S_II = Synapses(G_inh, G_inh, synapse, on_pre = on_pre_inh ,method='euler', name='synapses_I')\n\n S_E_readout = Synapses(G_ex, G_readout, 'w = 1 : 1', on_pre=on_pre_ex, method='euler')\n\n S_I_readout = Synapses(G_inh, G_readout, 'w = 1 : 1', on_pre=on_pre_inh, method='euler')\n\n #-------initialization of neuron parameters----------\n G_ex.v = '13.5+1.5*rand()'\n G_inh.v = '13.5+1.5*rand()'\n G_readout.v = '0'\n G_ex.g = '0'\n G_inh.g = '0'\n G_readout.g = '0'\n G_ex.h = '0'\n G_inh.h = '0'\n G_readout.h = '0'\n G_ex.tau = tau_ex\n G_inh.tau = tau_inh\n G_readout.tau = tau_read\n\n [G_ex,G_in] = base.allocate([G_ex,G_inh],10,10,20)\n\n # -------initialization of network topology and synapses parameters----------\n S_inE.connect(condition='j<0.3*N_post', p = p_inE)\n S_inI.connect(condition='j<0.3*N_post', p = p_inI)\n S_EE.connect(condition='i != j', p='0.3*exp(-((x_pre-x_post)**2+(y_pre-y_post)**2+(z_pre-z_post)**2)/R**2)')\n S_EI.connect(p='0.2*exp(-((x_pre-x_post)**2+(y_pre-y_post)**2+(z_pre-z_post)**2)/R**2)')\n S_IE.connect(p='0.4*exp(-((x_pre-x_post)**2+(y_pre-y_post)**2+(z_pre-z_post)**2)/R**2)')\n S_II.connect(condition='i != j', p='0.1*exp(-((x_pre-x_post)**2+(y_pre-y_post)**2+(z_pre-z_post)**2)/R**2)')\n S_E_readout.connect(j='i')\n S_I_readout.connect(j='i+n_ex')\n\n S_inE.w = function.gamma(A_inE, S_inE.w.shape)\n S_inI.w = function.gamma(A_inI, S_inI.w.shape)\n S_EE.w = function.gamma(A_EE, S_EE.w.shape)\n S_IE.w = function.gamma(A_IE, S_IE.w.shape)\n S_EI.w = function.gamma(A_EI, S_EI.w.shape)\n S_II.w = function.gamma(A_II, S_II.w.shape)\n\n S_EE.pre.delay = '1.5*ms'\n S_EI.pre.delay = '0.8*ms'\n S_IE.pre.delay = '0.8*ms'\n S_II.pre.delay = '0.8*ms'\n\n # ------create network-------------\n net = Network(collect())\n net.store('init')\n\n # ------run network-------------\n stimulus = TimedArray(inputs[0], dt=Dt)\n net.run(duration * Dt)\n states = net.get_states()['neurongroup_read']['v']\n net.restore('init')\n return (states, inputs[1])", "def __init__(self, numpy_rng, theano_rng=None, n_ins=784,\r\n hidden_layers_sizes=[500, 500], n_outs=10,\r\n corruption_levels=[0.1, 0.1]):\r\n\r\n self.sigmoid_layers = []\r\n self.dA_layers = []\r\n self.params = []\r\n self.n_layers = len(hidden_layers_sizes)\r\n\r\n assert self.n_layers > 0\r\n\r\n if not theano_rng:\r\n theano_rng = RandomStreams(numpy_rng.randint(2 ** 30))\r\n # allocate symbolic variables for the data\r\n self.x = T.matrix('x') # the data is presented as rasterized images\r\n self.y = T.ivector('y') # the labels are presented 
as 1D vector of\r\n # [int] labels\r\n\r\n # The SdA is an MLP, for which all weights of intermediate layers\r\n # are shared with a different denoising autoencoders\r\n # We will first construct the SdA as a deep multilayer perceptron,\r\n # and when constructing each sigmoidal layer we also construct a\r\n # denoising autoencoder that shares weights with that layer\r\n # During pretraining we will train these autoencoders (which will\r\n # lead to chainging the weights of the MLP as well)\r\n # During finetunining we will finish training the SdA by doing\r\n # stochastich gradient descent on the MLP\r\n\r\n for i in xrange(self.n_layers):\r\n # construct the sigmoidal layer\r\n\r\n # the size of the input is either the number of hidden units of\r\n # the layer below or the input size if we are on the first layer\r\n if i == 0:\r\n input_size = n_ins\r\n else:\r\n input_size = hidden_layers_sizes[i - 1]\r\n\r\n # the input to this layer is either the activation of the hidden\r\n # layer below or the input of the SdA if you are on the first\r\n # layer\r\n if i == 0:\r\n layer_input = self.x\r\n else:\r\n layer_input = self.sigmoid_layers[-1].output\r\n\r\n sigmoid_layer = HiddenLayer(rng=numpy_rng,\r\n input=layer_input,\r\n n_in=input_size,\r\n n_out=hidden_layers_sizes[i],\r\n activation=T.nnet.sigmoid)\r\n # add the layer to our list of layers\r\n self.sigmoid_layers.append(sigmoid_layer)\r\n # its arguably a philosophical question...\r\n # but we are going to only declare that the parameters of the\r\n # sigmoid_layers are parameters of the StackedDAA\r\n # the visible biases in the dA are parameters of those\r\n # dA, but not the SdA\r\n self.params.extend(sigmoid_layer.params)\r\n\r\n # Construct a denoising autoencoder that shared weights with this\r\n # layer\r\n dA_layer = dA(numpy_rng=numpy_rng,\r\n theano_rng=theano_rng,\r\n input=layer_input,\r\n n_visible=input_size,\r\n n_hidden=hidden_layers_sizes[i],\r\n W=sigmoid_layer.W,\r\n bhid=sigmoid_layer.b)\r\n self.dA_layers.append(dA_layer)\r\n\r\n # We now need to add a logistic layer on top of the MLP\r\n self.logLayer = LogisticRegression(\r\n input=self.sigmoid_layers[-1].output,\r\n n_in=hidden_layers_sizes[-1], n_out=n_outs)\r\n\r\n self.params.extend(self.logLayer.params)\r\n # construct a function that implements one step of finetunining\r\n\r\n # compute the cost for second phase of training,\r\n # defined as the negative log likelihood\r\n self.finetune_cost = self.logLayer.negative_log_likelihood(self.y)\r\n # compute the gradients with respect to the model parameters\r\n # symbolic variable that points to the number of errors made on the\r\n # minibatch given by self.x and self.y\r\n self.errors = self.logLayer.errors(self.y)", "def __init__(self, input_dim=(3, 32, 32), num_filters=32, filter_size=7,\n hidden_dim=100, num_classes=10, weight_scale=1e-3, reg=0.0,\n dtype=np.float32):\n self.params = {}\n self.reg = reg\n self.dtype = dtype\n \n ############################################################################\n # TODO: Initialize weights and biases for the three-layer convolutional #\n # network. Weights should be initialized from a Gaussian with standard #\n # deviation equal to weight_scale; biases should be initialized to zero. #\n # All weights and biases should be stored in the dictionary self.params. 
#\n # Store weights and biases for the convolutional layer using the keys 'W1' #\n # and 'b1'; use keys 'W2' and 'b2' for the weights and biases of the #\n # hidden affine layer, and keys 'W3' and 'b3' for the weights and biases #\n # of the output affine layer. #\n ############################################################################\n C, H, W = input_dim\n self.params['W1'] = np.random.randn(num_filters, C, filter_size, filter_size) * weight_scale\n self.params['b1'] = np.zeros(num_filters)\n self.params['W2'] = np.random.randn( num_filters * (0.5 * H) * (0.5 * W), hidden_dim) * weight_scale # * sqrt(2.0/n)\n self.params['b2'] = np.zeros(hidden_dim)\n self.params['W3'] = np.random.randn(hidden_dim, num_classes) * weight_scale # * sqrt(2.0/n)\n self.params['b3'] = np.zeros(num_classes)\n ############################################################################\n # END OF YOUR CODE #\n ############################################################################\n\n for k, v in self.params.iteritems():\n self.params[k] = v.astype(dtype)", "def inference_fconv_m2i(addmotion=False, alpha=1.,input_shape=[None, 22,22,10,1],\n input_shape_m=[None, 22,22,10,3],\n n_filters=[1, 32, 32, 32],\n filter_sizes=[3, 2, 3, 2],\n corruption=False):\n \n # input to the network\n x = tf.placeholder(\n tf.float32, input_shape, name='x')\n m = tf.placeholder(\n tf.float32, input_shape_m, name='m')\n t = tf.placeholder(\n tf.float32, input_shape, name='t')\n keep_prob = tf.placeholder(tf.float32, name='keep_prob') #dropout (keep probability)\n \n encoder_i = []\n encoder_m = []\n encoder_main = []\n shapes_main = []\n shapes_i = []\n shapes_m = []\n \n #keep_prob=1.\n ### BRANCH 3d images\n '''\n with tf.variable_scope('img_conv1_1') as scope:\n shapes_i.append(x.get_shape().as_list())\n nfeaturemap = 3\n W = weight_variable([2, 2, 2, 1, nfeaturemap])\n b = bias_variable([nfeaturemap])\n output = tf.nn.relu(conv3d(x, W) + b)\n encoder_i.append(W)\n input_nfeaturemap = nfeaturemap\n current_input = output\n \n '''\n current_input = x\n input_nfeaturemap = 1\n #current_input = tf.multiply(current_input, m,)\n if addmotion:\n current_input = tf.concat([current_input, m], axis=4)\n input_nfeaturemap += 3\n \n with tf.variable_scope('img_conv1_1') as scope:\n shapes_i.append(current_input.get_shape().as_list())\n nfeaturemap = 128\n W = weight_variable([3, 3, 3, input_nfeaturemap, nfeaturemap])\n b = bias_variable([nfeaturemap])\n output = tf.nn.elu(conv3d(current_input, W) + b)\n encoder_i.append(W)\n input_nfeaturemap = nfeaturemap\n current_input = output\n \n current_input = tf.nn.dropout(current_input, keep_prob, [tf.shape(x)[0],1,1,1,input_nfeaturemap])\n #current_input = max_pool_2x2(current_input)\n #input_nfeaturemap = 1\n if addmotion:\n current_input = tf.concat([current_input, m], axis=4)\n input_nfeaturemap += 3\n \n with tf.variable_scope('img_conv1_2') as scope:\n shapes_i.append(current_input.get_shape().as_list())\n nfeaturemap = 64\n W = weight_variable([2, 2, 2, input_nfeaturemap, nfeaturemap])\n b = bias_variable([nfeaturemap])\n output = tf.nn.elu(conv3d(current_input, W) + b)\n encoder_i.append(W)\n input_nfeaturemap = nfeaturemap\n current_input = output\n \n current_input = tf.nn.dropout(current_input, keep_prob, [tf.shape(x)[0],1,1,1,input_nfeaturemap])\n if addmotion:\n current_input = tf.concat([current_input, m], axis=4)\n input_nfeaturemap += 3\n \n \n with tf.variable_scope('img_conv1_3') as scope:\n shapes_i.append(current_input.get_shape().as_list())\n nfeaturemap = 16\n W = 
weight_variable([2, 2, 2, input_nfeaturemap, nfeaturemap])\n b = bias_variable([nfeaturemap])\n output = tf.nn.elu(conv3d(current_input, W) + b)\n encoder_i.append(W)\n input_nfeaturemap = nfeaturemap\n current_input = output\n \n # resize upsampling\n #current_input = resize_volumes(current_input, 2, 2, 2) \n if addmotion:\n current_input = tf.concat([current_input, m], axis=4)\n input_nfeaturemap += 3\n \n with tf.variable_scope('deconv_m_2') as scope:\n shapes_i.append(current_input.get_shape().as_list())\n nfeaturemap = 3\n W = weight_variable([1, 1, 1, input_nfeaturemap, nfeaturemap])\n b = bias_variable([nfeaturemap])\n output = conv3d(current_input, W) + b\n encoder_m.append(W)\n #input_nfeaturemap = nfeaturemap\n m_hat = output\n \n with tf.variable_scope('img_conv1_3') as scope:\n shapes_i.append(current_input.get_shape().as_list())\n nfeaturemap = 1\n W = weight_variable([1, 1, 1, input_nfeaturemap, nfeaturemap])\n b = bias_variable([nfeaturemap])\n output = conv3d(current_input, W) + b\n encoder_i.append(W)\n input_nfeaturemap = nfeaturemap\n y = output\n \n \n \n \n #current_input = tf.concat([branch_image, branch_motion], axis=4)\n #input_nfeaturemap = 128\n #current_input = tf.multiply(branch_image,branch_motion)\n #print tf.shape(current_input)[-1]\n #tf.shape(current_input)[-1]\n #\n \n # Max pooling\n #current_input = max_pool_2x2(current_input)\n #\n ''' \n with tf.variable_scope('conv3_2') as scope:\n shapes_main.append(current_input.get_shape().as_list())\n nfeaturemap = 16\n W = weight_variable([1, 1, 1, input_nfeaturemap, nfeaturemap])\n b = bias_variable([nfeaturemap])\n output = tf.nn.relu(conv3d(current_input, W) + b)\n encoder_main.append(W)\n input_nfeaturemap = nfeaturemap\n current_input = output\n \n with tf.variable_scope('deconv_i_1') as scope:\n shapes_i.append(z.get_shape().as_list())\n nfeaturemap = 64\n W = weight_variable([3, 3, 3, z_input_nfeaturemap, nfeaturemap])\n b = bias_variable([nfeaturemap])\n output = tf.nn.relu(conv3d(z, W) + b)\n encoder_i.append(W)\n input_nfeaturemap = nfeaturemap\n current_input = output\n \n #current_input = tf.nn.dropout(current_input, keep_prob, [tf.shape(x)[0],1,1,1,input_nfeaturemap])\n \n with tf.variable_scope('deconv_i_2') as scope:\n shapes_i.append(current_input.get_shape().as_list())\n nfeaturemap = 1\n W = weight_variable([1, 1, 1, input_nfeaturemap, nfeaturemap])\n b = bias_variable([nfeaturemap])\n output = conv3d(current_input, W) + b\n encoder_i.append(W)\n input_nfeaturemap = nfeaturemap\n y = output\n \n with tf.variable_scope('deconv_m_1') as scope:\n shapes_i.append(z.get_shape().as_list())\n nfeaturemap = 64\n W = weight_variable([3, 3, 3, z_input_nfeaturemap, nfeaturemap])\n b = bias_variable([nfeaturemap])\n output = tf.nn.relu(conv3d(z, W) + b)\n encoder_m.append(W)\n input_nfeaturemap = nfeaturemap\n current_input = output\n \n \n \n '''\n \n loss_m = tf.reduce_mean(tf.square(m-m_hat))\n loss_i = tf.reduce_mean(tf.square(t-y))\n cost = alpha*loss_i #+ loss_m\n\n # %%\n return {'x': x, 't':t, 'm': m, 'm_hat':m_hat, 'y': y, 'cost': cost, 'loss_i':loss_i, 'loss_m':loss_m, 'keep_prob': keep_prob, 'encoder_main':encoder_main, 'encoder_i':encoder_i, 'encoder_m':encoder_m}", "def _create_encoder(self):\n\n def _init_weights(layer):\n \"\"\"Initializes the weights of a layer based on type.\"\"\"\n if isinstance(layer, (nn.Conv2d, nn.Linear)):\n torch.nn.init.xavier_uniform_(layer.weight)\n try:\n # Some layers may not have biases, so catch the exception and pass.\n layer.bias.data.fill_(0.0)\n except 
AttributeError:\n pass\n\n kernel_size = 5\n pad = 2\n input_channels = 1\n first_conv_channels = 6\n second_conv_channels = 16\n max_pool_kernel = 2\n linear_size = 120\n n_pixels = 7\n\n encoder = nn.Sequential(\n nn.Conv2d(\n input_channels, first_conv_channels, kernel_size, padding=pad),\n nn.BatchNorm2d(first_conv_channels),\n nn.ReLU(),\n nn.MaxPool2d(max_pool_kernel),\n nn.Conv2d(\n first_conv_channels, second_conv_channels, kernel_size,\n padding=pad),\n nn.BatchNorm2d(second_conv_channels),\n nn.ReLU(),\n nn.MaxPool2d(max_pool_kernel),\n utils.Flatten(),\n nn.Linear(n_pixels * n_pixels * self.n_digits * second_conv_channels,\n linear_size),\n nn.BatchNorm1d(linear_size),\n nn.ReLU(),\n nn.Linear(linear_size, self.embedding_dim),\n nn.Linear(self.embedding_dim, self.n_classes, bias=False),\n )\n\n encoder.apply(_init_weights)\n\n # This is the empirical approximation for initialization the vMF\n # distributions for each class in the final layer.\n if self.use_vmf:\n utils.vmf_class_weight_init(encoder[-1].weight, self.kappa_confidence,\n self.embedding_dim)\n\n return encoder", "def build_train_augmentor(cfg: CfgNode, keep_uncropped: bool = False, keep_non_smoothed: bool = False):\n aug_list = []\n \n names = cfg.AUGMENTOR.ADDITIONAL_TARGETS_NAME\n types = cfg.AUGMENTOR.ADDITIONAL_TARGETS_TYPE\n if names is None:\n additional_targets = None\n else:\n assert len(names) == len(types)\n additional_targets = {}\n for i in range(len(names)):\n additional_targets[names[i]] = types[i]\n\n #1. rotate\n if cfg.AUGMENTOR.ROTATE.ENABLED:\n aug_list.append(\n Rotate(rot90=cfg.AUGMENTOR.ROTATE.ROT90,\n p=cfg.AUGMENTOR.ROTATE.P,\n additional_targets=additional_targets))\n\n #2. rescale\n if cfg.AUGMENTOR.RESCALE.ENABLED:\n aug_list.append(\n Rescale(p=cfg.AUGMENTOR.RESCALE.P,\n additional_targets=additional_targets))\n\n #3. flip\n if cfg.AUGMENTOR.FLIP.ENABLED:\n aug_list.append(\n Flip(do_ztrans=cfg.AUGMENTOR.FLIP.DO_ZTRANS,\n p=cfg.AUGMENTOR.FLIP.P, \n additional_targets=additional_targets))\n\n #4. elastic\n if cfg.AUGMENTOR.ELASTIC.ENABLED:\n aug_list.append(\n Elastic(alpha=cfg.AUGMENTOR.ELASTIC.ALPHA, \n sigma=cfg.AUGMENTOR.ELASTIC.SIGMA, \n p=cfg.AUGMENTOR.ELASTIC.P,\n additional_targets=additional_targets))\n\n #5. grayscale\n if cfg.AUGMENTOR.GRAYSCALE.ENABLED:\n aug_list.append(\n Grayscale(p=cfg.AUGMENTOR.GRAYSCALE.P,\n additional_targets=additional_targets))\n\n #6. missingparts\n if cfg.AUGMENTOR.MISSINGPARTS.ENABLED:\n aug_list.append(\n MissingParts(iterations=cfg.AUGMENTOR.MISSINGPARTS.ITER,\n p=cfg.AUGMENTOR.MISSINGPARTS.P,\n additional_targets=additional_targets))\n\n #7. missingsection\n if cfg.AUGMENTOR.MISSINGSECTION.ENABLED and not cfg.DATASET.DO_2D:\n aug_list.append(\n MissingSection(\n num_sections=cfg.AUGMENTOR.MISSINGSECTION.NUM_SECTION,\n p=cfg.AUGMENTOR.MISSINGSECTION.P, \n additional_targets=additional_targets))\n\n #8. misalignment\n if cfg.AUGMENTOR.MISALIGNMENT.ENABLED and not cfg.DATASET.DO_2D:\n aug_list.append(\n MisAlignment( \n displacement=cfg.AUGMENTOR.MISALIGNMENT.DISPLACEMENT,\n rotate_ratio=cfg.AUGMENTOR.MISALIGNMENT.ROTATE_RATIO,\n p=cfg.AUGMENTOR.MISALIGNMENT.P,\n additional_targets=additional_targets))\n\n #9. motion-blur\n if cfg.AUGMENTOR.MOTIONBLUR.ENABLED:\n aug_list.append(\n MotionBlur( \n sections=cfg.AUGMENTOR.MOTIONBLUR.SECTIONS, \n kernel_size=cfg.AUGMENTOR.MOTIONBLUR.KERNEL_SIZE,\n p=cfg.AUGMENTOR.MOTIONBLUR.P,\n additional_targets=additional_targets))\n\n #10. 
cut-blur\n if cfg.AUGMENTOR.CUTBLUR.ENABLED:\n aug_list.append(\n CutBlur(length_ratio=cfg.AUGMENTOR.CUTBLUR.LENGTH_RATIO, \n down_ratio_min=cfg.AUGMENTOR.CUTBLUR.DOWN_RATIO_MIN,\n down_ratio_max=cfg.AUGMENTOR.CUTBLUR.DOWN_RATIO_MAX,\n downsample_z=cfg.AUGMENTOR.CUTBLUR.DOWNSAMPLE_Z,\n p=cfg.AUGMENTOR.CUTBLUR.P,\n additional_targets=additional_targets))\n\n #11. cut-noise\n if cfg.AUGMENTOR.CUTNOISE.ENABLED:\n aug_list.append(\n CutNoise(length_ratio=cfg.AUGMENTOR.CUTNOISE.LENGTH_RATIO, \n scale=cfg.AUGMENTOR.CUTNOISE.SCALE,\n p=cfg.AUGMENTOR.CUTNOISE.P, \n additional_targets=additional_targets))\n\n # compose the list of transforms\n augmentor = Compose(transforms=aug_list, \n input_size=cfg.MODEL.INPUT_SIZE, \n smooth=cfg.AUGMENTOR.SMOOTH,\n keep_uncropped=keep_uncropped, \n keep_non_smoothed=keep_non_smoothed,\n additional_targets=additional_targets)\n\n return augmentor", "def __init__(self, encoder_size, decoder_size, label_size):\n super(BilinearAttention, self).__init__()\n self.W = nn.Parameter(torch.zeros(label_size, decoder_size, encoder_size))\n self.u = nn.Parameter(torch.zeros(label_size, encoder_size))\n self.v = nn.Parameter(torch.zeros(label_size, decoder_size))\n self.b = nn.Parameter(torch.zeros(label_size))\n \n nn.init.xavier_uniform_(self.W)\n nn.init.xavier_uniform_(self.u)\n nn.init.xavier_uniform_(self.v)", "def run_net(inputs, **parameter):\n\n # ---- set numpy random state for each run----\n np.random.set_state(np_state)\n\n # -----parameter setting-------\n n_ex = 1600\n n_inh = int(n_ex / 4)\n n_input = MNIST_shape[1] * coding_n\n n_read = n_ex + n_inh\n\n R = parameter['R']\n f_in = parameter['f_in']\n f_EE = parameter['f_EE']\n f_EI = parameter['f_EI']\n f_IE = parameter['f_IE']\n f_II = parameter['f_II']\n\n A_EE = 60 * f_EE\n A_EI = 60 * f_EI\n A_IE = 60 * f_IE\n A_II = 60 * f_II\n A_inE = 60 * f_in\n A_inI = 60 * f_in\n\n tau_ex = parameter['tau_ex'] * coding_duration\n tau_inh = parameter['tau_inh'] * coding_duration\n tau_read = 30\n\n p_inE = parameter['p_in'] * 0.1\n p_inI = parameter['p_in'] * 0.1\n\n # ------definition of equation-------------\n neuron_in = '''\n I = stimulus(t,i) : 1\n '''\n\n neuron = '''\n tau : 1\n dv/dt = (I-v) / (tau*ms) : 1 (unless refractory)\n dg/dt = (-g)/(3*ms) : 1\n dh/dt = (-h)/(6*ms) : 1\n I = (g+h)+13.5: 1\n x : 1\n y : 1\n z : 1\n '''\n\n neuron_read = '''\n tau : 1\n dv/dt = (I-v) / (tau*ms) : 1\n dg/dt = (-g)/(3*ms) : 1 \n dh/dt = (-h)/(6*ms) : 1\n I = (g+h): 1\n '''\n\n synapse = '''\n w : 1\n '''\n\n on_pre_ex = '''\n g+=w\n '''\n\n on_pre_inh = '''\n h-=w\n '''\n\n # -----Neurons and Synapses setting-------\n Input = NeuronGroup(n_input, neuron_in, threshold='I > 0', method='euler', refractory=0 * ms,\n name='neurongroup_input')\n\n G_ex = NeuronGroup(n_ex, neuron, threshold='v > 15', reset='v = 13.5', method='euler', refractory=3 * ms,\n name='neurongroup_ex')\n\n G_inh = NeuronGroup(n_inh, neuron, threshold='v > 15', reset='v = 13.5', method='euler', refractory=2 * ms,\n name='neurongroup_in')\n\n G_readout = NeuronGroup(n_read, neuron_read, method='euler', name='neurongroup_read')\n\n S_inE = Synapses(Input, G_ex, synapse, on_pre=on_pre_ex, method='euler', name='synapses_inE')\n\n S_inI = Synapses(Input, G_inh, synapse, on_pre=on_pre_ex, method='euler', name='synapses_inI')\n\n S_EE = Synapses(G_ex, G_ex, synapse, on_pre=on_pre_ex, method='euler', name='synapses_EE')\n\n S_EI = Synapses(G_ex, G_inh, synapse, on_pre=on_pre_ex, method='euler', name='synapses_EI')\n\n S_IE = Synapses(G_inh, G_ex, synapse, 
on_pre=on_pre_inh, method='euler', name='synapses_IE')\n\n S_II = Synapses(G_inh, G_inh, synapse, on_pre=on_pre_inh, method='euler', name='synapses_I')\n\n S_E_readout = Synapses(G_ex, G_readout, 'w = 1 : 1', on_pre=on_pre_ex, method='euler')\n\n S_I_readout = Synapses(G_inh, G_readout, 'w = 1 : 1', on_pre=on_pre_inh, method='euler')\n\n # -------initialization of neuron parameters----------\n G_ex.v = '13.5+1.5*rand()'\n G_inh.v = '13.5+1.5*rand()'\n G_readout.v = '0'\n G_ex.g = '0'\n G_inh.g = '0'\n G_readout.g = '0'\n G_ex.h = '0'\n G_inh.h = '0'\n G_readout.h = '0'\n G_ex.tau = tau_ex\n G_inh.tau = tau_inh\n G_readout.tau = tau_read\n [G_ex, G_in] = base.allocate([G_ex, G_inh], 10, 10, 20)\n\n # -------initialization of network topology and synapses parameters----------\n S_inE.connect(condition='j<0.3*N_post', p=p_inE)\n S_inI.connect(condition='j<0.3*N_post', p=p_inI)\n S_EE.connect(condition='i != j', p='0.3*exp(-((x_pre-x_post)**2+(y_pre-y_post)**2+(z_pre-z_post)**2)/R**2)')\n S_EI.connect(p='0.2*exp(-((x_pre-x_post)**2+(y_pre-y_post)**2+(z_pre-z_post)**2)/R**2)')\n S_IE.connect(p='0.4*exp(-((x_pre-x_post)**2+(y_pre-y_post)**2+(z_pre-z_post)**2)/R**2)')\n S_II.connect(condition='i != j', p='0.1*exp(-((x_pre-x_post)**2+(y_pre-y_post)**2+(z_pre-z_post)**2)/R**2)')\n S_E_readout.connect(j='i')\n S_I_readout.connect(j='i+n_ex')\n\n S_inE.w = function.gamma(A_inE, S_inE.w.shape)\n S_inI.w = function.gamma(A_inI, S_inI.w.shape)\n S_EE.w = function.gamma(A_EE, S_EE.w.shape)\n S_IE.w = function.gamma(A_IE, S_IE.w.shape)\n S_EI.w = function.gamma(A_EI, S_EI.w.shape)\n S_II.w = function.gamma(A_II, S_II.w.shape)\n\n S_EE.pre.delay = '1.5*ms'\n S_EI.pre.delay = '0.8*ms'\n S_IE.pre.delay = '0.8*ms'\n S_II.pre.delay = '0.8*ms'\n\n # ------create network-------------\n net = Network(collect())\n net.store('init')\n\n # ------run network-------------\n stimulus = TimedArray(inputs[0], dt=Dt)\n net.run(duration * Dt)\n states = net.get_states()['neurongroup_read']['v']\n net.restore('init')\n return (states, inputs[1])", "def make_neural_net_two_layer():\n i0 = Input('i0', -1.0) # this input is immutable\n i1 = Input('i1', 0.0)\n i2 = Input('i2', 0.0)\n seed_random()\n wt1 = random_weight()\n wt2 = random_weight()\n wt3 = random_weight()\n wt4 = random_weight()\n wt5 = random_weight()\n wt6 = random_weight()\n\t\n w1A = Weight('w1A', wt1)\n w2A = Weight('w2A', wt2)\n w1B = Weight('w1B', wt3)\n w2B = Weight('w2B', wt4)\n wA = Weight('wA', -1)\n wB = Weight('wB', -1)\n wAC = Weight('wAC', wt5)\n wBC = Weight('wBC', wt6)\n wC = Weight('wC', -1)\n\n # Inputs must be in the same order as their associated weights\n A = Neuron('A', [i1,i2,i0], [w1A,w2A,wA])\n B = Neuron('B', [i1,i2,i0], [w1B,w2B,wB])\n C = Neuron('C', [A,B,i0], [wAC,wBC,wC])\n P = PerformanceElem(C, 0.0)\n\n net = Network(P,[A, B, C])\n return net", "def encoder(x, h_dim, reuse=False, reorder_layers=True, training=False):\n if(reorder_layers):\n block = conv_block_reordered\n else:\n block = conv_block\n with tf.variable_scope('encoder', reuse=reuse):\n net = block(x, h_dim, name='conv_1', training=training, block_idx=0)\n net = block(net, h_dim, name='conv_2', training=training, block_idx=1)\n net = block(net, h_dim, name='conv_3', training=training, block_idx=2)\n net = block(net, h_dim, name='conv_4', training=training, block_idx=3)\n embeddings = tf.contrib.layers.flatten(net)\n return embeddings", "def build_cnn(input_var=None):\n\n # input layer\n network = lasagne.layers.InputLayer(\n shape=(\n None,\n 1,\n 128,\n 129\n ),\n 
input_var=input_var\n )\n\n # conv\n network = lasagne.layers.Conv2DLayer(\n lasagne.layers.batch_norm(network), # Batch norm on incoming\n num_filters=32, # Number of convolution filters to use\n filter_size=(5, 5),\n stride=(1, 1), # Stride fo (1,1)\n pad='same', # Keep output size same as input\n nonlinearity=lasagne.nonlinearities.leaky_rectify, #rectify, # ReLU\n W=lasagne.init.GlorotUniform() # W initialization\n )\n\n # conv\n #network = lasagne.layers.Conv2DLayer(\n #lasagne.layers.batch_norm(network), # Batch norm on incoming\n #num_filters=32, # Number of convolution filters to use\n #filter_size=(5, 5),\n #stride=(1, 1), # Stride fo (1,1)\n #pad='same', # Keep output size same as input\n #nonlinearity=lasagne.nonlinearities.leaky_rectify, #rectify, # ReLU\n #W=lasagne.init.GlorotUniform() # W initialization\n #)\n\n # pool (2x2 max pool)\n network = lasagne.layers.MaxPool2DLayer(\n network, pool_size=(2, 2)\n )\n\n # conv\n network = lasagne.layers.Conv2DLayer(\n lasagne.layers.batch_norm(network), # Batch norm on incoming\n num_filters=32, # Number of convolution filters to use\n filter_size=(3, 3),\n stride=(1, 1), # Stride fo (1,1)\n pad='same', # Keep output size same as input\n nonlinearity=lasagne.nonlinearities.leaky_rectify, #rectify, # ReLU\n W=lasagne.init.GlorotUniform() # W initialization\n )\n\n # conv\n #network = lasagne.layers.Conv2DLayer(\n #lasagne.layers.batch_norm(network), # Batch norm on incoming\n #num_filters=32, # Number of convolution filters to use\n #filter_size=(3, 3),\n #stride=(1, 1), # Stride fo (1,1)\n #pad='same', # Keep output size same as input\n #nonlinearity=lasagne.nonlinearities.leaky_rectify, #rectify, # ReLU\n #W=lasagne.init.GlorotUniform() # W initialization\n #)\n\n # pool (2x2 max pool)\n network = lasagne.layers.MaxPool2DLayer(\n network, pool_size=(2, 2)\n )\n\n # Fully-connected layer of 256 units with 50% dropout on its inputs\n network = lasagne.layers.DenseLayer(\n lasagne.layers.dropout(network, p=.5),\n num_units=256,\n nonlinearity=lasagne.nonlinearities.leaky_rectify, #rectify, # ReLU\n W=lasagne.init.HeUniform() # W initialization\n )\n\n # Finally add a 1-unit softmax output layer\n network = lasagne.layers.DenseLayer(\n network,\n num_units=1,\n nonlinearity=lasagne.nonlinearities.sigmoid\n )\n\n return network", "def run_finetuning(experiment,\n X_train, y_train, X_valid, y_valid, X_test, y_test,\n model_path, prev_model_1_path, prev_model_2_path,\n code_size_1=1000, code_size_2=600):\n\n # Hyperparameters\n learning_rate = 0.0003\n dropout_1 = 0.4\n dropout_2 = 0.6\n # initial_momentum = 0.1\n # final_momentum = 0.9 # Increase momentum along epochs to avoid fluctiations\n # saturate_momentum = 100\n\n training_iters = 150\n start_saving_at = 5\n batch_size = 32\n n_classes = 2\n\n if os.path.isfile(model_path) or \\\n os.path.isfile(model_path + \".meta\"):\n return\n\n # Convert output to one-hot encoding\n y_train = np.array([to_softmax(n_classes, y) for y in y_train])\n y_valid = np.array([to_softmax(n_classes, y) for y in y_valid])\n y_test = np.array([to_softmax(n_classes, y) for y in y_test])\n\n # Load pretrained encoder weights\n ae1 = load_ae_encoder(X_train.shape[1], code_size_1, prev_model_1_path)\n ae2 = load_ae_encoder(code_size_1, code_size_2, prev_model_2_path)\n\n # Initialize NN model with the encoder weights\n model = nn(X_train.shape[1], n_classes, [\n {\"size\": code_size_1, \"actv\": tf.nn.tanh},\n {\"size\": code_size_2, \"actv\": tf.nn.tanh},\n ], [\n {\"W\": ae1[\"W_enc\"], \"b\": 
ae1[\"b_enc\"]},\n {\"W\": ae2[\"W_enc\"], \"b\": ae2[\"b_enc\"]},\n ])\n\n # Place GD + momentum optimizer\n # model[\"momentum\"] = tf.placeholder(\"float32\")\n optimizer = tf.train.AdamOptimizer(learning_rate, beta1=0.9).minimize(model[\"cost\"])\n\n # Place Adam optimizer\n # optimizer = tf.train.AdamOptimizer(learning_rate).minimize(model[\"cost\"]) # cross entropy\n\n # Make prediction and Compute accuracies\n logits = model[\"output\"]\n pred = tf.argmax(model[\"output\"], 1)\n correct_prediction = tf.equal(pred, tf.argmax(model[\"expected\"], 1))\n accuracy = tf.reduce_mean(tf.cast(correct_prediction, \"float\"))\n\n # Store prediction\n best_prediction = None\n best_logits = None\n acc_list = np.zeros((training_iters, 3))\n loss_list = np.zeros((training_iters, 3))\n auc_list = np.zeros((training_iters, 3))\n\n # Initialize Tensorflow session\n init = tf.group(tf.global_variables_initializer(), tf.local_variables_initializer())\n with tf.Session() as sess:\n sess.run(init)\n\n # Define model saver\n saver = tf.train.Saver(model[\"params\"], write_version=tf.train.SaverDef.V2)\n\n # Initialize with an absurd cost, accuracy, auc for model selection\n best_acc = 0.0\n\n # Iterate Epochs\n for epoch in range(training_iters):\n\n # randomly shuffle data\n index = np.arange(X_train.shape[0])\n random.shuffle(index)\n\n X_train = X_train[index,]\n y_train = y_train[index]\n\n # exclude case that are all 1 or all 0\n if 0 in np.sum(y_train, axis=0):\n continue\n\n # Break training set into batches\n batches = range(len(X_train) // batch_size)\n costs = np.zeros((len(batches), 3))\n accs = np.zeros((len(batches), 3))\n AUCs = np.zeros((len(batches), 3))\n prediction = None\n logit = None\n\n # Compute momentum saturation\n # alpha = float(epoch) / float(saturate_momentum)\n # if alpha < 0.:\n # alpha = 0.\n # if alpha > 1.:\n # alpha = 1.\n # momentum = initial_momentum * (1 - alpha) + alpha * final_momentum\n\n for ib in batches:\n\n # Compute start and end of batch from training set data array\n from_i = ib * batch_size\n to_i = (ib + 1) * batch_size\n\n # Select current batch\n batch_xs, batch_ys = X_train[from_i:to_i], y_train[from_i:to_i]\n if 0 in np.sum(batch_ys, axis=0):\n continue\n\n # Run optimization and retrieve training cost and accuracy\n _, cost_train, acc_train, true_y, pred_y = sess.run(\n [optimizer, model[\"cost\"], accuracy, model['expected'], model['output']],\n feed_dict={\n model[\"input\"]: batch_xs,\n model[\"expected\"]: batch_ys,\n model[\"dropouts\"][0]: dropout_1,\n model[\"dropouts\"][1]: dropout_2\n }\n )\n # Compute AUC score\n AUC_train = roc_auc_score(np.argmax(true_y, 1), pred_y[:, 1])\n\n # Compute validation cost and accuracy\n cost_valid, acc_valid, true_y, pred_y = sess.run(\n [model[\"cost\"], accuracy, model['expected'], model['output']],\n feed_dict={\n model[\"input\"]: X_valid,\n model[\"expected\"]: y_valid,\n model[\"dropouts\"][0]: 1.0,\n model[\"dropouts\"][1]: 1.0\n }\n )\n AUC_valid = roc_auc_score(np.argmax(true_y, 1), pred_y[:, 1])\n\n # Compute test cost and accuracy\n logit, prediction, cost_test, acc_test, true_y, pred_y = sess.run(\n [logits, pred, model[\"cost\"], accuracy, model['expected'], model['output']],\n feed_dict={\n model[\"input\"]: X_test,\n model[\"expected\"]: y_test,\n model[\"dropouts\"][0]: 1.0,\n model[\"dropouts\"][1]: 1.0\n }\n )\n AUC_test = roc_auc_score(np.argmax(true_y, 1), pred_y[:, 1])\n\n costs[ib] = [cost_train, cost_valid, cost_test]\n accs[ib] = [acc_train, acc_valid, acc_test]\n AUCs[ib] = [AUC_train, 
AUC_valid, AUC_test]\n\n # Compute the average costs from all batches\n costs = costs.mean(axis=0)\n cost_train, cost_valid, cost_test = costs\n loss_list[epoch] = cost_train, cost_valid, cost_test\n\n # Compute the average accuracy from all batches\n accs = accs.mean(axis=0)\n acc_train, acc_valid, acc_test = accs\n acc_list[epoch] = acc_train, acc_valid, acc_test\n\n # Compute the average AUC for all batches\n AUCs = AUCs.mean(axis=0)\n AUC_train, AUC_valid, AUC_test = AUCs\n auc_list[epoch] = AUC_train, AUC_valid, AUC_test\n\n # Pretty print training info\n print(format_config(\n \"Exp={experiment}, Model=mlp, Iter={epoch:5d}, Acc={acc_train:.6f} {acc_valid:.6f} {acc_test:.6f}, \\\n AUC={AUC_train:.6f} {AUC_valid:.6f} {AUC_test:.6f}\",\n {\n \"experiment\": experiment,\n \"epoch\": epoch,\n \"acc_train\": acc_train,\n \"acc_valid\": acc_valid,\n \"acc_test\": acc_test,\n \"AUC_train\": AUC_train,\n \"AUC_valid\": AUC_valid,\n \"AUC_test\": AUC_test\n }\n ))\n\n # Save better model if optimization achieves a lower accuracy\n # and avoid initial epochs because of the fluctuations\n if acc_valid > best_acc and epoch > start_saving_at:\n best_prediction = prediction\n best_logits = logit\n print(\"Saving better model\")\n saver.save(sess, model_path)\n best_acc = acc_valid\n return best_prediction, best_logits, acc_list, loss_list, auc_list", "def __init__(self, embed_size, dropout=0.5, image_model='resnet101', simple=False, pretrained=True):\n super(EncoderCNN, self).__init__()\n resnet = globals()[image_model](pretrained=pretrained)\n modules = list(resnet.children())[:-2] # delete the last fc layer.\n self.resnet = nn.Sequential(*modules)\n \n self.linear = nn.Sequential(nn.Conv2d(resnet.fc.in_features, embed_size, kernel_size=1, padding=0),\n nn.Dropout2d(dropout))\n\n self.simple = simple\n if simple:\n self.avgpool = nn.AdaptiveAvgPool2d((1, 1))", "def soft_update_target_network(self):\n \n pars_behavior = self.model.get_weights() # these have form [W1, b1, W2, b2, ..], Wi = weights of layer i\n pars_target = self.target_model.get_weights() # bi = biases in layer i\n \n for par_behavior,par_target in zip(pars_behavior,pars_target):\n par_target = par_target*(1-self.tau) + par_behavior*self.tau\n pars_target[ctr] = par_target\n\n self.target_model.set_weights(pars_target)", "def Transition(inputs, num_outchannels, training, data_format):\n trans_output = conv2d_fixed_padding(\n inputs=inputs, filters=num_outchannels,\n kernel_size=3, strides=2, data_format=data_format)\n trans_output = batch_norm(trans_output, training, data_format)\n trans_output = tf.nn.relu(trans_output)\n return trans_output", "def _construct_ae(self):\n if self.joint_train:\n self.critic.trainable = False\n autoencoder = Model(self.encoder.input,\n [self.decoder(self.encoder.output),\n self.critic(self.encoder.output)])\n autoencoder.compile(optimizer=self.ae_opt(lr=self.ae_learning_rate),\n loss=['binary_crossentropy',\n 'binary_crossentropy'],\n loss_weights=[self.reconst_weight,\n self.adv_weight])\n else:\n autoencoder = Model(self.encoder.input,\n self.decoder(self.encoder.output))\n autoencoder.compile(optimizer=self.ae_opt(lr=self.ae_learning_rate),\n loss='mse')\n return autoencoder", "def amoebanetd(num_classes: int = 10,\n num_layers: int = 4,\n num_filters: int = 512,\n ) -> nn.Sequential:\n channel = num_filters // 4\n\n def make_layer(channel: int,\n num_layers: int,\n genotype: Genotype,\n ) -> Tuple[nn.Sequential, int]:\n n = num_layers\n channel_prev_prev, channel_prev, channel_curr = channel, 
channel, channel\n cells = []\n\n reduction_prev = False\n reduction = True\n channel_curr *= 2\n cell, multiplier = make_cell(genotype, channel_prev_prev,\n channel_prev, channel_curr, reduction, reduction_prev)\n channel_prev_prev, channel_prev = channel_prev, multiplier * channel_curr\n cells.append(cell)\n\n reduction_prev = True\n reduction = True\n channel_curr *= 2\n cell, multiplier = make_cell(genotype, channel_prev_prev,\n channel_prev, channel_curr, reduction, reduction_prev)\n channel_prev_prev, channel_prev = channel_prev, multiplier * channel_curr\n cells.append(cell)\n\n reduction = False\n reduction_prev = True\n for _ in range(n):\n cell, multiplier = make_cell(genotype, channel_prev_prev,\n channel_prev, channel_curr, reduction, reduction_prev)\n channel_prev_prev, channel_prev = channel_prev, multiplier * channel_curr\n cells.append(cell)\n reduction_prev = False\n\n reduction_prev = False\n reduction = True\n channel_curr *= 2\n cell, multiplier = make_cell(genotype, channel_prev_prev,\n channel_prev, channel_curr, reduction, reduction_prev)\n channel_prev_prev, channel_prev = channel_prev, multiplier * channel_curr\n cells.append(cell)\n\n reduction = False\n reduction_prev = True\n for _ in range(n):\n cell, multiplier = make_cell(genotype, channel_prev_prev,\n channel_prev, channel_curr, reduction, reduction_prev)\n channel_prev_prev, channel_prev = channel_prev, multiplier * channel_curr\n cells.append(cell)\n reduction_prev = False\n\n reduction_prev = False\n reduction = True\n channel_curr *= 2\n cell, multiplier = make_cell(genotype, channel_prev_prev,\n channel_prev, channel_curr, reduction, reduction_prev)\n channel_prev_prev, channel_prev = channel_prev, multiplier * channel_curr\n cells.append(cell)\n\n reduction = False\n reduction_prev = True\n for _ in range(n):\n cell, multiplier = make_cell(genotype, channel_prev_prev,\n channel_prev, channel_curr, reduction, reduction_prev)\n channel_prev_prev, channel_prev = channel_prev, multiplier * channel_curr\n cells.append(cell)\n reduction_prev = False\n\n return nn.Sequential(*cells), channel_prev\n\n cells, channel_prev = make_layer(channel, num_layers, amoebanetd_genotype)\n\n model = nn.Sequential(OrderedDict([\n ('stem', make_stem(channel)),\n ('cells', cells),\n ('fin', Classifier(channel_prev, num_classes))\n ]))\n\n return flatten_sequential(model)", "def soft_update_target_network(self):\n \n pars_behavior = self.model.get_weights() # these have form [W1, b1, W2, b2, ..], Wi = weights of layer i\n pars_target = self.target_model.get_weights() # bi = biases in layer i\n \n ctr = 0\n for par_behavior,par_target in zip(pars_behavior,pars_target):\n par_target = par_target*(1-self.tau) + par_behavior*self.tau\n pars_target[ctr] = par_target\n ctr += 1\n\n self.target_model.set_weights(pars_target)", "def _build(self):\n self.autoencoder = autoencoder(\n images = self.images,\n name = 'autoencoder')\n # self.hash = self.autoencoder.hash\n self.z = tf.placeholder(tf.float32, shape = [None, CODEWORD_LENGTH], \n name = 'z-layer') \n with tf.variable_scope('latent_space') as scope:\n for i in range(CODEWORD_LENGTH):\n tf.summary.histogram('z_' + str(i) ,self.z[:,i]) \n tf.summary.histogram('codeword_' + str(i), self.autoencoder.codeword[:,i])\n self.generator = gan_generator( z = self.z,\n input_params = self.autoencoder.params,\n name = 'gan_generator')\n self.discriminator = gan_discriminator ( images = self.images,\n generation = self.generator.generation,\n name = 'gan_discriminator' )\n 
self.generator.cook(fake = self.discriminator.fake)\n mean_z, var_z = tf.nn.moments(self.z, axes=[0])\n mean_codeword, var_codeword = tf.nn.moments(self.autoencoder.codeword, axes = [0])\n\n with tf.variable_scope ('divergence') as scope:\n tf.summary.scalar( tensor = tf.nn.l2_loss(mean_z-mean_codeword), name = 'mean divergence')\n tf.summary.scalar( tensor = tf.nn.l2_loss(var_z-var_codeword), name = 'variance divergence')\n # divergence = self.hash ", "def create_network(outfname_train, outfname_deploy, N_conv_layers=3, N_fully_connected_layers=3, batch_size_train=100,batch_size_test=100, source_train='datatrain', source_test='datatest', num_output_conv=32, kernel_size=3, weight_std_conv=0.01, activation='relu', num_output_fully_connected=64, weight_std_fully_connected=0.01, do_batchnorm=1, do_last_batchnorm=1, scale=1,shift=0, weight_std_affine=0, use_softmax=0, num_classes=3, input_dim_1=1,input_dim_2=3, input_dim_3=32, input_dim_4=32, use_lowrank=1, T_dimension=None, softmax_weight=1, lowrank_weight=1, data_type='lmdb'):\n\n if T_dimension==None:\n T_dimension = num_classes\n \n train_txt = \"\"\n deploy_txt = \"\"\n\n train_txt += data_layer(name='data_layer', source_train=source_train, batch_size_train=batch_size_train, source_test=source_test, batch_size_test=batch_size_test, data_type=data_type)\n\n deploy_txt += deploy_data_layer(name='data_layer', input_dim_1=input_dim_1, input_dim_2=input_dim_2, input_dim_3=input_dim_3, input_dim_4=input_dim_4)\n\n last_name = 'data'\n\n ####### CONVOLUTIONAL LAYERS\n for i in range(N_conv_layers):\n conv_name = 'conv%i' % (i+1)\n top = conv_name\n\n conv_txt = convolution_layer(conv_name, last_name, num_output=num_output_conv, kernel_size=kernel_size, weight_std=weight_std_conv)\n\n train_txt += conv_txt\n deploy_txt += conv_txt\n \n if activation == 'pool':\n pool_name = 'pool%i' % (i+1)\n activation_txt = pooling_layer(pool_name, conv_name)\n last_name = pool_name\n elif activation == 'relu':\n relu_name = 'relu%i' % (i+1)\n activation_txt = relu_layer(relu_name, conv_name)\n last_name = conv_name\n else:\n raise Exception('Unknown activation')\n \n\n train_txt += activation_txt\n deploy_txt += activation_txt\n\n \n\n ####### FULLY CONNECTED LAYERS\n for i in range(N_fully_connected_layers):\n fully_connected_name = 'ip%i' % (i+1)\n\n fully_connected_txt = fully_connected_layer(fully_connected_name, last_name, num_output=num_output_fully_connected, weight_std=weight_std_fully_connected)\n\n relu_name = 'iprelu%i' % (i+1)\n relu_txt = relu_layer(relu_name, fully_connected_name)\n\n batchnorm_name = 'ipbn%i' % (i+1)\n\n if do_batchnorm and i<N_fully_connected_layers-1:\n batchnorm_txt_train = batchnorm_layer(batchnorm_name, fully_connected_name, use_global_stats=False, phase='TRAIN', deploy=False)\n batchnorm_txt_test = batchnorm_layer(batchnorm_name, fully_connected_name, use_global_stats=True, phase='TEST', deploy=False)\n \n batchnorm_txt_deploy = batchnorm_layer(batchnorm_name, fully_connected_name, deploy=True)\n scale_txt = ''\n \n last_name = batchnorm_name\n \n elif do_last_batchnorm:\n batchnorm_txt_train = batchnorm_layer(batchnorm_name, fully_connected_name, use_global_stats=False, phase='TRAIN', deploy=False)\n batchnorm_txt_test = batchnorm_layer(batchnorm_name, fully_connected_name, use_global_stats=True, phase='TEST', deploy=False)\n \n batchnorm_txt_deploy = batchnorm_layer(batchnorm_name, fully_connected_name, deploy=True)\n scale_name = 'ipbnscaled%i' % (i+1)\n\n scale_txt = scale_layer(scale_name, batchnorm_name, 
scale=scale,shift=shift)\n \n last_name = scale_name\n else:\n batchnorm_txt_train = ''\n batchnorm_txt_test = ''\n batchnorm_txt_deploy = ''\n last_name = fully_connected_name\n scale_txt = ''\n \n train_txt += fully_connected_txt + relu_txt + batchnorm_txt_train + batchnorm_txt_test + scale_txt\n deploy_txt += fully_connected_txt + relu_txt + batchnorm_txt_deploy + scale_txt\n \n\n\n\n\n # add affine layer on top of funnel layer \n affine_name = 'affine' # (matrix T)\n affine_txt = fully_connected_layer(affine_name, last_name, num_output=T_dimension, weight_std=weight_std_affine)\n\n train_txt += affine_txt\n deploy_txt += affine_txt\n \n # apply lowrank loss to output of 'affine' layer [conv - fully_connected -\n # funnel - affine - lowrank] the lowrank output is located in affine. The\n # 'funnel' layer is used to allow softmax to separate between classes before\n # LRT\n if use_lowrank:\n lowrank_txt = lowrank_layer('lowrank_loss', affine_name, loss_weight=lowrank_weight)\n train_txt += lowrank_txt\n\n if use_softmax:\n # apply softmax loss to output of funnel layer [conv - fully_connected - funnel - softmax]\n # add one affine layer to reduce from num_output_fully_connected to num_classes\n\n # apr 4. trying on top of fully connected layer\n funnel_name = 'funnel'\n funnel_txt = fully_connected_layer(funnel_name, last_name, num_output=num_classes, weight_std=weight_std_fully_connected)\n\n train_txt += funnel_txt\n deploy_txt += funnel_txt\n\n softmax_txt = softmax_layer('softmax_loss', funnel_name, loss_weight=softmax_weight)\n train_txt += softmax_txt\n\n write_to_file(outfname_train, train_txt)\n write_to_file(outfname_deploy, deploy_txt)\n\n \n return train_txt, deploy_txt", "def encoder_activation_func(num_layer):\n ec_funct = []\n for i in range(num_layer):\n ec_funct.append('relu')\n ec_funct.append('softmax')\n\n return ec_funct", "def __call__(self, x, is_training, nfilt=32, reuse=False):\n with tf.variable_scope(self.name):\n x = tf.reshape(x, [-1, self.input_dim, self.input_dim, self.channels])\n\n # attnh1 = unet_conv(x, nfilt*1, 'attnh1', reuse, is_training, use_batch_norm=False)\n # attn1 = unet_conv_t(attnh1, None, 1, 'attn1', reuse, is_training, activation=tf.nn.tanh)\n\n # attnh2 = unet_conv(attnh1, nfilt*2, 'attnh2', reuse, is_training)\n # attn2 = unet_conv_t(attnh2, None, nfilt*2, 'attn2_1', reuse, is_training)\n # attn2 = unet_conv_t(attn2, None, 1, 'attn2_2', reuse, is_training, activation=tf.nn.tanh)\n\n # attnh3 = unet_conv(attnh2, nfilt*4, 'attnh3', reuse, is_training)\n # attn3 = unet_conv_t(attnh3, None, nfilt*4, 'attn3_1', reuse, is_training)\n # attn3 = unet_conv_t(attn3, None, nfilt*2, 'attn3_2', reuse, is_training)\n # attn3 = unet_conv_t(attn3, None, 1, 'attn3_3', reuse, is_training, activation=tf.nn.tanh)\n\n # salience = tf.concat([attn1, attn2, attn3], 3)\n # salience = conv(salience, 1, 'salience', s=1, reuse=reuse)\n # salience = tf.reshape(salience, (-1, self.input_dim*self.input_dim*1))\n # salience = tf.nn.softmax(salience)\n # salience = tf.reshape(salience, (-1, self.input_dim,self.input_dim,1))\n\n h1 = unet_conv(x, nfilt*1, 'h1', reuse, is_training, use_batch_norm=False)\n h2 = unet_conv(h1, nfilt*2, 'h2', reuse, is_training)\n h3 = unet_conv(h2, nfilt*4, 'h3', reuse, is_training)\n out = unet_conv(h3, 1, 'out', reuse, is_training, use_batch_norm=False, activation=None)\n\n return out", "def setup_forward(self, W, input_data, prefix=\"\"):\n \n def loop_body(i, activations, outputcollect):\n \n if self.config['sequence_input']:\n # Cut out 
the correct input\n if self.config['net_input_add_onehot']:\n inp = tf.slice(input_data, (0,i), (self.config['batch_size'], 1), name=prefix+\"/inputSlice\") # <batch_size, 1>\n inp = tf.squeeze(inp, 1, name=prefix+\"/inputSqueeze\") # <batch_size>\n inp = tf.one_hot(indices=inp, depth=self.config['num_input']) # <batch_size, num_input>\n else:\n inp = tf.slice(input_data, (0,i,0), (self.config['batch_size'], 1, self.config['num_input']), name=prefix+\"/inputSlice\") # <batch_size, 1, num_input>\n inp = tf.squeeze(inp, 1, name=prefix+\"/inputSqueeze\") # <batch_size, num_input>\n else:\n inp = input_data\n inp = self.setup_print(inp, \"input data\")\n \n # Concatenate input, bias, activations\n inp = tf.concat([inp, self.bias, activations], axis=1, name=prefix+\"/stepconcat\") # <batch_size, from>\n inp = tf.expand_dims(inp, 1) # <batch_size, 1, from>\n \n # Fully connected\n # <batch_size, 1, to> <= <batch_size, 1, from> @ <batch_size, from, to>\n activations = tf.matmul(inp, W, name=prefix+\"/stepmatmul\")\n activations = tf.squeeze(activations, 1) # <batch_size, to>\n \n # Leaky ReLU\n # This allows values to blow up\n ## activations = tf.maximum(activations, activations * .3, name=prefix+\"/lrelu\")\n \n # Sigmoid\n activations = tf.sigmoid(activations) # <batch_size, to>\n \n # Store the output if we need outputs from all timesteps\n # Alternative may be: https://stackoverflow.com/questions/39157723/how-to-do-slice-assignment-in-tensorflow/43139565#43139565\n if self.config['sequence_output']:\n output = tf.slice( # -> <batch_size, output>\n activations, \n (0,0), \n (self.config['batch_size'], self.config['num_output']), \n name=prefix+\"/outputslice\"\n )\n output = tf.expand_dims(output, axis=1) # <batch_size, 1, output>\n outputcollect = tf.concat([outputcollect, output], axis=1)\n \n return tf.add(i,1), activations, outputcollect\n \n loop_out = tf.while_loop(\n cond=(lambda\n i, \n activations,\n outputcollect:\n tf.less(i, self.config['timesteps'])\n ),\n body=loop_body,\n loop_vars=[\n self.initial_i,\n self.initial_activations,\n self.initial_output\n ],\n shape_invariants=[\n self.initial_i.get_shape(),\n self.initial_activations.get_shape(),\n tf.TensorShape([self.config['batch_size'], None, self.config['num_output']])\n ],\n back_prop=False,\n # return_same_structure=True,\n name=prefix+\"/loop\"\n )\n \n # Get the output\n if self.config['sequence_output']:\n output = loop_out[2]\n # Set shape otherwise broadcasting messes this up\n output.set_shape((self.config['batch_size'], self.config['timesteps'], self.config['num_output']))\n else:\n activations = loop_out[1] # <batch_size, to>\n output = tf.slice( # -> <batch_size, output>\n activations, \n (0,0), \n (self.config['batch_size'], self.config['num_output']), \n name=prefix+\"/outputslice\"\n )\n\n if self.config['net_add_softmax']:\n # tf.nn.softmax\n output = tf.exp(output) / tf.expand_dims(tf.reduce_sum(tf.exp(output), axis=-1), axis=-1)\n \n return output", "def update_target_network(self, tau):\n for t, e in zip(\n self.target_network.trainable_variables, self.online_network.trainable_variables\n ):\n t.assign(t * (1-tau) + e * tau)" ]
[ "0.65535045", "0.6422185", "0.63456124", "0.6290219", "0.6285251", "0.6273079", "0.6235958", "0.6228888", "0.6139146", "0.6119213", "0.6110673", "0.61030644", "0.6072735", "0.60715985", "0.60660934", "0.6034425", "0.60192865", "0.5931092", "0.5930418", "0.59202427", "0.5898143", "0.58960783", "0.58803874", "0.5854353", "0.58519053", "0.5848351", "0.58136046", "0.58133835", "0.5797672", "0.57908964", "0.5778991", "0.57781786", "0.5776933", "0.5740618", "0.57144755", "0.5707022", "0.5703498", "0.5701401", "0.5693553", "0.5689935", "0.5687562", "0.5685976", "0.56693393", "0.56671137", "0.56632596", "0.5661997", "0.5649752", "0.56496334", "0.5638612", "0.56357074", "0.5628955", "0.56200016", "0.5607412", "0.55866426", "0.5586019", "0.5580473", "0.5559599", "0.5557985", "0.5556203", "0.5541926", "0.5540892", "0.55357826", "0.5526102", "0.55200887", "0.5518248", "0.55180436", "0.55137753", "0.55054086", "0.5501545", "0.5501392", "0.54993784", "0.5498944", "0.5498423", "0.54741174", "0.5469829", "0.54625446", "0.5459451", "0.5456294", "0.5448352", "0.5448252", "0.5440894", "0.5436709", "0.5434627", "0.54341346", "0.5434129", "0.5428092", "0.5426491", "0.5424429", "0.54162824", "0.5415193", "0.5412081", "0.54085404", "0.5406863", "0.5406134", "0.54057384", "0.54020065", "0.53981364", "0.5395661", "0.53941625", "0.53935945" ]
0.719997
0
Helper for running guider start tests.
def _guider_start(self, nCall, nInfo, nWarn, nErr, finish=False, didFail=False):
        cmdState = self.actorState.gotoField
        cmdState.reinitialize(self.cmd)
        result = masterThread.guider_start(self.cmd, cmdState, myGlobals.actorState, 'gotoField')
        self.assertEqual(result, not didFail)
        self._check_cmd(nCall, nInfo, nWarn, nErr, finish, didFail=didFail)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def startTestRun(self):", "def __main() :\n launchTests()", "def runTests(self):\n \n pass", "def test_run_started(self):", "def main():\n run_test_all()", "def startTestHook(self):", "def run_main(): # pragma: no cover\n RunTestsCLI.run()", "def startTest(asset):", "def run(context, path=\"\"):\n common.success(f\"Tests {path} running \")\n return start.run_python(\n context,\n f\"-m pytest {path}\"\n )", "def run(self, test, env):\n\n raise NotImplementedError", "def test_script(self) -> None:\n main()", "def run_starter(self, expect_to_fail=False):", "def runtest(self):", "def do_test(self):\n\t\tshutit_global.shutit_global_object.yield_to_draw()\n\t\tif not self.build['dotest']:\n\t\t\tself.log('Tests configured off, not running',level=logging.DEBUG)\n\t\t\treturn\n\t\t# Test in reverse order\n\t\tself.log('PHASE: test', level=logging.DEBUG)\n\t\tself.stop_all()\n\t\tself.start_all()\n\t\tfor module_id in self.module_ids(rev=True):\n\t\t\t# Only test if it's installed.\n\t\t\tif self.is_installed(self.shutit_map[module_id]):\n\t\t\t\tself.log('RUNNING TEST ON: ' + module_id, level=logging.DEBUG)\n\t\t\t\tself.login(prompt_prefix=module_id,command=shutit_global.shutit_global_object.bash_startup_command,echo=False)\n\t\t\t\tif not self.shutit_map[module_id].test(self):\n\t\t\t\t\tself.fail(module_id + ' failed on test', shutit_pexpect_child=self.get_shutit_pexpect_session_from_id('target_child').pexpect_child) # pragma: no cover\n\t\t\t\tself.logout(echo=False)", "def run_test_suite():\n local('. fabric_factory/ve/bin/activate; fabric_factory/src/project/manage.py test')", "def test_start(self):\n self.fail(\"write a test\")", "def run_tests(self):\n raise NotImplementedError", "def test(self):\n for arch, python in self.python:\n self.run(f\"{python} -m pytest\")", "def runTest(self):\n self.testsActivated = True\n self.session = subprocess.Popen([\"sudo\", \"python3\", TEST_RUNNER_PATH], preexec_fn=os.setsid)", "def start_test_exec(cls):\n time_str = cls.get_current_time()\n os.system(\"robot -l ./logs/log_{0}.html -r ./logs/report_{0}.html -o ./logs/output_{0}.xml \\\n ./test_suite/{1}\".format(time_str, test_suite))", "def run_tests(self, test_labels):\n import pytest\n\n argv = []\n if self.verbosity == 0:\n argv.append('--quiet')\n if self.verbosity == 2:\n argv.append('--verbose')\n if self.verbosity == 3:\n argv.append('-vv')\n if self.failfast:\n argv.append('--exitfirst')\n if self.keepdb:\n argv.append('--reuse-db')\n\n argv.extend(test_labels)\n return pytest.main(argv)", "def _run_ci_test():\n _run_install(False)\n _run_coverage_html(False)\n _run_typecheck_xml(False)\n _run_lint(True)", "def tests():\n api.local('nosetests')", "def main():\n tng.api.runner()", "def RunTest(self):\n self.TestLs()\n self.TestTerminate()\n self.TestMultipleProcesses()", "def runner(app):\n return app.test_cli_runner()", "def runner(app):\n return app.test_cli_runner()", "def runner(app):\n return app.test_cli_runner()", "def runner(app):\n\n return app.test_cli_runner()", "def testInit(self):\n self.globalInit()\n self.test.start()", "def _run_local_tests(self, *args, **kwargs):\n pass", "def run_test(self):\n raise NotImplementedError", "def test_example_runs(self):\n run_example(\n verbose=False,\n testapp=self.testapp,\n )", "def _Main():\n\n options, args = run_tests_util.ParseArgs('gtest')\n test_runner = run_tests_util.TestRunner(\n script_dir=SCRIPT_DIR,\n build_dir_var_name='GMOCK_BUILD_DIR',\n injected_build_dir_finder=GetGmockBuildDir)\n tests = test_runner.GetTestsToRun(args,\n 
options.configurations,\n options.built_configurations)\n if not tests:\n sys.exit(1) # Incorrect parameters given, abort execution.\n\n sys.exit(test_runner.RunTests(tests[0], tests[1]))", "def start_test_instance(test_name=None):\n env.warn_only = True\n if test_name is not None:\n instances = [test_name]\n else:\n output = run('ls -1 %s' % env.site_root)\n instances = [x.strip() for x in output.split(\"\\n\")]\n for item in instances:\n sudo(\"start %s\" % item.strip())", "def tests():", "def task_test(argv):\n run_tests(\"python2\", argv)\n run_tests(\"python3\", argv)", "def start_fixture(self):\n pass", "def startTest(testname, host):\r\n host, UNDI = getBoxInfo()\r\n runID = uuid.uuid5(uuid.NAMESPACE_DNS, host)\r\n lg.info(\"Test, %s, run started on %s/%s with runID %s\"%(testname, host, UNDI, runID.hex))\r\n def abort():\r\n lg.info(\"Aborting test run %s for test %s on %s/%s\"%(runID.hex, testname, host, UNDI))\r\n return\r\n def end():\r\n lg.info(\"Ending test run %s for test %s on %s/%s\"%(runID.hex, testname, host, UNDI))\r\n return\r\n return end, abort", "def setUp(self):\n self.p = Process(target = start_server)\n self.p.start()\n time.sleep(0.5)\n #init_fakeDB()\n time.sleep(0.5)", "def setUp(self):\n self.p = Process(target = start_server)\n self.p.start()\n time.sleep(0.5)\n #init_fakeDB()\n time.sleep(0.5)", "def setUp(self):\n self.p = Process(target = start_server)\n self.p.start()\n time.sleep(0.5)\n #init_fakeDB()\n time.sleep(0.5)", "def startTestRun(self, test):\n self.runTime= time.time()\n self.logger.debug(\"\\nBeginning ForceBalance test suite at %s\\n\" % time.strftime('%x %X %Z'))", "def setUp(self):\n self.p = Process(target = start_server)\n self.p.start()\n time.sleep(0.5)", "def pytest_started_handling_group(session, worker):", "def run(self):\n if self.all:\n cmd = self.apply_options(self.test_all_cmd)\n self.call_and_exit(cmd)\n else:\n cmds = (self.apply_options(self.unit_test_cmd, (\"coverage\",)),)\n if self.coverage:\n cmds += (self.apply_options(self.coverage_cmd),)\n self.call_in_sequence(cmds)", "def setUp(self):\r\n print('---------------------------------------------\\n')\r\n print('STARTING TEST...')", "def runTest(self):\n self.setUp()\n self.test_JupyterNotebooks1()", "def main() -> bool:\n global logger\n logger = setup_logger(\"nitpycker\")\n plugin_manager = Manager()\n plugin_manager.load_plugins()\n args = parse_args(plugin_manager)\n if plugin_manager.enable_plugins(args.plugins, args):\n exit(2)\n\n plugin_manager.pre_test_discovery()\n tests = unittest.defaultTestLoader.discover(args.start_directory, pattern=args.pattern)\n plugin_manager.post_test_discovery()\n tests = plugin_manager.filter_tests(tests)\n report = ParallelRunner(plugin_manager, process_number=args.process_number, verbosity=args.verbosity).run(tests)\n return not report.wasSuccessful()", "def setUp(self):\n super().setUp()\n self.runner = CliRunner()", "def setup(self, args={}):\n\n return Status.RUN", "def test():\n test_app()\n test_pagebrowser()", "def run_test(test_name):\n\n print 'Running %s_test...' 
% test_name\n os.system('./test_%s.py' % test_name)\n print", "def main_tester():\n create_tester_paths()\n _logger.info(' -- tester init done setting up paths and db file.')", "def test_begin(self):", "def setUp(self):\n pyauto.PyUITest.setUp(self)\n\n webapp = self.InstallExtension(self.GetWebappPath())\n self.host.LaunchApp(webapp)\n self.account = self.GetPrivateInfo()['test_chromoting_account']", "def setUpModule(): # noqa\n base.enabledPlugins.append('jobs')\n base.enabledPlugins.append('romanesco')\n base.enabledPlugins.append('gravatar')\n base.enabledPlugins.append('minerva')\n base.startServer()", "def startTestRun(self, event):\n self.prof = cProfile.Profile()\n event.executeTests = self.prof.runcall", "def test_base(self):\n self.render_config_template(\n )\n\n proc = self.start_beat()\n self.wait_until(lambda: self.log_contains(\"mockbeat start running.\"))\n proc.check_kill_and_wait()\n assert self.log_contains(\"mockbeat stopped.\")", "def setUp(self):\n MainTests.setUp(self)", "def main():\n tester = Tester()\n # parse args, load configuration and create all required objects.\n tester.setup_experiment()\n # GO!\n tester.run_experiment()", "def pytest_configure() -> None: # pragma: no cover\n print(\"Starting server app\")\n PROC.start()\n time.sleep(1)\n if PROC.exitcode is not None:\n pytest.exit(\"Failed to start the server, exit code {}\\nLogs are in logs/server.log\".format(PROC.exitcode))\n return\n\n create_generated_client()", "def test(base_url='http://localhost:8000/'):\n with env.cd(settings.PROJECT_PATH):\n # env.run('python rnacentral/apiv1/tests.py --base_url=%s' % base_url)\n env.run('python rnacentral/portal/tests/selenium_tests.py --base_url %s --driver=phantomjs' % base_url) # pylint: disable=C0301\n env.run('python rnacentral/apiv1/search/sequence/tests.py --base_url %s' % base_url) # pylint: disable=C0301", "def setUpModule():\n base.enabledPlugins.append('jobs')\n base.enabledPlugins.append('romanesco')\n base.enabledPlugins.append('gravatar')\n base.enabledPlugins.append('minerva')\n base.startServer(False)", "def start_test_run(self, request):\n request.worker.initialize_test_run(request.message.tests,\n request.message.run_data)\n\n return SuccessReply()", "def main():\n\n parser = argparse.ArgumentParser(prog=\"run_test.py\",\n formatter_class=argparse.RawTextHelpFormatter)\n parser.add_argument('id', help=\"Id of a test\")\n args = parser.parse_args()\n\n configure_logger()\n\n test_info = TESTS.get(args.id, None)\n if not test_info:\n test_info.log.error(f'{args.id} does not exist')\n exit(ErrorCode.CRITICAL)\n os.environ['DISPLAY'] = \":0.0\"\n\n test = Test(args.id, test_info)\n result = test.run()\n\n test.log.info('#' * 80)\n if not result:\n test.log.error('TEST FAILED')\n else:\n test.log.info('TEST PASSED')\n test.log.info('#' * 80)\n exit(not result)", "def test():\n nose.run()", "def runner_setup():\n runner = ClassicRunner()\n yield runner", "def test_single_test_case():\n pass", "def run_tests(virtual_env):\n signal.signal(signal.SIGINT, signal.SIG_IGN)\n args = [\n 'python',\n 'setup.py',\n 'nosetests',\n '--with-coverage',\n '--with-xunit',\n ]\n subprocess.call(args, cwd=abspath(join(HOLLAND_ROOT, 'holland-core')), env=virtual_env)\n subprocess.call(['coverage', 'xml'], cwd=abspath(join(HOLLAND_ROOT, 'holland-core')), env=virtual_env)\n for plugin_dir in open(join(HOLLAND_ROOT, 'plugins', 'ACTIVE')):\n plugin_dir = plugin_dir.rstrip()\n plugin_path = join(HOLLAND_ROOT, 'plugins', plugin_dir)\n subprocess.call(args, cwd=plugin_path, 
env=virtual_env)\n subprocess.call(['coverage', 'xml'], cwd=plugin_path, env=virtual_env)\n for addon_dir in open(join(HOLLAND_ROOT, 'addons', 'ACTIVE')):\n addon_dir = addon_dir.rstrip()\n addon_path = join(HOLLAND_ROOT, 'addons', addon_dir)\n subprocess.call(args, cwd=addon_path, env=virtual_env)\n subprocess.call(['coverage', 'xml'], cwd=plugin_path, env=virtual_env)\n #return subprocess.call(args, env=virtual_env)", "def runTest(self):\n self.setUp()\n self.test_ProstateReporting1()", "def _run_ci_integration_test():\n _run_install(False)\n _run_integration_tests_on_github(False)", "def runTest(self):\n self.setUp()\n self.test_ExtendSpine1()", "def test_functionality(self):\n self.browserObject = globalVars.browserObject\n\n self.preRunSetup()\n \n self.runTestCase()\n \n self.postRunCleanup()", "def before_productline_steps():\n cmd = ['phantomjs', '--webdriver', '4444']\n click.echo(\"Running command\" + subprocess.list2cmdline(cmd))\n process = subprocess.Popen(cmd)\n RUNNING_TEST_PROCESSES.append(process)", "def run_tests(self):\n\n self.manifest_path = os.path.join('tests',\n 'functional',\n 'manifest.ini')\n TestRun.run_tests(self)", "def main():\n test_runner = TestRunner(\n FLAGS.workspace, FLAGS.bench_home, imagenet_dir=FLAGS.train_data_dir)\n test_runner.run_tests(FLAGS.test_list.split(','))", "def pytest(context):\n exec_cmd = \"pytest\"\n run_cmd(context, exec_cmd)", "def task_test():\n return {\n 'actions': ['py.test tests/'],\n }", "def spec_tests():\n pass", "def test_app():\n pass", "def test():\n for cmd in [\n \"pytest --verbose --cov pike/ --cov-report term --cov-report html tests/\",\n ]:\n _run_in_venv(shlex.split(cmd))\n for linter in [[\"black\", \"--check\"], [\"flake8\"], [\"isort\", \"--check\"]]:\n _run_in_venv(linter + TEST_FILES)\n\n _run_in_venv(\n [\"mypy\", \"pike/\", \"tests/\", \"setup.py\", \"pikefile.py\", \"--show-error-codes\"]\n )\n _run_in_venv([\"mypy\", \"examples/\"])\n _run_in_venv([\"bandit\", \"-r\", \"pike/\"])", "def setUp(self):\r\n self.app = app.test_client()\r\n self.app.testing = True", "def setUp(self) -> None:\n self.app = app.app.test_client()\n self.app.testing = True", "def test_pre_cli_init(run):\n out, err = run(dork.cli.the_predork_cli, [], *(\"\", \"-i\", \"test\"))\n assert \"test\" in out, \\\n \"Failed run the dork.cli.the_predork_cli method: {err}\"\\\n .format(err=err)\n out, err = run(dork.cli.the_predork_cli, [], *(\"\", \"-i\", \":test\"))\n assert \"does not exist\" in out, \\\n \"Failed run the dork.cli.the_predork_cli method: {err}\"\\\n .format(err=err)", "def main():\n grid_tester_cpu = GridTesterCPU()\n\n # parse args, load configuration and create all required objects.\n grid_tester_cpu.setup_grid_experiment()\n\n # GO!\n grid_tester_cpu.run_grid_experiment()", "def startTestRun(self):\n self.startTime = time.time()\n # Really verbose information\n if self.verbose > 2:\n self.stream.writeln(self.colors.bold(pretty_version() + \"\\n\"))", "def test_functionality(self):\n \n self.browserObject = globalVars.browserObject\n\n self.preRunSetup()\n \n self.runTestCase()\n \n self.postRunCleanup()\n \n self.logout()", "def run_tests():\n test_command = \"pytest -s \" + os.path.join(root_path, \"cases\", \"test_cases.py::TestCases::test_cases\") + \" --html=\" + os.path.join(root_path, \"reports\", \"qa_testing_report.html\")\n\n subprocess.run(test_command, shell=True)", "def test_start_test(self):\n self.protocol.startTest(self.test)\n self.assertEqual(self.io.getvalue(), compat._b(\n \"test: %s\\n\" % 
self.test.id()))", "def main():\n driver = Driver()\n driver.start()", "def test_testutils():\n build()\n sh(\"%s psutil\\\\tests\\\\test_testutils.py\" % PYTHON)", "def test():\n with lcd(BASEDIR):\n local('virtenv/bin/coverage run runtests.py -v2')\n local('virtenv/bin/coverage report -m')", "def run_tests(self, test_labels, extra_tests=None, **kwargs):\n argv = []\n if self.verbosity == 0:\n argv.append('--quiet')\n elif self.verbosity == 2:\n argv.append('--verbose')\n elif self.verbosity == 3:\n argv.append('-vv')\n if self.failfast:\n argv.append('--exitfirst')\n if self.keepdb:\n argv.append('--reuse-db')\n if self.junit_xml:\n argv.append(f'--junit-xml={self.junit_xml}')\n\n argv.extend(test_labels)\n return pytest.main(argv)", "def before_run_tests(cls):\n pass", "def runTest(self):\r\n self.setUp()\r\n self.test_sceneImport24281()", "def main():\n try:\n unittest.main(testLoader=BetterLoader(), defaultTest='suite')\n except Exception:\n import sys\n import traceback\n traceback.print_exc()\n sys.exit(1)", "def main():\n try:\n args = get_args()\n validate_args(args)\n applog = AppLogger(name=\"DENT\")\n loop = asyncio.get_event_loop()\n pytest.testbed = TestBed(applog, loop, args)\n loop.run_until_complete(setup(args, applog))\n loop = asyncio.get_event_loop()\n loop.run_until_complete(\n testbed_update_login_banner(pytest.testbed.devices, args, applog, add=True)\n )\n run_tests(args, applog)\n loop = asyncio.get_event_loop()\n loop.run_until_complete(\n testbed_update_login_banner(pytest.testbed.devices, args, applog, add=False)\n )\n except argparse.ArgumentTypeError as e:\n print(\"Invalid arguments. Err: %s\" % str(e))\n except Exception as e:\n print(\"Error occured in testbed. Err: %s\" % str(e))\n traceback.print_exc()\n finally:\n if pytest and hasattr(pytest, \"testbed\") and pytest.testbed:\n loop = asyncio.get_event_loop()\n loop.run_until_complete(pytest.testbed.cleanup())", "def test_launch(self):\n\n\n username,userpass = self.testdata.find_account_for('toolsubmitter')\n\n self.utils.account.login_as(username,userpass)\n\n self.contribtool.launch(TOOLNAME,username,userpass)", "def run_quick_test(self, context: ResourceCommandContext, test: str) -> None:\n self.handler.run_quick_test(context, test)", "def start_test(self, request):\n request.worker.start_test(request.message.test_id)\n\n return SuccessReply()" ]
[ "0.78004026", "0.7614107", "0.7351775", "0.7290789", "0.7199481", "0.71878326", "0.70207834", "0.6993264", "0.6858419", "0.6842322", "0.6831787", "0.6791457", "0.6771266", "0.6755483", "0.670912", "0.6685418", "0.66745853", "0.66695005", "0.65984964", "0.6530515", "0.65222645", "0.6512471", "0.6480288", "0.6448864", "0.6437596", "0.6421368", "0.6421368", "0.6421368", "0.64142823", "0.64014864", "0.639646", "0.6395921", "0.6384487", "0.6382991", "0.6371017", "0.6369478", "0.6363393", "0.6349725", "0.6344923", "0.6336869", "0.6336869", "0.6336869", "0.6326886", "0.63077444", "0.6284161", "0.62760496", "0.62750614", "0.62690103", "0.6247331", "0.62360567", "0.62344664", "0.6218695", "0.6210875", "0.6195932", "0.61856675", "0.61787844", "0.6163861", "0.61591655", "0.6149759", "0.6138469", "0.613752", "0.6133711", "0.6132466", "0.6130715", "0.61293757", "0.61184746", "0.61173415", "0.6116993", "0.61139923", "0.6112615", "0.61089337", "0.60994285", "0.6096984", "0.6087", "0.6078367", "0.6075544", "0.6073287", "0.6068913", "0.6067078", "0.60666955", "0.60645926", "0.60634977", "0.6056684", "0.60461515", "0.60432535", "0.6038045", "0.6037228", "0.60339904", "0.60308504", "0.60229987", "0.60210526", "0.6020958", "0.60185605", "0.60177314", "0.60164005", "0.6011662", "0.60067314", "0.60034657", "0.5997635", "0.59934896", "0.5990314" ]
0.0
-1
ffs open, 3x axis clear, guider on
def test_guider_start_ffsClosed(self):
        self._guider_start(6, 20, 0, 0)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def clear_crossfilter1(self):\n print ('Trigger clear')\n self.query_dict = {}\n self.plot_data = None\n self.create_figure_new()\n layout_doc.children[4].children[0] = self.p", "def clear_crossfilter2(self):\n print ('Trigger clear')\n self.query_dict = {}\n self.plot_data = None\n self.create_figure_new()\n layout_doc.children[4].children[1] = self.p", "def clear(self):\n row, col = self.selected\n if self.cubes[row][col].value == 0:\n self.cubes[row][col].set_temp(0)", "def abrir(self):\n self.x0 = [val for val in self.x0]\n self.x = [val for val in self.x]\n self.tipos = [val for val in self.tipos]\n self.mask_fr = [val for val in self.mask_fr]\n self.mask_in = [val for val in self.mask_in]\n self.open = True", "def clean_graph(self):\n #self.time = 0#\n \n # values of microcontroller\n #if self.graf_t.buffer_info()[1] != 0:\n for a in range(self.graf_t.buffer_info()[1]):\n self.graf_t.pop()\n \n for a in range(self.graf_r.buffer_info()[1]):\n self.graf_r.pop()\n\n for a in range(self.graf_x0.buffer_info()[1]):\n self.graf_x0.pop()\n\n for a in range(self.graf_x1.buffer_info()[1]):\n self.graf_x1.pop()\n\n for a in range(self.graf_u.buffer_info()[1]):\n self.graf_u.pop()\n \n self.referenceLine.set_data(self.graf_t, self.graf_r)\n self.x0Line.set_data(self.graf_t, self.graf_x0)\n self.x1Line.set_data(self.graf_t, self.graf_x1)\n self.uLine.set_data(self.graf_t, self.graf_u)\n \n try:\n #Draw the lines\n if self.checkBox_R.isChecked():\n self.mplWidget.canvas.ax.draw_artist(self.referenceLine)\n if self.checkBox_x0.isChecked():\n self.mplWidget.canvas.ax.draw_artist(self.x0Line)\n if self.checkBox_U.isChecked():\n self.mplWidget.canvas.ax.draw_artist(self.uLine)\n if self.checkBox_x1.isChecked():\n self.mplWidget.canvas.ax.draw_artist(self.x1Line)\n except AssertionError:\n pass\n try:\n self.mplWidget.canvas.blit(self.mplWidget.canvas.ax.bbox)\n except AttributeError:\n pass\n \n # force an image redraw\n self.mplWidget.canvas.draw()", "def clear(self):\n self.np.fill(OFF)\n self.np.show()\n return True", "def clear(self):\n if self.flag == 0:\n for coord in INDICES:\n self.kill(coord)\n self.chart[coord] = DEAD", "def toggle_zero_grid(self, x):\r\n self.konfig.zero.set_grid(x)\r\n self.zeroGraf.toggle_grid(x)", "def front_wheel_from_axis():", "def cerrar(self):\n self.x0 = np.array(self.x0, dtype=float)\n self.x = np.array(self.x, dtype=float)\n self.tipos = np.array(self.tipos, dtype=int)\n self.mask_fr = self.tipos == 1\n self.mask_in = self.tipos == 2\n self.num_fr = np.sum(self.mask_fr)\n self.num_in = np.sum(self.mask_in)\n self.open = False", "def clear_graphs(self):\n for ax in (self.master_plot, self.time_velocity, self.time_power, self.power_velocity):\n ax.cla()", "def off(self,ax):\n # remove cell lines if thery are on the plot\n # (if new axes are created the cell lines will be not there)\n for line in self.cell_lines:\n try:\n ax.lines.remove(line)\n except ValueError:\n pass\n # set lines and coordinates to empty lists \n self.cell_lines = []\n self.xx_cells = []", "def onSetToCustDims(self, evt):\n\t\tself.halfResampleZ.Enable(0)\n\t\tself.fourthResampleZ.Enable(0)\n\t\t\n\t\tfor obj in [self.factorLabel, self.dimLabel, self.newDimX, self.newDimY, self.newDimZ, self.factorX, self.factorY, self.factorZ]:\n\t\t\tobj.Enable(1)\n\t\ttry:\n\t\t\trx = int(self.newDimX.GetValue())\n\t\t\try = int(self.newDimY.GetValue())\n\t\t\trz = int(self.newDimZ.GetValue())\n\t\t\tself.currSize = (rx, ry, rz)\n\t\texcept:\n\t\t\tpass", "def correct(self):\n 
self.parent.copyCurrentWinState(self.pltw)\n self.pltw.blklst[self.blkno][self.ypos] = self.data[1] - self.data[2]\n self.pltw.updatePlot()\n self.pltw.dirty = True\n self.pltw.activecurv = self.cpos\n self.parent.updateUI()\n self.hide()", "def clear_axes_selection(self):\n self.x_axis = ''\n self.y_axis = ''\n self.non_numeric_x_axis = False\n self.count_desired = False\n self.header_choices('x')", "def plot_clear():\n plt.cla()", "def cb_reset(event):\n axDirichlet.cla()\n # Reset Sliders\n sAlpha0.reset() # resetが駄目!一番最初に戻ってしまう\n sAlpha1.reset()\n sAlpha2.reset()\n alpha_update = [sAlpha0.val, sAlpha1.val, sAlpha2.val]\n print('alpha_update=', alpha_update)\n\n # ML\n lambda_ML = CatML.MLinfer(x_cat)\n\n axML.cla()\n drawBarGraph( axML, \"ML\", lambda_ML, bar_y_max, col_ML ) # Draw Bar graph\n\n\n # MAP\n dirichlet.set_param(alpha_update)\n lambda_MAP = CatMAP.MAPinfer(x_cat, dirichlet)\n\n axMAP.cla()\n drawBarGraph( axMAP, \"MAP\", lambda_MAP, bar_y_max, col_MAP ) # Draw Bar Graph\n\n # Bayes\n posteriorDirichlet.set_param(alpha_update)\n posteriorDirichlet.calcPosterior(x_cat)\n lambda_Bayes = np.zeros(3)\n for k in range(3):\n lambda_Bayes[k] = posteriorDirichlet.BayesInfer(k)\n\n axBayes.cla()\n drawBarGraph( axBayes, \"Bayes\", lambda_Bayes, bar_y_max, col_Bayes ) # Draw Bar Graph\n\n draw_pdf_contours(axDirichlet, dirichlet, True) # Draw Dirichlet\n\n print('Reset')\n print('lambda_ML =', lambda_ML)\n print('lambda_MAP =', lambda_MAP)\n print('lambda_Bayes=', lambda_Bayes)\n draw_point(axDirichlet, lambda_ML, col_ML)\n draw_point(axDirichlet, lambda_MAP, col_MAP)\n draw_point(axDirichlet, lambda_Bayes, col_Bayes)\n draw_point(axLikelihood, lambda_ML, col_ML)\n draw_point(axPosteriorDirichlet, lambda_MAP, col_MAP)\n draw_point(axPosteriorDirichlet, lambda_Bayes, col_Bayes)\n\n fig.canvas.draw_idle()", "def all_off(self):\n self.fill_off()\n self.update()\n self.fill_off()\n self.update()", "def clear(self):\r\n\t\tself.grid.fill(False)", "def toggle_axis(self):\n # cycle through three axis states\n states = [False, 'world', 'all']\n # the state after toggling\n index = (states.index(self.view['axis']) + 1) % len(states)\n # update state to next index\n self.view['axis'] = states[index]\n # perform gl actions\n self.update_flags()", "def __reset_crosshair(self):\n self.lhor.set_ydata(self.y_coord)\n self.lver.set_xdata(self.x_coord)", "def cells_off(self):\n self.plotter.cells_off(self.ax)\n self.fig.canvas.draw()", "def switch_frequency_plot_channel_three(self):\n if self.plot_channel_key_booleans[2]:\n self.plot_channel_key_booleans[2] = False\n self.parent_widget.graph_channel_three_button.setStyleSheet(\n \"background-color:rgb(%d,%d,%d)\" % (255, 255, 255))\n\n else:\n self.plot_channel_key_booleans[2] = True\n self.parent_widget.graph_channel_three_button.setStyleSheet(\n \"background-color:rgb(%d,%d,%d)\" % (LINE_COLORS[2]))", "def View_Preorder( self ):\r\n cb.order = 0\r\n self.system.Draw( )", "def float_to_front(qtile):\n for window in qtile.current_group.windows:\n if window.floating:\n window.cmd_bring_to_front()", "def clickClearReferences(self, event):\n self.whiteReference = None\n self.lightBtn.color = '0.85'\n self.darkReference = None\n self.darkBtn.color = '0.85'\n plt.pause(0.3)\n self.axes.autoscale_view()", "def cygx3IndFlux(self):\n # --------------------------------------------------------------------------------------------- #\n # Read data\n fitsNnam = os.path.join(self.workpath, 'LCresults.fits')\n lcTab = Table.read(fitsNnam)\n detect = lcTab['ts'] >= 
self.tsmin\n lcTab = lcTab[detect] \n\n ind08 = (lcTab['mjd'] > 54700) & (lcTab['mjd'] < 54900) \n flux08 = lcTab['flux'][ind08]\n fluxerr08 = lcTab['fluxerr'][ind08]\n index08 = lcTab['index'][ind08]\n indexerr08 = lcTab['indexerr'][ind08]\n\n ind09 = (lcTab['mjd'] > 54900) & (lcTab['mjd'] < 55100) \n flux09 = lcTab['flux'][ind09]\n fluxerr09 = lcTab['fluxerr'][ind09]\n index09 = lcTab['index'][ind09]\n indexerr09 = lcTab['indexerr'][ind09]\n\n scale = 10**int(np.floor(np.log10( np.mean( np.concatenate( (flux08, flux09), axis=0) ) ))) \n\n # --------------------------------------------------------------------------------------------- #\n # Plot\n indplt = FermiPlot(savepath='', xsize=8.5, ysize=6)\n indplt.figname = os.path.join(self.workpath, 'IndvsFlux.pdf')\n indplt.xlabel = r'Flux ($10^{%d}$ ph\\,cm$^{-2}$\\,s$^{-1}$)'%(int(np.log10(scale)))\n indplt.ylabel = r'Index'\n indplt.mksize = 2\n indplt.color = self.lblue\n indplt.label = r'2008'\n indplt.plot(x=flux08/scale, xerr=fluxerr08/scale, y=index08, yerr=indexerr08)\n indplt.color = self.loran\n indplt.label = r'2009'\n indplt.plot(x=flux09/scale, xerr=fluxerr09/scale, y=index09, yerr=indexerr09)\n indplt.save()\n\n print(\"\\t=== Figure '{}' created ===\".format(indplt.figname)) \n return", "def plot3d(self):\n plot_rupture_wire3d(self)", "def cla(self):\n self.disable_mouse_rotation()\n Axes.cla(self)\n self.grid(rcParams['axes3d.grid'])", "def reset(self):\n try:\n self.ax.cla()\n except Exception as e:\n print 'Exception BasePlot:', e\n raise e\n \n self._plotbuffer = { pat: [0 for _ in range(self._plotlength)] for pat in self._patterns }\n self._timestampbuffer = { pat: [0 for _ in range(self._plotlength)] for pat in self._patterns }\n self.ax.set_axis_bgcolor('black')\n self.ax.set_xticks([])\n self.ax.set_yticks([])", "def reset(self):\n self.u0.fill(0.)\n self.u1.fill(0.)\n self.u2.fill(0.)\n self.time = 0.", "def update(self, i):\n\n self.current_position = self.mediaPlayer.position()\n \t\n \n\n \"\"\"\n \"Record mode\" and \"wide x-axis mode\" shouls not work together. Wide mode is only for reading data, not writing data. \n The user is not allowed to write data when 16 000 points are displayed (wide mode) on tha diagram. If he does so, the frequency of the graph points decreases with time. \n \"\"\"\n \n if self.checkbox.isChecked():\n self.wideRadio.setEnabled(False)\n if not self.checkbox.isChecked():\n self.wideRadio.setEnabled(True)\n if self.wideRadio.isChecked():\n self.checkbox.setEnabled(False)\n if not self.wideRadio.isChecked():\n self.checkbox.setEnabled(True)\n \n\n\n if self.checkbox.isChecked() and self.mediaPlayer.state() == QMediaPlayer.PlayingState:\n \n self.savedRecently = False\n\n\n self.current_position = self.mediaPlayer.position()\n\n \n if self.xValues == []:\n # \"If the list of xValues is empty\". This happens only in the start of the plotting process.\n self.xValues.append(self.current_position)\n self.yValues.append(self.mouseY)\n self.colors.append(self.currentColor)\n\n #self.position_index = self.xValues.index(self.current_position)\n \n\n if self.xValues != []:\n\n if self.current_position > max(self.xValues):\n # \"If the point is bigger than the last point\". 
I.e if the point will be plotted in the end of the current graph.\n\n self.xValues.append(self.current_position)\n self.yValues.append(self.mouseY)\n self.colors.append(self.currentColor)\n\n self.position_index = self.xValues.index(self.current_position)\n\n if self.current_position < max(self.xValues):\n # \"If the point is smaller than the last point\". I.e if the point will be plotted in the middle of the current graph.\n\n \n if self.mediaPlayer.position() < 100:\n # The program has a problem of removing a point if x=0. This if-statement solves the problem.\n self.xValues.pop(0)\n self.yValues.pop(0)\n self.colors.pop(0)\n \n\n\n # Clearing all the points that are 100 ms (or less) in front of the current position. \n for number in range(self.current_position, self.current_position + 100):\n if number in self.xValues:\n self.yValues.pop(self.xValues.index(number))\n self.colors.pop(self.xValues.index(number))\n self.xValues.remove(number)\n \n \n \n # Plot new points\n bisect.insort(self.xValues,self.current_position) # Through this method, the element is inserted in order.\n self.yValues.insert(self.xValues.index(self.current_position), self.mouseY)\n self.colors.insert(self.xValues.index(self.current_position), self.currentColor)\n\n self.position_index = self.xValues.index(self.current_position)\n \n\n\n # View modes: zoom or wide.\n\n if self.zoomRadio.isChecked():\n self.canvas.axes.set_ylim(0, 100)\n self.canvas.axes.set_xlim(self.current_position-5000, self.current_position+5000)\n\n self.update_tempLists()\n\n self.curve = self.canvas.axes.scatter(self.tempXList, self.tempYList, s=10 , c=self.tempCList)\n\n\n\n if self.wideRadio.isChecked():\n self.canvas.axes.set_ylim(0, 100)\n\n if self.mediaPlayer.duration() != 0:\n self.canvas.axes.set_xlim(0, self.mediaPlayer.duration())\n elif self.xValues != []:\n self.canvas.axes.set_xlim(0, max(self.xValues))\n\n self.curve = self.canvas.axes.scatter(self.xValues, self.yValues, s=10 , c=self.colors)\n\n \n\n # I remove the previous vertical and horizontal lines. 
If I do not remove them, the program gets slower and slower, and the frequency of the points decreases with time.\n self.hline.remove()\n self.vline.remove()\n \n # New vertical and horizontal lines are created and updated to the correct values.\n self.vline = self.canvas.axes.axvline(x=self.mediaPlayer.position(), color='gray',linestyle=\":\")\n self.hline = self.canvas.axes.axhline(y=self.mouseY, color='gray',linestyle=\":\")\n\n\n\n return [self.curve] + [self.vline] + [self.hline]", "def emit_reset(self):\n for name in self.layout.axes:\n params = self.layout.axes_options.get(name, DEFAULT_AXIS_OPTIONS)\n self.write_event(ecodes.EV_ABS, name, int(sum(params[1:3]) / 2))\n\n for name in self.layout.buttons:\n self.write_event(ecodes.EV_KEY, name, False)\n\n for name in self.layout.hats:\n self.write_event(ecodes.EV_ABS, name, 0)\n\n self.device.syn()", "def f_fixed(self):\n self.fx_free = self.fy_free = self.fz_free = False\n return self", "def resetFrame(self):\n with self.lock:\n hbin = self.hbin\n vbin = self.vbin\n if hbin != 1:\n self.setHBin(1)\n if vbin != 1:\n self.setVBin(1)\n with self.lock:\n self.expArea = self.defaultExpArea\n x1, y1, x2, y2 = self.expArea\n self.xsize = x2 - x1\n self.ysize = y2 - y1", "def drawAxes(t):\r\n t.speed(0)\r\n t.pd()\r\n t.forward(500)\r\n t.back(500)", "def reset(self):\n # Don't reset axis labels\n self.range = ((0, 2, 1),) * self.ndim\n self.current_step = (0,) * self.ndim\n self.order = tuple(range(self.ndim))", "def clear(self):\n for i in range(len(self.canvas)):\n self.canvas[i] = 0", "def cells_off(self,ax):\n self.cells.off(ax)", "def disable_cl1(self):\n self.write_versa5(0x10,0xc4) ## Enable xtal and clock\n self.write_versa5(0x21,0x81) ## Use and enable divider\n self.write_versa5(0x13,0x00) ## Use CL1 input instead of xtal\n self.write_versa5(0x10,0x80) ## Enable xtal input only\n self.write_versa5(0x17,0x04) ## Change top multiplier to 0x44\n self.write_versa5(0x18,0x40)", "def reset_axis_counters(self):\n\n self.column_counter = 0\n self.row_counter = 0", "def fix(hobj):\n\n h.execute('create axon[2]', hobj)\n\n for sec in hobj.axon:\n sec.L = 30\n sec.diam = 1\n hobj.axonal.append(sec=sec)\n hobj.all.append(sec=sec) # need to remove this comment\n\n hobj.axon[0].connect(hobj.soma[0], 0.5, 0)\n hobj.axon[1].connect(hobj.axon[0], 1, 0)\n\n h.define_shape()", "def _visibleChannels_changed(self):\n for i in range(0,8):\n if i in self.visibleChannels:\n self.masterContainer.plots[\"channel\"+str(i)][0].visible=True\n else:\n print i\n self.masterContainer.plots[\"channel\"+str(i)][0].visible=False", "def plot(self,ax):\n if self.show_cells_flag:\n self.cells_on(ax)\n else:\n self.cells.off(ax)", "def clear(self):\n self._fig = go.Figure()", "def _clear(self, event):\n if self.ignore(event) or self._changed_canvas():\n return\n self._background = self.canvas.copy_from_bbox(self.ax.bbox)\n self.ax.draw_artist(self._checks)\n if hasattr(self, '_lines'):\n for l1, l2 in self._lines:\n self.ax.draw_artist(l1)\n self.ax.draw_artist(l2)", "def clear(self) -> None:\n for y in range(self.width):\n for x in range(self.height):\n self.set_value(Point(y, x), FieldState.EMPTY)", "def sink_floats(qtile):\n for window in qtile.current_group.windows:\n if window.floating:\n window.toggle_floating()", "def reset(self):\r\n # reset Wheel encoders\r\n self.start_time = time.time()\r\n [left_start, right_start] = self.Roomba.Query(43, 44)\r\n self.Motion.reset(left_start, right_start)\r\n # reset bumper\r\n self.bumper.reset()\r\n\r\n #reset grid world 
data\r\n self.action=[0.0,0.0]\r\n self.grid_state= [0,0,0]\r\n self.real_state = [0.0, 0.0, 0.0]\r\n self.trans_model = None\r\n pass", "def reset(self):\n self._x = 0\n self._y = 0", "def __clear_all(self,event):\n \n self.CoagS = \\\n self.CoagS_term = \\\n self.GR_term = \\\n self.J_time = \\\n self.J = \\\n self.J_lims = \\\n self.mmd_time = \\\n self.mmd_dp = \\\n self.mmd_time_sr = \\\n self.mmd_dp_sr = np.array([])\n \n # Initialize all np.nan variables\n self.J_peak = \\\n self.J_halfmax = \\\n self.J_median = \\\n self.gr = np.nan\n \n # Clears polygon used to outline particle mode\n self.polyx = \\\n self.polyy = \\\n self.polyx_out = \\\n self.polyy_out = np.array([])\n self.poly.set_xy(np.ones((2,2))*np.nan)\n self.poly_out.set_xy(np.ones((2,2))*np.nan)\n \n self.box_gr.set_val(\"%.2f\" % self.gr)\n self.box_J_peak.set_val(\"%.2f\" % self.J_peak)\n self.box_J_median.set_val(\"%.2f\" % self.J_median)\n self.box_J_halfmax.set_val(\"%.2f\" % self.J_halfmax)\n\n # clear average mode diameters and fit\n self.mmd_plot.set_data(np.nan,np.nan)\n self.mmd_plot_sr.set_data(np.nan,np.nan)\n self.mmd_fit_sr.set_data(np.nan,np.nan)\n self.J_plot.set_data(np.nan,np.nan)\n self.J_fit.set_data(np.nan,np.nan)\n self.J_vertical_line1.set_xdata(np.nan)\n self.J_vertical_line2.set_xdata(np.nan)\n self.gr_term_plot.set_data(np.nan,np.nan)\n self.coags_term_plot.set_data(np.nan,np.nan)\n self.dNdt_term_plot.set_data(np.nan,np.nan)\n\n plt.draw()", "def _set_None(self):\n for obj in self.axes:\n obj._set_None()\n self.normalizations = None\n self.FTparameters = None\n self.values = None\n # Set to None the properties inherited from Data\n super(DataND, self)._set_None()", "def clearLineshape(self):\n self.x = np.arange(self.start,self.stop,round(self.step,4))\n self.lineshape = np.zeros(len(self.x))", "def View_Inorder( self ):\r\n cb.order = 1\r\n self.system.Draw( )", "def clear(self):\n\n # Clear\n self.axes.cla()\n try:\n self.figure.clf()\n except KeyError:\n FlatCAMApp.App.log.warning(\"KeyError in MPL figure.clf()\")\n\n # Re-build\n self.figure.add_axes(self.axes)\n self.axes.set_aspect(1)\n self.axes.grid(True)\n\n # Re-draw\n self.canvas.draw_idle()", "def OnFloated(self, event):\n self._floating = True\n wx.PostEvent(self, wxDockPaneFloatedEvent())", "def ClearDisplay():\n display.fill(0)", "def tick(self):", "def _clear(self, event):\n if self.ignore(event) or self._changed_canvas():\n return\n self._background = self.canvas.copy_from_bbox(self.ax.bbox)\n self.ax.draw_artist(self._buttons)\n if hasattr(self, \"_circles\"):\n for circle in self._circles:\n self.ax.draw_artist(circle)", "def fullLatticeCheckChanged(self, val):\n if val == QtCore.Qt.Unchecked:\n self.writeFullLattice = False\n else:\n self.writeFullLattice = True", "def reset(self):\n super(PolygonTool, self).reset()\n # self.__nsides = None\n # self.__increment = None\n # self.__external = False # make this adjustable?\n self.__center = None\n for _i in range(self.__nsides):\n self.__xpts[_i] = 0.0\n self.__ypts[_i] = 0.0", "def setNoZeroColor():\n dislin.nobgd()", "def show_grid(self):\n for ax in (self.time_velocity, self.time_power, self.power_velocity):\n ax.grid(True)", "def reset(self):\n self.tile=\"\"", "def clearup(self):\n\t\tself.outChannel.clearup()\n\t\tself.inChannel.clearup()", "def clear(self):\n self.tiempos.clear()\n for curva in self.curvas:\n curva.valores.clear()\n curva.maximo = float('-inf')\n curva.minimo = float('inf')\n self.queue_draw()", "def _switch(self):\n self.fill= not self.fill", "def 
clear_screen(self):\n if self.x:\n self.move_cur_up((self.prev_x+1)/self.get_col_width())\n self.clear_line(self.get_num_lines(self.prev_lines) +\n self.get_num_lines(['>' + self.prev_str + ' ']))\n #time.sleep(2)", "def reset(self) -> None:\n self.f1.reset()", "def ToggleAllGizmoLocalMode( self ):\n\n value = self.gizmoMgr.GetGizmoLocal( 'pos' )\n self.gizmoMgr.SetGizmoLocal( 'pos', not value )\n self.gizmoMgr.SetGizmoLocal( 'rot', not value )\n self.gizmoMgr.SetGizmoLocal( 'scl', not value )", "def update(self):\n self.plot.draw()\n \n func=str(self.edit1b.currentText())\n if self.win.test()==0:\n x=np.linspace(0,10,200)\n elif self.win.test()==1:\n x=np.linspace(0,0.40,200)\n \n pattern1=r'Steel'\n pattern2=r'Aluminium'\n pattern3=r'[\\d]+'\n \n if (func!='Comparison Chart'):\n self.edit2b.setDisabled(False)\n self.edit3b.setDisabled(False)\n self.edit4b.setDisabled(False)\n if (func=='Quenched/Tempered Steel'):\n alpha = 0.0025\n elif (func=='Annealed Steel'):\n alpha = 0.01\n elif (func=='Steel (input Su)'):\n S = str(self.edit2b.text())\n if (self.win.test()==0):\n S = str(float(S)/6.895)\n alpha = notch.alpha(eval(S))\n elif (func=='Aluminium Alloy 356.0 as cast'):\n rho = 0.08\n elif (func=='Aluminium Alloy 6061'):\n rho = 0.025\n elif (func=='Aluminium Alloy 7075'):\n rho = 0.015\n elif (func=='Material dropdown'):\n pass\n \n y1=[]\n if re.search(pattern1,func):\n Su=notch.su_s(alpha)\n if (self.win.test()==0):\n Su = Su*6.895\n for i in range(len(x)):\n y1.append(notch.nsp(alpha,x[i],self.win.test()))\n y=np.asarray(y1)\n if (re.search(pattern3,str(self.edit3b.text()))):\n r=eval(str(self.edit3b.text()))\n self.edit4b.setText(str(notch.nsp(alpha,r,self.win.test())))\n elif re.search(pattern2,func):\n Su=notch.su_a(rho)\n if (self.win.test()==0):\n Su = Su*6.895\n for i in range(len(x)):\n y1.append(notch.nsn(rho,x[i],self.win.test()))\n y=np.asarray(y1)\n if (re.search(pattern3,str(self.edit3b.text()))):\n r=eval(str(self.edit3b.text()))\n self.edit4b.setText(str(notch.nsn(rho,r,self.win.test())))\n \n self.edit2b.setText(str(Su))\n func1 = 'Steel (Su='+str(self.edit2b.text())+')'\n if (func!='Steel (input Su)'):\n self.plot.redraw(x,y,func, self.xlabel)\n elif (func=='Steel (input Su)'):\n self.plot.redraw(x,y,func1, self.xlabel)\n \n elif (func=='Comparison Chart'):\n self.edit2b.setText(\"\")\n self.edit2b.setDisabled(True)\n self.edit3b.setText(\"\")\n self.edit3b.setDisabled(True)\n self.edit4b.setText(\"\")\n self.edit4b.setDisabled(True)\n self.plot.draw_comp(self.xlabel, self.win.test())", "def enable_remove_plot(self):\n pass\n #if self.cb_plotpanel.GetCount() == 0:\n # self.bt_close_plot.Disable()\n #else:\n # self.bt_close_plot.Enable()", "def reset_graph(self):\n self.sick_per_timestep = []\n self.steps = []\n self.ax.clear()\n self.ax.set_xlabel(self.xlabel)\n self.ax.set_ylabel(self.ylabel)\n self.ax.set_title(self.title)", "def plot_blank(self):\n self.figure_bmp.SetBitmap(self.controller.plot_blank())", "def switch_frequency_plot_channel_six(self):\n if self.plot_channel_key_booleans[5]:\n self.plot_channel_key_booleans[5] = False\n self.parent_widget.graph_channel_six_button.setStyleSheet(\n \"background-color:rgb(%d,%d,%d)\" % (255, 255, 255))\n else:\n self.plot_channel_key_booleans[5] = True\n self.parent_widget.graph_channel_six_button.setStyleSheet(\n \"background-color:rgb(%d,%d,%d)\" % (LINE_COLORS[5]))", "def clear(self):\n black = neo.Color(0,0,0)\n self.set_all(black)\n self.draw()", "def on_click(event):\n ax = event.inaxes\n \n if ax is None:\n # 
Occurs when a region not in an axis is clicked...\n return\n \n if self.current_plot == 'single':\n if event.button is 1:\n if not self.ax_zoomed:\n # Change over to a single baseline plot\n try:\n self.ax_zoomed = True\n self.current_ax = ax\n ax.set_position([0.1, 0.05, 0.85, 0.80])\n ax.set_xlabel(\"Frequency\")\n #ax.set_ylabel(\"Time\")\n \n for axis in self.sp_fig.axes:\n if axis is not ax:\n axis.set_visible(False)\n \n except ValueError:\n raise\n self.sp_fig.canvas.mpl_disconnect(self.fig_connect)\n \n elif event.button is 3:\n if self.ax_zoomed:\n self.ax_zoomed = False\n #self.sp_fig.canvas.mpl_disconnect(self.fig_connect)\n self.updatePlot()\n \n else:\n # No need to re-draw the canvas if it's not a left or right click\n return\n \n elif self.current_plot == 'multi':\n if ax is None:\n # Occurs when a region not in an axis is clicked...\n return\n if event.button is 1:\n if not self.ax_zoomed:\n # Change over to a single baseline plot\n try:\n ant1, ant2 = ax.get_title().split(\" \")\n except:\n ant1 = int(ax.get_title().strip('Tile').strip('Antenna').strip('Stand'))\n ant2 = ant1 \n try:\n self.spin_ref_ant.setValue(int(ant1))\n self.spin_ref_ant2.setValue(int(ant2))\n self.plot_select.setCurrentIndex(0)\n self.current_plot = 'single'\n \n self.updatePlot()\n except:\n raise\n self.sp_fig.canvas.mpl_disconnect(self.fig_connect)\n \n elif event.button is 3:\n if not self.ax_zoomed:\n ax.set_position([0.1, 0.1, 0.85, 0.85])\n # TODO: fix labelling of zoom plots\n ax.set_xlabel(\"Frequency\")\n #ax.set_ylabel(\"Time\")\n self.orig_position = ax.get_position()\n for axis in event.canvas.figure.axes:\n # Hide all the other axes...\n if axis is not ax:\n axis.set_visible(False)\n self.ax_zoomed=True\n else:\n self.updatePlot()\n \n else:\n # No need to re-draw the canvas if it's not a left or right click\n return\n \n event.canvas.draw()", "def setup(self, flags):\n self.figure = pylab.figure(1)\n self.axes = {}\n self.stream_data = {}\n self.flags = flags", "def toggle_fore_mod(self, checked):\n for tile in self.tiles:\n tile.toggle_fore_mod(checked)", "def full_reset(self):\n self.at_cmd('CFUN=1')", "def overviewCommand(self):\n plt.figure(11)\n plt.clf()\n ax = plt.subplot(211)\n plt.plot(self.raw['OPDC'].data.field('TIME'),\n 1e6*self.raw['OPDC'].data.field('FUOFFSET'),\n color='r', label='FUOFFSET',\n linewidth=1, alpha=1) \n plt.plot(self.raw['OPDC'].data.field('TIME'),\n 1e6*(self.raw['OPDC'].data.field(self.DLtrack)-\n self.raw['OPDC'].data.field('PSP')),\n color='r', linewidth=3, alpha=0.5,\n label=self.DLtrack+'-PSP')\n plt.legend()\n plt.subplot(212, sharex=ax)\n plt.plot(self.raw['OPDC'].data.field('TIME'),\n 1e6*self.raw['OPDC'].data.field('FUOFFSET')-\n 1e6*(self.raw['OPDC'].data.field(self.DLtrack)-\n self.raw['OPDC'].data.field('PSP')),\n color='k', label='$\\Delta$',\n linewidth=1, alpha=1) \n \n signal = self.raw['OPDC'].data.field('FUOFFSET')\n plt.figure(12)\n plt.clf()\n ax2 = plt.subplot(111)\n Fs = 1e6/np.diff(self.raw['OPDC'].data.field('TIME')).mean()\n print Fs\n ax2.psd(signal[:50000], NFFT=5000, Fs=Fs, label='FUOFFSET',scale_by_freq=0)\n plt.legend()", "def clear(tft, oled):\n oled.fill(tft.BLACK)", "def switch_frequency_plot_channel_five(self):\n if self.plot_channel_key_booleans[4]:\n self.plot_channel_key_booleans[4] = False\n self.parent_widget.graph_channel_five_button.setStyleSheet(\n \"background-color:rgb(%d,%d,%d)\" % (255, 255, 255))\n else:\n self.plot_channel_key_booleans[4] = True\n 
self.parent_widget.graph_channel_five_button.setStyleSheet(\n \"background-color:rgb(%d,%d,%d)\" % (LINE_COLORS[4]))", "def switch_frequency_plot_channel_four(self):\n if self.plot_channel_key_booleans[3]:\n self.plot_channel_key_booleans[3] = False\n self.parent_widget.graph_channel_four_button.setStyleSheet(\n \"background-color:rgb(%d,%d,%d)\" % (255, 255, 255))\n else:\n self.plot_channel_key_booleans[3] = True\n self.parent_widget.graph_channel_four_button.setStyleSheet(\n \"background-color:rgb(%d,%d,%d)\" % (LINE_COLORS[3]))", "def switch_frequency_plot_channel_eight(self):\n if self.plot_channel_key_booleans[7]:\n self.plot_channel_key_booleans[7] = False\n self.parent_widget.graph_channel_eight_button.setStyleSheet(\n \"background-color:rgb(%d,%d,%d)\" % (255, 255, 255))\n else:\n self.plot_channel_key_booleans[7] = True\n self.parent_widget.graph_channel_eight_button.setStyleSheet(\n \"background-color:rgb(%d,%d,%d)\" % (LINE_COLORS[7]))", "def switch_frequency_plot_channel_one(self):\n if self.plot_channel_key_booleans[0]:\n self.plot_channel_key_booleans[0] = False\n self.parent_widget.graph_channel_one_button.setStyleSheet(\n \"background-color:rgb(%d,%d,%d)\" % (255, 255, 255))\n\n else:\n self.plot_channel_key_booleans[0] = True\n self.parent_widget.graph_channel_one_button.setStyleSheet(\n \"background-color:rgb(%d,%d,%d)\" % (LINE_COLORS[0]))", "def clearButtons(self):\n for ch in self.cboxes:\n ch.hide()\n for tbx in self.tboxes:\n tbx.hide()\n for btnum in reversed(range(self.flowLayout.layout.count())):\n item = self.flowLayout.layout.itemAt(btnum)\n if item is not None:\n self.flowLayout.layout.removeItem(item)\n r, c = self.flowLayout.items[item.widget()]\n del self.flowLayout.items[item.widget()]\n del self.flowLayout.rows[r][c]\n item.widget().hide()\n self.flowLayout.update()", "def clear(self):\n self._x_prev = None\n self._y_prev = None", "def reset(self):\n self.position = np.zeros(self.ndegres)\n self.velocity = np.zeros(self.ndegres)\n self.state = np.zeros(2*self.ndegres)\n self.flag = 0\n self.h_ref = np.array([self.ref for _ in range(self.horizon)])\n self.action = np.zeros(self.ACTION_DIM) \n self.h_action = np.zeros(self.ACTION_DIM*self.horizon)", "def side_wheel_from_axis():", "def clear_strip(self):\r\n wlogger.log_info(\"Clear Strip\")\r\n for led in range(self.num_led):\r\n self.set_pixel(led, 0, 0, 0)\r\n self.show()", "def reset(self):\n self.z[:] = 0", "def clear_at(self,x,y):\n\t\tself.set_at(x,y,False)", "def onClickCheckbutton(self):\r\n self.app.unbind()\r\n mask = []\r\n for val in self.intvars:\r\n mask.append(val.get())\r\n # Recreate fNIRS Channels with channel mask\r\n self.app.reconfigureChannels(self.app.dataPath,mask)\r\n self.app.bindHotkeys()", "def clear():\n\tglobal _s\n\t_s.screen.fill(_s.back)\n\t_s.tab(0,0)\n\t_flip()", "def up(self):\r\n self.brush_on = False", "def fill_single_world():\n if not front_is_clear():\n if not right_is_clear():\n if not left_is_clear():\n put_beeper()", "def clear(self):\n self._delayvalue = _CFG[\"delay\"]\n self._colormode = _CFG[\"colormode\"]\n self._delete(\"all\")\n self._bgpic = self._createimage(\"\")\n self._bgpicname = \"nopic\"\n self._tracing = 1\n self._updatecounter = 0\n self._turtles = []\n self.bgcolor(\"white\")\n for btn in 1, 2, 3:\n self.onclick(None, btn)\n self.onkeypress(None)\n for key in self._keys[:]:\n self.onkey(None, key)\n self.onkeypress(None, key)\n Myturtle._pen = None", "def plot(self):\n self.fig = plt.figure('black hole')\n self.fig.clf() #clear the graph to avoir 
superposing data from the same set (can be deactivated if need to superpose)\n self.ax = plt.subplot()\n\n if self.img2 is not None:\n self.ax.imshow(self.img2)\n else:\n print(\"No black hole deformation in the memory, displayed the original image instead.\")\n self.ax.imshow(self.img_debut)\n\n self.fig.canvas.set_window_title('Black hole')\n self.ax.set_title(\"scrool to zoom in or out \\nright click to add an offset in the background \\nleft click to refresh image \\n close the option windows to stop the program\")\n self.fig.canvas.mpl_connect('scroll_event', self.onscroll)\n self.fig.canvas.mpl_connect('button_press_event', self.onclick)\n self.fig.canvas.mpl_connect('axes_leave_event', self.disconnect)\n self.fig.canvas.mpl_connect('axes_enter_event', self.connect)\n\n self.draw()", "def clear(self):\n lines = self._lines\n image, bkg_image = self.image, self._image\n for line in lines: line.clear(image, bkg_image) #prej bkg_img\n self._cursor = 0", "def toggle_satni_grid(self, x):\r\n self.konfig.satni.set_grid(x)\r\n self.satniGraf.toggle_grid(x)" ]
[ "0.61160886", "0.6069212", "0.5878568", "0.5857923", "0.5843179", "0.58136004", "0.57567126", "0.57446754", "0.5731113", "0.57125556", "0.5617229", "0.55928105", "0.55445457", "0.5534347", "0.5533265", "0.55276537", "0.5508186", "0.549856", "0.5490645", "0.5472276", "0.5400834", "0.53722656", "0.5361674", "0.53379565", "0.53198034", "0.52910817", "0.52773637", "0.52771854", "0.5267548", "0.52616537", "0.5260294", "0.5257353", "0.5231599", "0.5231134", "0.5222504", "0.52208", "0.5204488", "0.5204395", "0.5203066", "0.5200858", "0.5197111", "0.5194087", "0.5191742", "0.51800424", "0.51542556", "0.51542425", "0.5150326", "0.5140875", "0.5131952", "0.5130377", "0.5128779", "0.51267934", "0.5126429", "0.5121851", "0.51210517", "0.51177543", "0.51130956", "0.5112082", "0.5110577", "0.509599", "0.50942147", "0.5092841", "0.5088458", "0.508814", "0.5087742", "0.5084447", "0.50831264", "0.5082854", "0.50822425", "0.5081408", "0.50773495", "0.50716174", "0.5070923", "0.506809", "0.50665647", "0.5062988", "0.50613916", "0.50590205", "0.5047464", "0.504644", "0.5044257", "0.50439954", "0.50429696", "0.50417197", "0.50403893", "0.5036552", "0.5031411", "0.50273067", "0.50212026", "0.50039774", "0.49999422", "0.49998775", "0.49966773", "0.49945095", "0.49942794", "0.49939296", "0.49923965", "0.49860862", "0.49829116", "0.49828577", "0.49817854" ]
0.0
-1
3x axis clear, guider on
def test_guider_start_ffsOpen(self):
        sopTester.updateModel('mcp', TestHelper.mcpState['boss_science'])
        self._guider_start(5, 17, 0, 0)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def clear_axes_selection(self):\n self.x_axis = ''\n self.y_axis = ''\n self.non_numeric_x_axis = False\n self.count_desired = False\n self.header_choices('x')", "def clean_axis(ax):\n ax.get_xaxis().set_ticks([])\n ax.get_yaxis().set_ticks([])\n for sp in ax.spines.values():\n sp.set_visible(False)", "def __set_ax_prop(self, ax):\n ax.set_xticks([])\n ax.set_yticks([])", "def clean_chart(ax):\n edges = ['top', 'bottom', 'right', 'left']\n ax.tick_params(color=cbox('dark gray'))\n for edge in edges:\n ax.spines[edge].set_color(cbox('dark gray'))\n ax.spines[edge].set_linewidth(0.4)\n ax.yaxis.label.set_color(cbox('dark gray'))\n ax.xaxis.label.set_color(cbox('dark gray'))\n ax.tick_params(axis='x', colors=(0.05, 0.05, 0.05), width=0.5, which='major', top=True)\n ax.tick_params(axis='y', colors=(0.05, 0.05, 0.05), width=0.5, which='major', left=True)", "def plot_clear():\n plt.cla()", "def clear_crossfilter1(self):\n print ('Trigger clear')\n self.query_dict = {}\n self.plot_data = None\n self.create_figure_new()\n layout_doc.children[4].children[0] = self.p", "def clear_graphs(self):\n for ax in (self.master_plot, self.time_velocity, self.time_power, self.power_velocity):\n ax.cla()", "def hide_x_ticks():\n ax = plt.gca()\n ax.axes.get_xaxis().set_ticks([])", "def clear(self):\n self.gridLayout.setRowStretch(self.gridLayout.rowCount()-1, 0)\n for i in reversed(range(self.gridLayout.count())):\n item = self.gridLayout.itemAt(i)\n axis = item.widget()\n if axis:\n self.gridLayout.removeWidget(axis)\n axis.hide()\n axis.deleteLater()\n else:\n self.gridLayout.removeItem(item)\n \n self.axisWidgets = []\n self.axesNames = []", "def cla(self):\n self.disable_mouse_rotation()\n Axes.cla(self)\n self.grid(rcParams['axes3d.grid'])", "def clear_crossfilter2(self):\n print ('Trigger clear')\n self.query_dict = {}\n self.plot_data = None\n self.create_figure_new()\n layout_doc.children[4].children[1] = self.p", "def clear(self):\n\n # Clear\n self.axes.cla()\n try:\n self.figure.clf()\n except KeyError:\n FlatCAMApp.App.log.warning(\"KeyError in MPL figure.clf()\")\n\n # Re-build\n self.figure.add_axes(self.axes)\n self.axes.set_aspect(1)\n self.axes.grid(True)\n\n # Re-draw\n self.canvas.draw_idle()", "def cla(self):\n # Don't forget to call the base class\n Axes.cla(self)\n \n x_min = 0\n y_min = 0\n x_max = 1\n y_max = 1\n x_spacing = 0.1\n y_spacing = 0.1\n self.xaxis.set_minor_locator(NullLocator())\n self.yaxis.set_minor_locator(NullLocator())\n self.xaxis.set_ticks_position('bottom')\n self.yaxis.set_ticks_position('left')\n Axes.set_xlim(self, x_min, x_max)\n Axes.set_ylim(self, y_min, y_max)\n self.xaxis.set_ticks(np.arange(x_min, x_max+x_spacing, x_spacing))\n self.yaxis.set_ticks(np.arange(y_min, y_max+y_spacing, y_spacing))", "def reset(self):\n try:\n self.ax.cla()\n except Exception as e:\n print 'Exception BasePlot:', e\n raise e\n \n self._plotbuffer = { pat: [0 for _ in range(self._plotlength)] for pat in self._patterns }\n self._timestampbuffer = { pat: [0 for _ in range(self._plotlength)] for pat in self._patterns }\n self.ax.set_axis_bgcolor('black')\n self.ax.set_xticks([])\n self.ax.set_yticks([])", "def plot3d(self):\n plot_rupture_wire3d(self)", "def fix_auto(self):\n if self.share_x:\n self.rescale_axes(x=True, y=False)\n self.fix_axes_ticks(axis='x')\n if self.share_y:\n self.rescale_axes(x=False, y=True)\n self.fix_axes_ticks(axis='y')", "def axis3D(xlow,xhigh,xfirst,xstep,ylow,yhigh,yfirst,ystep,\\\n zlow,zhigh,zfirst,zstep):\n 
dislin.graf3d(xlow,xhigh,xfirst,xstep,ylow,yhigh,yfirst,ystep,\\\n zlow,zhigh,zfirst,zstep)", "def clearZoomStack(self):\n self.setAxisAutoScale(Qwt.QwtPlot.xBottom)\n self.setAxisAutoScale(Qwt.QwtPlot.yLeft)\n self.replot()\n self.zoomer.setZoomBase()", "def plot_unit(x,y,z,elements,data,is2d,isplanet,datatype,options,fig,axgrid,gridindex):\n\t#if we are plotting 3d replace the current axis\n\tif not is2d:\n\t\taxgrid[gridindex].axis('off')\n\t\tax=inset_locator.inset_axes(axgrid[gridindex],width='100%',height='100%',loc=3,borderpad=0,axes_class=Axes3D)\n\t\tax.set_axis_bgcolor((0.7,0.7,0.7))\n\telse:\n\t\tax=axgrid[gridindex]\n\n\t#edgecolor\n\tedgecolor=options.getfieldvalue('edgecolor','None')\n\n\t# colormap\n\t# {{{ give number of colorlevels and transparency\n\tcolorlevels=options.getfieldvalue('colorlevels',128)\n\talpha=options.getfieldvalue('alpha',1)\n\t# }}}\n\t# {{{ define wich colormap to use \n\ttry:\n\t\tdefaultmap=plt.cm.get_cmap('viridis',colorlevels)\n\texcept AttributeError:\n\t\tprint(\"Viridis can't be found (probably too old Matplotlib) reverting to gnuplot colormap\")\n\t\tdefaultmap=truncate_colormap('gnuplot2',0.1,0.9,colorlevels)\n\tcmap=options.getfieldvalue('colormap',defaultmap)\n\tif options.exist('cmap_set_over'):\n\t\tover=options.getfieldvalue('cmap_set_over','0.5')\n\t\tcmap.set_over(over)\n\tif options.exist('cmap_set_under'):\n\t\tunder=options.getfieldvalue('cmap_set_under','0.5')\n\t\tcmap.set_under(under)\n\toptions.addfield('colormap',cmap)\n\t# }}}\t\n\t# {{{ if plotting only one of several layers reduce dataset, same for surface\n\tif options.getfieldvalue('layer',0)>=1:\n\t\tplotlayer=options.getfieldvalue('layer',0)\n\t\tif datatype==1:\n\t\t\tslicesize=np.shape(elements)[0]\n\t\telif datatype in [2,3]:\n\t\t\tslicesize=len(x)\n\t\tdata=data[(plotlayer-1)*slicesize:plotlayer*slicesize]\n\t# }}}\n\t# {{{ Get the colormap limits\n\tif options.exist('clim'):\n\t\tlims=options.getfieldvalue('clim',[np.amin(data),np.amax(data)])\n\telif options.exist('caxis'):\n\t\tlims=options.getfieldvalue('caxis',[np.amin(data),np.amax(data)])\n\telse:\n\t\tif np.amin(data)==np.amax(data):\n\t\t\tlims=[np.amin(data)-0.5,np.amax(data)+0.5]\n\t\telse:\n\t\t\tlims=[np.amin(data),np.amax(data)]\n\t# }}}\n\t# {{{ Set the spread of the colormap (default is normal\n\tif options.exist('log'):\n\t\tnorm = mpl.colors.LogNorm(vmin=lims[0], vmax=lims[1])\n\telse:\n\t\tnorm = mpl.colors.Normalize(vmin=lims[0], vmax=lims[1])\n\tif options.exist('log'):\n\t\tnorm = mpl.colors.LogNorm(vmin=lims[0], vmax=lims[1])\n\telse:\n\t\tnorm = mpl.colors.Normalize(vmin=lims[0], vmax=lims[1])\n\toptions.addfield('colornorm',norm)\n\t# }}}\n\t\n\t# Plot depending on the datatype\n\t# {{{ data are on elements\n\tif datatype==1:\n\t\tif is2d:\n\t\t\tif options.exist('mask'):\n\t\t\t\ttriangles=mpl.tri.Triangulation(x,y,elements,data.mask)\n\t\t\telse:\n\t\t\t\ttriangles=mpl.tri.Triangulation(x,y,elements)\n\t\t\ttri=ax.tripcolor(triangles,data,colorlevels,cmap=cmap,norm=norm,alpha=alpha,edgecolors=edgecolor)\n\t\telse:\n\n\n\t\t\t#first deal with colormap\n\t\t\tloccmap = plt.cm.ScalarMappable(cmap=cmap)\n\t\t\tloccmap.set_array([min(data),max(data)])\n\t\t\tloccmap.set_clim(vmin=min(data),vmax=max(data))\n\n\t\t\t#dealing with prism 
sides\n\t\t\trecface=np.vstack((elements[:,0],elements[:,1],elements[:,4],elements[:,3])).T\n\t\t\teltind=np.arange(0,np.shape(elements)[0])\n\t\t\trecface=np.vstack((recface,np.vstack((elements[:,1],elements[:,2],elements[:,5],elements[:,4])).T))\n\t\t\teltind=np.hstack((eltind,np.arange(0,np.shape(elements)[0])))\n\t\t\trecface=np.vstack((recface,np.vstack((elements[:,2],elements[:,0],elements[:,3],elements[:,5])).T))\n\t\t\teltind=np.hstack((eltind,np.arange(0,np.shape(elements)[0])))\n\t\t\ttmp = np.ascontiguousarray(np.sort(recface)).view(np.dtype((np.void, recface.dtype.itemsize * recface.shape[1])))\n\t\t\t_, idx, recur = np.unique(tmp, return_index=True, return_counts=True)\n\t\t\trecel= recface[idx[np.where(recur==1)]]\n\t\t\trecindex=eltind[idx[np.where(recur==1)]]\n\t\t\tfor i,rectangle in enumerate(recel):\n\t\t\t\trec=zip(x[rectangle],y[rectangle],z[rectangle])\n\t\t\t\tpl3=Poly3DCollection([rec])\n\t\t\t\tcolor=loccmap.to_rgba(data[recindex[i]])\n\t\t\t\tpl3.set_edgecolor(color)\n\t\t\t\tpl3.set_color(color)\n\t\t\t\tax.add_collection3d(pl3)\n\n\t\t\t#dealing with prism bases\n\t\t\ttriface=np.vstack((elements[:,0:3],elements[:,3:6]))\n\t\t\teltind=np.arange(0,np.shape(elements)[0])\n\t\t\teltind=np.hstack((eltind,np.arange(0,np.shape(elements)[0])))\n\t\t\ttmp = np.ascontiguousarray(triface).view(np.dtype((np.void, triface.dtype.itemsize * triface.shape[1])))\n\t\t\t_, idx,recur = np.unique(tmp, return_index=True,return_counts=True)\n\t\t\t#we keep only top and bottom elements\n\t\t\ttriel= triface[idx[np.where(recur==1)]]\n\t\t\ttriindex=eltind[idx[np.where(recur==1)]]\n\t\t\tfor i,triangle in enumerate(triel):\n\t\t\t\ttri=zip(x[triangle],y[triangle],z[triangle])\n\t\t\t\tpl3=Poly3DCollection([tri])\n\t\t\t\tcolor=loccmap.to_rgba(data[triindex[i]])\n\t\t\t\tpl3.set_edgecolor(color)\n\t\t\t\tpl3.set_color(color)\n\t\t\t\tax.add_collection3d(pl3)\n\t\n\t\t\tax.set_xlim([min(x),max(x)])\n\t\t\tax.set_ylim([min(y),max(y)])\n\t\t\tax.set_zlim([min(z),max(z)])\n\n\t\t\t#raise ValueError('plot_unit error: 3D element plot not supported yet')\n\t\treturn \n\t# }}}\n\t# {{{ data are on nodes\n\telif datatype==2:\n\t\tif is2d:\n\t\t\tif np.ma.is_masked(data):\n\t\t\t\tEltMask=np.asarray([np.any(np.in1d(index,np.where(data.mask))) for index in elements])\n\t\t\t\ttriangles=mpl.tri.Triangulation(x,y,elements,EltMask)\n\t\t\telse:\n\t\t\t\ttriangles=mpl.tri.Triangulation(x,y,elements)\n\t\t\ttri=ax.tricontourf(triangles,data,colorlevels,cmap=cmap,norm=norm,alpha=alpha)\n\t\t\tif edgecolor != 'None':\n\t\t\t\tax.triplot(x,y,elements,color=edgecolor)\n\t\telse:\n\t\t\t#first deal with the colormap\n\t\t\tloccmap = plt.cm.ScalarMappable(cmap=cmap)\n\t\t\tloccmap.set_array([min(data),max(data)])\n\t\t\tloccmap.set_clim(vmin=min(data),vmax=max(data))\n\t\t\t\n\t\t\t#deal with prism sides\n\t\t\trecface=np.vstack((elements[:,0],elements[:,1],elements[:,4],elements[:,3])).T\n\t\t\trecface=np.vstack((recface,np.vstack((elements[:,1],elements[:,2],elements[:,5],elements[:,4])).T))\n\t\t\trecface=np.vstack((recface,np.vstack((elements[:,2],elements[:,0],elements[:,3],elements[:,5])).T))\n\t\t\ttmp = np.ascontiguousarray(np.sort(recface)).view(np.dtype((np.void, recface.dtype.itemsize * recface.shape[1])))\n\t\t\t_, idx, recur = np.unique(tmp, return_index=True, return_counts=True)\n\t\t\trecel= recface[idx[np.where(recur==1)]]\n\t\t\tfor rectangle in 
recel:\n\t\t\t\trec=zip(x[rectangle],y[rectangle],z[rectangle])\n\t\t\t\tpl3=Poly3DCollection([rec])\n\t\t\t\tcolor=loccmap.to_rgba(np.mean(data[rectangle]))\n\t\t\t\tpl3.set_edgecolor(color)\n\t\t\t\tpl3.set_color(color)\n\t\t\t\tax.add_collection3d(pl3)\n\t\t\t\t\n\t\t\t#deal with prism faces\n\t\t\ttriface=np.vstack((elements[:,0:3],elements[:,3:6]))\n\t\t\ttmp = np.ascontiguousarray(triface).view(np.dtype((np.void, triface.dtype.itemsize * triface.shape[1])))\n\t\t\t_, idx,recur = np.unique(tmp, return_index=True,return_counts=True)\n\t\t\t#we keep only top and bottom elements\n\t\t\ttriel= triface[idx[np.where(recur==1)]]\n\t\t\tfor triangle in triel:\n\t\t\t\ttri=zip(x[triangle],y[triangle],z[triangle])\n\t\t\t\tpl3=Poly3DCollection([tri])\n\t\t\t\tcolor=loccmap.to_rgba(np.mean(data[triangle]))\n\t\t\t\tpl3.set_edgecolor(color)\n\t\t\t\tpl3.set_color(color)\n\t\t\t\tax.add_collection3d(pl3)\n\t\t\t\t\n\t\t\tax.set_xlim([min(x),max(x)])\n\t\t\tax.set_ylim([min(y),max(y)])\n\t\t\tax.set_zlim([min(z),max(z)])\n\t\t\t#raise ValueError('plot_unit error: 3D element plot not supported yet')\n\t\treturn\n\t# }}}\n\t# {{{ plotting quiver\n\telif datatype==3:\n\t\tif is2d:\n\t\t\tQ=plot_quiver(x,y,data,options,ax)\n\t\telse:\n\t\t\traise ValueError('plot_unit error: 3D node plot not supported yet')\n\t\treturn\n\t\n\t# }}}\n\t# {{{ plotting P1 Patch (TODO)\n\n\telif datatype==4:\n\t\tprint 'plot_unit message: P1 patch plot not implemented yet'\n\t\treturn\n\n\t# }}}\n\t# {{{ plotting P0 Patch (TODO)\n\n\telif datatype==5:\n\t\tprint 'plot_unit message: P0 patch plot not implemented yet'\n\t\treturn\n\n\t# }}}\n\telse:\n\t\traise ValueError('datatype=%d not supported' % datatype)", "def updatePlot(self,*args):\n # set x limits\n timeDisplayOptions = {'10 minutes':10,'1 hour':60,'6 hours':6*60,'24 hours':24*60,'All':0}\n try:\n lastDatetime = mpl.dates.num2date(self.stage60K.get_xdata()[-1])\n firstDatetime = mpl.dates.num2date(self.stage60K.get_xdata()[0])\n except IndexError: # no data yet\n now = datetime.datetime.utcnow().toordinal()\n firstDatetime = mpl.dates.num2date(now)\n lastDatetime = firstDatetime\n xMin = lastDatetime-datetime.timedelta(minutes=timeDisplayOptions[self.wScale.get()])\n xMin = max([ firstDatetime, xMin ])\n if self.wScale.get() == 'All':\n xMin = firstDatetime\n xMinIndex = numpy.searchsorted( self.stage60K.get_xdata(), mpl.dates.date2num(xMin) )\n # rescale axes, with the x being scaled by the slider\n if self.toolbar._active == 'HOME' or self.toolbar._active == None:\n ymin,ymax = 10000000, -10000000\n lineAndVar = { self.stage60K: self.t60K,\n self.stage03K: self.t3K,\n self.stageGGG: self.tGGG,\n self.stageFAA: self.tFAA }\n if len(self.stage60K.get_xdata()) > 1:\n for line in lineAndVar.keys():\n if lineAndVar[line].get() == 0:\n line.set_visible(False)\n else:\n line.set_visible(True)\n ydata = line.get_ydata()[xMinIndex:-1]\n try:\n ymin = min(ymin, numpy.nanmin(ydata))\n ymax = max(ymax, numpy.nanmax(ydata))\n except ValueError as e:\n pass\n self.ax.set_xlim(xMin,lastDatetime)\n self.ax.set_ylim(ymin - (ymax-ymin)/10, ymax + (ymax-ymin)/10)\n hfmt = mpl.dates.DateFormatter('%H:%M:%S', tz=tz.tzlocal())\n self.ax.xaxis.set_major_formatter(hfmt)\n self.fig.autofmt_xdate()\n self.fig.tight_layout()\n #draw\n self.canvas.draw()", "def clean_axes(axl):\n cleanAxes(axl)", "def init_plot():\n fig = plt.figure(constrained_layout=True, figsize=(7,9), dpi=130)\n gs = fig.add_gridspec(5, 1)\n ax2 = fig.add_subplot(gs[:1, :])\n ax1 = fig.add_subplot(gs[1:, :], 
projection='3d')\n\n tick_color = (0.2, 0.2, 0.2, 1.0)\n pane_color = (0.12, 0.12, 0.12, 1.0)\n ax1.w_xaxis.set_pane_color(pane_color)\n ax1.w_yaxis.set_pane_color(pane_color)\n ax1.w_zaxis.set_pane_color(pane_color)\n\n ax1.tick_params(axis='x', colors=tick_color)\n ax1.tick_params(axis='y', colors=tick_color)\n ax1.tick_params(axis='z', colors=tick_color)\n ax1.view_init(elev=90, azim=180)\n\n ax1.set_xlim3d(0, 80)\n ax1.set_zlim3d(-2, 5)\n \n return (ax1, ax2)", "def triangle_axis(ax):\n ax.spines['right'].set_visible(False)\n ax.spines['top'].set_visible(False)\n ax.xaxis.set_ticks_position('bottom')\n ax.yaxis.set_ticks_position('left')", "def _reset_ploting_area(self):\n\n # Clear any existing curves and markers\n self.clear()\n\n # Initialise canvas\n self.setTitle('Disperion Measure Step Plot')\n self.setCanvasBackground(Qt.Qt.white)\n self.plotLayout().setMargin(0)\n self.plotLayout().setCanvasMargin(0)\n self.plotLayout().setAlignCanvasToScales(True)\n self.setAxisTitle(QwtPlot.yLeft, 'Signal to Noise Ratio')\n self.setAxisTitle(QwtPlot.xBottom, 'Trial Dispersion Measure')\n\n # Attached Grid\n grid = Qwt.QwtPlotGrid()\n grid.attach(self)\n grid.setPen(Qt.QPen(Qt.Qt.black, 0, Qt.Qt.DotLine))", "def setAxisBackground(idx=-1):\n dislin.axsbgd(idx)", "def clean_graph(self):\n #self.time = 0#\n \n # values of microcontroller\n #if self.graf_t.buffer_info()[1] != 0:\n for a in range(self.graf_t.buffer_info()[1]):\n self.graf_t.pop()\n \n for a in range(self.graf_r.buffer_info()[1]):\n self.graf_r.pop()\n\n for a in range(self.graf_x0.buffer_info()[1]):\n self.graf_x0.pop()\n\n for a in range(self.graf_x1.buffer_info()[1]):\n self.graf_x1.pop()\n\n for a in range(self.graf_u.buffer_info()[1]):\n self.graf_u.pop()\n \n self.referenceLine.set_data(self.graf_t, self.graf_r)\n self.x0Line.set_data(self.graf_t, self.graf_x0)\n self.x1Line.set_data(self.graf_t, self.graf_x1)\n self.uLine.set_data(self.graf_t, self.graf_u)\n \n try:\n #Draw the lines\n if self.checkBox_R.isChecked():\n self.mplWidget.canvas.ax.draw_artist(self.referenceLine)\n if self.checkBox_x0.isChecked():\n self.mplWidget.canvas.ax.draw_artist(self.x0Line)\n if self.checkBox_U.isChecked():\n self.mplWidget.canvas.ax.draw_artist(self.uLine)\n if self.checkBox_x1.isChecked():\n self.mplWidget.canvas.ax.draw_artist(self.x1Line)\n except AssertionError:\n pass\n try:\n self.mplWidget.canvas.blit(self.mplWidget.canvas.ax.bbox)\n except AttributeError:\n pass\n \n # force an image redraw\n self.mplWidget.canvas.draw()", "def cla(self):\n\t\tAxes.cla(self)\n\t\tself.set_longitude_grid(45)\n\t\tself.set_latitude_grid(20)\n\t\t# Do not display ticks -- we only want gridlines and text\n\t\tself.xaxis.set_ticks_position('none')\n\t\tself.yaxis.set_ticks_position('none')\n\n\t\tself.x_lim = [-180, 180]\n\t\tself.y_lim = [-90, 90]\n\t\tself.set_xlim(self.x_lim)\n\t\tself.set_ylim(self.y_lim)", "def empty_figure() -> object:\n figure = go.Figure(go.Scatter(x=[], y=[]))\n figure.update_layout(template=None)\n figure.update_xaxes(showgrid=False, showticklabels=False, zeroline=False)\n figure.update_yaxes(showgrid=False, showticklabels=False, zeroline=False)\n\n return figure", "def clear(self):\r\n\t\tself.grid.fill(False)", "def setBorder3D():\n dislin.box3d()", "def noAxisLines(axes='XYZ'):\n dislin.frame(0)\n dislin.noline(axes)", "def AllSidesTicks( ax ):\n return; ## depreceate for now as it doesn't work correctly...\n axy = ax.twinx()\n axy.set_ylim( ax.get_ylim() )\n axy.set_yscale( ax.get_yscale() )\n 
axy.set_yticklabels(labels=[])\n\n axx = ax.twiny()\n axx.set_xlim( ax.get_xlim() )\n axx.set_xscale( ax.get_xscale() )\n axx.set_xticklabels(labels=[])", "def remove_border(axes=None, top=False, right=False, left=True, bottom=True):\n ax = axes or plt.gca()\n ax.spines['top'].set_visible(top)\n ax.spines['right'].set_visible(right)\n ax.spines['left'].set_visible(left)\n ax.spines['bottom'].set_visible(bottom)\n\n #turn off all ticks\n ax.yaxis.set_ticks_position('none')\n ax.xaxis.set_ticks_position('none')\n\n #now re-enable visibles\n if top:\n ax.xaxis.tick_top()\n if bottom:\n ax.xaxis.tick_bottom()\n if left:\n ax.yaxis.tick_left()\n if right:\n ax.yaxis.tick_right()", "def clear(self):\n self._fig = go.Figure()", "def toggle_zero_grid(self, x):\r\n self.konfig.zero.set_grid(x)\r\n self.zeroGraf.toggle_grid(x)", "def cells_off(self):\n self.plotter.cells_off(self.ax)\n self.fig.canvas.draw()", "def cleanAxis(axisName, ticksOnly=False, complete=False):\r\n axisName.xaxis.set_ticks_position('bottom')\r\n axisName.yaxis.set_ticks_position('left')\r\n axisName.xaxis.labelpad = 2\r\n if not ticksOnly:\r\n axisName.spines['top'].set_visible(False)\r\n axisName.spines['right'].set_visible(False)\r\n if complete:\r\n axisName.spines['top'].set_visible(False)\r\n axisName.spines['right'].set_visible(False)\r\n axisName.spines['bottom'].set_visible(False)\r\n axisName.spines['left'].set_visible(False)\r\n return axisName", "def clear(self):\n self.tiempos.clear()\n for curva in self.curvas:\n curva.valores.clear()\n curva.maximo = float('-inf')\n curva.minimo = float('inf')\n self.queue_draw()", "def reset(self):\n # Don't reset axis labels\n self.range = ((0, 2, 1),) * self.ndim\n self.current_step = (0,) * self.ndim\n self.order = tuple(range(self.ndim))", "def off(self,ax):\n # remove cell lines if thery are on the plot\n # (if new axes are created the cell lines will be not there)\n for line in self.cell_lines:\n try:\n ax.lines.remove(line)\n except ValueError:\n pass\n # set lines and coordinates to empty lists \n self.cell_lines = []\n self.xx_cells = []", "def initialize_axes(self):\r\n self.x_lim = np.array([self.vals[:, 0].min(), self.vals[:, 0].max()])\r\n self.y_lim = np.array([self.vals[:, 1].min(), self.vals[:, 1].max()])\r\n self.z_lim = np.array([self.vals[:, 2].min(), self.vals[:, 2].max()])", "def setup_axes3(fig, rect):\n\n # rotate a bit for better orientation\n tr_rotate = Affine2D().translate(-95, 0)\n\n # scale degree to radians\n tr_scale = Affine2D().scale(np.pi/180., 1.)\n\n tr = tr_rotate + tr_scale + PolarAxes.PolarTransform()\n\n grid_locator1 = angle_helper.LocatorHMS(4)\n tick_formatter1 = angle_helper.FormatterHMS()\n\n grid_locator2 = MaxNLocator(3)\n\n ra0, ra1 = 8.*15, 14.*15\n cz0, cz1 = 0, 14000\n grid_helper = floating_axes.GridHelperCurveLinear(\n tr, extremes=(ra0, ra1, cz0, cz1),\n grid_locator1=grid_locator1,\n grid_locator2=grid_locator2,\n tick_formatter1=tick_formatter1,\n tick_formatter2=None)\n\n ax1 = floating_axes.FloatingSubplot(fig, rect, grid_helper=grid_helper)\n fig.add_subplot(ax1)\n\n # adjust axis\n ax1.axis[\"left\"].set_axis_direction(\"bottom\")\n ax1.axis[\"right\"].set_axis_direction(\"top\")\n\n ax1.axis[\"bottom\"].set_visible(False)\n ax1.axis[\"top\"].set_axis_direction(\"bottom\")\n ax1.axis[\"top\"].toggle(ticklabels=True, label=True)\n ax1.axis[\"top\"].major_ticklabels.set_axis_direction(\"top\")\n ax1.axis[\"top\"].label.set_axis_direction(\"top\")\n\n ax1.axis[\"left\"].label.set_text(r\"cz [km$^{-1}$]\")\n 
ax1.axis[\"top\"].label.set_text(r\"$\\alpha_{1950}$\")\n\n # create a parasite axes whose transData in RA, cz\n aux_ax = ax1.get_aux_axes(tr)\n\n aux_ax.patch = ax1.patch # for aux_ax to have a clip path as in ax\n ax1.patch.zorder = 0.9 # but this has a side effect that the patch is\n # drawn twice, and possibly over some other\n # artists. So, we decrease the zorder a bit to\n # prevent this.\n\n return ax1, aux_ax", "def clear(self):\n self._plt.clear()\n self._layer_items = {}", "def setAxisLengths3D(x=2.,y=2.,z=2.):\n dislin.axis3d(x,y,z)", "def _setFig(self):\n self.p.background_fill_color = grey['light']\n self.p.xgrid.grid_line_color = None\n self.p.ygrid.grid_line_color = None\n self.p.ygrid.grid_line_dash = 'dotted'\n self.p.ygrid.grid_line_dash = 'dotted'\n\n self.p.xgrid.minor_grid_line_color = grey['median']\n self.p.ygrid.minor_grid_line_color = grey['median']\n self.p.xgrid.minor_grid_line_dash = 'dotted'\n self.p.ygrid.minor_grid_line_dash = 'dotted'\n\n self.p.xaxis.axis_label = \"tsne_feature_0\"\n self.p.yaxis.axis_label = \"tsne_feature_1\"", "def _set_axes(self):\n self += helper.line(stroke=\"black\", x1=self.__dict__['x'], x2=self.__dict__['x'], y1=0, y2=self.__dict__['y']*2)\n self += helper.line(stroke=\"black\", x1=0, x2=self.__dict__['x']*2, y1=self.__dict__['y'], y2=self.__dict__['y'])", "def centerAxis():\n dislin.center()", "def axes_set_better_defaults(ax, axes_color = '#777777', grid = False):\n ax.set_axis_bgcolor((1, 1, 1))\n ax.grid(grid)\n for key in ax.spines.keys():\n if ax.spines[key].get_visible():\n ax.spines[key].set_color(axes_color)\n ax.tick_params(axis = 'x', colors = axes_color)\n ax.tick_params(axis = 'y', colors = axes_color)\n ax.figure.set_facecolor('white')\n ax.figure.canvas.draw()", "def __draw_xaxis(self):\n self.ax.set_xlim(self.xlims)\n # put x ticks on top\n xticks = [1]\n xticks.extend(range(5, self.xmax+5, 5))\n fs = self.settings.rcParams[\"axes.labelsize\"] if self.settings.otherParams[\n \"xlabel.fontsize\"] is None else self.settings.otherParams[\"xlabel.fontsize\"]\n color = self.settings.rcParams[\"axes.labelcolor\"] if self.settings.otherParams[\n \"xlabel.color\"] is None else self.settings.otherParams[\"xlabel.color\"]\n self.ax.set_xticks(xticks)\n self.ax.set_xticklabels(xticks[:-1])\n self.ax.set_xlabel(self.xaxis_label, fontsize=fs, color=color)\n self.ax.xaxis.set_label_coords(\n *self.settings.otherParams[\"xlabel.position\"])", "def front_wheel_from_axis():", "def show_grid(self):\n for ax in (self.time_velocity, self.time_power, self.power_velocity):\n ax.grid(True)", "def _InitAxes( self ):\n self.ax = self.fig.add_subplot( 111 )", "def __reset_crosshair(self):\n self.lhor.set_ydata(self.y_coord)\n self.lver.set_xdata(self.x_coord)", "def cb_reset(event):\n axDirichlet.cla()\n # Reset Sliders\n sAlpha0.reset() # resetが駄目!一番最初に戻ってしまう\n sAlpha1.reset()\n sAlpha2.reset()\n alpha_update = [sAlpha0.val, sAlpha1.val, sAlpha2.val]\n print('alpha_update=', alpha_update)\n\n # ML\n lambda_ML = CatML.MLinfer(x_cat)\n\n axML.cla()\n drawBarGraph( axML, \"ML\", lambda_ML, bar_y_max, col_ML ) # Draw Bar graph\n\n\n # MAP\n dirichlet.set_param(alpha_update)\n lambda_MAP = CatMAP.MAPinfer(x_cat, dirichlet)\n\n axMAP.cla()\n drawBarGraph( axMAP, \"MAP\", lambda_MAP, bar_y_max, col_MAP ) # Draw Bar Graph\n\n # Bayes\n posteriorDirichlet.set_param(alpha_update)\n posteriorDirichlet.calcPosterior(x_cat)\n lambda_Bayes = np.zeros(3)\n for k in range(3):\n lambda_Bayes[k] = posteriorDirichlet.BayesInfer(k)\n\n axBayes.cla()\n 
drawBarGraph( axBayes, \"Bayes\", lambda_Bayes, bar_y_max, col_Bayes ) # Draw Bar Graph\n\n draw_pdf_contours(axDirichlet, dirichlet, True) # Draw Dirichlet\n\n print('Reset')\n print('lambda_ML =', lambda_ML)\n print('lambda_MAP =', lambda_MAP)\n print('lambda_Bayes=', lambda_Bayes)\n draw_point(axDirichlet, lambda_ML, col_ML)\n draw_point(axDirichlet, lambda_MAP, col_MAP)\n draw_point(axDirichlet, lambda_Bayes, col_Bayes)\n draw_point(axLikelihood, lambda_ML, col_ML)\n draw_point(axPosteriorDirichlet, lambda_MAP, col_MAP)\n draw_point(axPosteriorDirichlet, lambda_Bayes, col_Bayes)\n\n fig.canvas.draw_idle()", "def clearLineshape(self):\n self.x = np.arange(self.start,self.stop,round(self.step,4))\n self.lineshape = np.zeros(len(self.x))", "def setup_axes3(fig, rect, component, fig_title=None, title_size=25, inclined=False, ylim=None):\n\n # Angle in degree\n angle_ticks = [(0., r\"$0^\\circ$\"),\n (15., r\"$15^\\circ$\"),\n (30., r\"$30^\\circ$\"),\n (45., r\"$45^\\circ$\"),\n (60., r\"$60^\\circ$\"),\n (75., r\"$75^\\circ$\"),\n (90., r\"$90^\\circ$\")]\n\n if not inclined:\n # rotate a bit for better orientation\n tr_rotate = Affine2D().translate(90, 0)\n\n # scale degree to radians\n tr_scale = Affine2D().scale(np.pi / 180., 1.)\n\n # ploting zenith angle range\n ra0, ra1 = 0., 100.\n\n grid_locator1 = FixedLocator([v for v, s in angle_ticks])\n tick_formatter1 = DictFormatter(dict(angle_ticks))\n\n else:\n # rotate a bit for better orientation\n tr_rotate = Affine2D().translate(-5, 0)\n\n # scale degree to radians\n tr_scale = Affine2D().scale(np.pi / 90., 1.)\n\n # ploting zenith angle range\n ra0, ra1 = 50., 100.\n\n grid_locator1 = None\n tick_formatter1 = DictFormatter(dict(angle_ticks))\n\n tr = tr_rotate + tr_scale + PolarAxes.PolarTransform()\n\n # Angle in minutes\n # grid_locator1 = angle_helper.LocatorHMS(6)\n # tick_formatter1 = angle_helper.FormatterHMS()\n\n grid_locator2 = MaxNLocator(11)\n\n if ylim is not None:\n cz0, cz1 = ylim\n else:\n cz0, cz1 = 0, 50.\n\n grid_helper = floating_axes.GridHelperCurveLinear(tr,\n extremes=(ra0, ra1, cz0, cz1),\n grid_locator1=grid_locator1,\n grid_locator2=grid_locator2,\n tick_formatter1=tick_formatter1,\n tick_formatter2=None,\n )\n\n ax1 = floating_axes.FloatingSubplot(fig, rect, grid_helper=grid_helper)\n fig.add_subplot(ax1)\n\n if fig_title is not None:\n plt.title(fig_title, fontsize=title_size, loc=\"left\")\n\n # adjust axis\n ax1.axis[\"left\"].set_axis_direction(\"bottom\")\n ax1.axis[\"right\"].set_axis_direction(\"top\")\n\n ax1.axis[\"bottom\"].set_visible(False)\n ax1.axis[\"top\"].set_axis_direction(\"bottom\")\n ax1.axis[\"top\"].toggle(ticklabels=True, label=True)\n ax1.axis[\"top\"].major_ticklabels.set_axis_direction(\"top\")\n ax1.axis[\"top\"].label.set_axis_direction(\"top\")\n\n if component == \"horizontal\" or component == \"hori\":\n ax1.axis[\"left\"].label.set_text(r\"$|H_{\\phi}|$ [m]\")\n elif component == \"meridional\":\n ax1.axis[\"left\"].label.set_text(r\"$|H_{\\theta}|$ [m]\")\n elif component == \"vertical-horizontal\":\n ax1.axis[\"left\"].label.set_text(r\"$|H_{\\theta,hor}|$ [m]\")\n elif component == \"vertical-vertical\":\n ax1.axis[\"left\"].label.set_text(r\"$|H_{\\theta.vert}|$ [m]\")\n elif component == \"vertical\":\n ax1.axis[\"left\"].label.set_text(r\"$|H_{v}| = |H_{\\theta}| \\cdot \\sin(\\theta)$ [m]\")\n\n ax1.axis[\"left\"].label.set_fontsize(24)\n ax1.axis[\"left\"].major_ticklabels.set_fontsize(22)\n ax1.axis[\"top\"].label.set_text(r\"$\\Theta$\")\n 
ax1.axis[\"top\"].label.set_fontsize(24)\n ax1.axis[\"top\"].major_ticklabels.set_fontsize(22)\n\n ax1.grid(True)\n\n # create a parasite axes whose transData in RA, cz\n aux_ax = ax1.get_aux_axes(tr)\n\n aux_ax.patch = ax1.patch # for aux_ax to have a clip path as in ax\n ax1.patch.zorder = 0.9 # but this has a side effect that the patch is\n # drawn twice, and possibly over some other\n # artists. So, we decrease the zorder a bit to\n # prevent this.\n\n return ax1, aux_ax", "def clear(self):\n row, col = self.selected\n if self.cubes[row][col].value == 0:\n self.cubes[row][col].set_temp(0)", "def cell_edges3d_cartesian(self, axis2, axis3):", "def format_image_axis(ax=None,remove_frame=True):\n ax = gca(ax)\n no_x_label(ax)\n no_x_ticks(ax)\n no_y_label(ax)\n no_y_ticks(ax)\n if remove_frame:\n ax.axis('off')", "def hide_tick_labels(ax):\n ax.xaxis.set_ticklabels([])\n ax.yaxis.set_ticklabels([])", "def _update_axes(self):\n data_shape = self.data.shape\n if len(self.axes) < self.data.ndim + 1:\n self._axes.append(Axis())\n for index in range(self.data.ndim):\n if len(self.axes[index].values) != data_shape[index]:\n self.axes[index].values = np.arange(data_shape[index],\n dtype=np.float64)", "def fix_ticks(self, noxticks=False, noyticks=False, style=None, N=None,\n rotate_x=False, rotate_y=False, xticklabels=None, yticklabels=None, \n oned=True):\n \n pl.draw()\n \n self.fix_axes_labels()\n \n self.fix_axes_ticks(axis='x', N=N, rotate_x=rotate_x) \n self.fix_axes_ticks(axis='y', N=N, rotate_y=rotate_y)\n \n \n if self.diagonal == 'lower' and oned:\n self.grid[np.intersect1d(self.left, self.top)[0]].set_yticklabels([])\n \n self.remove_all_ticks(noxticks, noyticks)\n \n pl.draw()", "def clear(self):\n\n # Inform the user\n log.info(\"Clearing the scatter plotter ...\")", "def setColorAxisLengths(x,y,z):\n dislin.ax3len(x,y,z)", "def switch_frequency_plot_channel_three(self):\n if self.plot_channel_key_booleans[2]:\n self.plot_channel_key_booleans[2] = False\n self.parent_widget.graph_channel_three_button.setStyleSheet(\n \"background-color:rgb(%d,%d,%d)\" % (255, 255, 255))\n\n else:\n self.plot_channel_key_booleans[2] = True\n self.parent_widget.graph_channel_three_button.setStyleSheet(\n \"background-color:rgb(%d,%d,%d)\" % (LINE_COLORS[2]))", "def format_xaxis (self, axes, \n n_ticks = 10, # Number of ticks we would like\n timestamp_formatting = '(%Y-%m-%d)%H:%M', # Specified formatting \n xaxis_mode = None): # Several automatic modes\n if (self.X_type == \"categorical\"):\n axes.set_xticks(self.X[self.start_indx:self.end_indx], minor=False)\n axes.set_xticklabels(self.Xcategories[self.start_indx:self.end_indx][:,0], minor=False)\n \n elif(self.X_type == \"numerical\"):\n # If regular numerical we just plot the values\n axes.xaxis.set_major_locator(mticker.MaxNLocator(nbins = n_ticks, prune='upper'))\n# ax.get_xaxis().get_major_formatter().set_useOffset(False)\n \n elif(self.X_type == \"timestamp\"):\n axes.xaxis.set_major_formatter(mdates.DateFormatter(timestamp_formatting))\n axes.xaxis.set_major_locator(mticker.MaxNLocator(nbins = n_ticks, prune='upper'))\n axes.xaxis_date()\n # ax.xaxis.set_major_formatter(FuncFormatter(self.ticklabels[val:val + wsize]))\n self.figure.autofmt_xdate()\n# print (type(self.X), type(self.X[0]))\n \n elif(self.formatXaxis == \"intraday\"):\n # If the data is intraday and we want to apply the Gap Remover !!! 
\n gap_remover_flag = 1;\n if (gap_remover_flag):\n formatter = FuncFormatter(ul.detransformer_Formatter)\n axes.xaxis.set_major_formatter(formatter) \n # mdates.DateFormatter(formatting)\n \n else:\n axes.xaxis.set_major_formatter(mdates.DateFormatter(formatting))\n \n axes.xaxis.set_major_locator(mticker.MaxNLocator(nbins = n_ticks, prune='upper'))", "def oiDataIsNull(self):\n self.charts_1.getGV().hide()\n self.charts_2.getGV().hide()", "def simple_xy_axes(ax):\n\n # Hide the right and top spines\n ax.spines['right'].set_visible(False)\n ax.spines['top'].set_visible(False)\n\n # Only show ticks on the left and bottom spines\n ax.yaxis.set_ticks_position('left')\n ax.xaxis.set_ticks_position('bottom')", "def _onLinearQ(self, event):\r\n self.graph.xaxis('\\\\rm{q} ', 'A^{-1}')\r\n self.set_xscale('linear')\r\n self.graph.render(self)\r\n self.subplot.figure.canvas.draw_idle()", "def _DoUpdatePlot( self, wd, ht ):\n self.ax.grid(\n True, 'both', 'both',\n\tcolor = '#c8c8c8', linestyle = ':', linewidth = 1\n\t)", "def onSetToCustDims(self, evt):\n\t\tself.halfResampleZ.Enable(0)\n\t\tself.fourthResampleZ.Enable(0)\n\t\t\n\t\tfor obj in [self.factorLabel, self.dimLabel, self.newDimX, self.newDimY, self.newDimZ, self.factorX, self.factorY, self.factorZ]:\n\t\t\tobj.Enable(1)\n\t\ttry:\n\t\t\trx = int(self.newDimX.GetValue())\n\t\t\try = int(self.newDimY.GetValue())\n\t\t\trz = int(self.newDimZ.GetValue())\n\t\t\tself.currSize = (rx, ry, rz)\n\t\texcept:\n\t\t\tpass", "def xaxis ( self ) :\n return self.__xaxis", "def xaxis ( self ) :\n return self.__xaxis", "def _clear(self, event):\n if self.ignore(event) or self._changed_canvas():\n return\n self._background = self.canvas.copy_from_bbox(self.ax.bbox)\n self.ax.draw_artist(self._checks)\n if hasattr(self, '_lines'):\n for l1, l2 in self._lines:\n self.ax.draw_artist(l1)\n self.ax.draw_artist(l2)", "def drawAxes(t):\r\n t.speed(0)\r\n t.pd()\r\n t.forward(500)\r\n t.back(500)", "def setup_mpl_visuals(self, axes=None) -> None:\n if axes is None:\n axes = self.subplot\n axes.patch.set_facecolor('white')\n axes.set_aspect('equal', 'box')\n axes.set_xlim(-10, 10, auto=True)\n axes.set_ylim(-10, 10, auto=True)\n # TODO: Make XYLim confort to window size/dimensions\n axes.set_xticks([])\n axes.set_yticks([])\n self.figure.subplots_adjust(bottom=0, top=1, left=0, right=1)\n axes.axis('off')", "def test_negative_ticks(self):\n fig = plt.figure()\n ax = fig.add_subplot(projection='ternary')\n ax.set_ternary_min(0, 3, -3)", "def stylize_axes(ax):\n ax.spines[\"top\"].set_visible(False)\n ax.spines[\"right\"].set_visible(False)\n\n ax.xaxis.set_tick_params(top=\"off\", direction=\"out\", width=1)\n ax.yaxis.set_tick_params(right=\"off\", direction=\"out\", width=1)", "def clear(self):\n if self.flag == 0:\n for coord in INDICES:\n self.kill(coord)\n self.chart[coord] = DEAD", "def _set_None(self):\n for obj in self.axes:\n obj._set_None()\n self.normalizations = None\n self.FTparameters = None\n self.values = None\n # Set to None the properties inherited from Data\n super(DataND, self)._set_None()", "def refresh(self):\n\t\tif len(self.curves) == 0: return\n\t\t\n\t\tmax_x = max((c[1][-1] for c in self.curves.itervalues()))\n\t\n\t\tstart_x = max_x - self.x_visible\n\t\t\n\t\tfor c, x, y in self.curves.itervalues():\n\t\t\twhile len(x) > 0 and x[0] < start_x:\n\t\t\t\tdel x[0]\n\t\t\t\tdel y[0]\n\n\t\tfor curve, x, y in self.curves.itervalues():\n\t\t\t# I guess this probably copies stuff, more\n\t\t\t# efficient way would of course to use a\n\t\t\t# 
ringbuffer, but I couldn't find a ready made\n\t\t\t# implementation and am too busy for doing things\n\t\t\t# right.\n\t\t\tcurve.setData(x, y)\n\t\t\n\t\t# The autoscale likes to hang on to integers without\n\t\t# this\n\t\tself.setAxisScale(self.xBottom, start_x, max_x)\n\t\tself.replot()", "def reset_graph(self):\n self.sick_per_timestep = []\n self.steps = []\n self.ax.clear()\n self.ax.set_xlabel(self.xlabel)\n self.ax.set_ylabel(self.ylabel)\n self.ax.set_title(self.title)", "def reset_axis_counters(self):\n\n self.column_counter = 0\n self.row_counter = 0", "def clear_figure(self):\n self.figure.clf()", "def xaxis ( self ) :\n return self.__xaxis", "def debugplots(fig,data):\n grid = AxesGrid(fig, 111, # similar to subplot(142)\n nrows_ncols=(1, 6),\n axes_pad=0.0,\n share_all=True,\n label_mode=\"L\",\n cbar_location=\"right\",\n cbar_mode=\"none\",\n )\n\n Z0=data[0].real\n Z1=data[1].real\n Z2=data[2].real\n Z3=data[3].real\n Z4=data[4].real\n Z5=data[5].real\n \n Z=[Z0,Z1,Z2,Z3,Z4,Z5]\n \n for i in range(6):\n grid[i].set_title(r\"$t=%u\\Delta t$\"%(Timestep(i)),color='black',horizontalalignment='center',verticalalignment='bottom')\n im = grid[i].imshow(Z[i], extent=(-2, 2, -2, 2), interpolation=\"Nearest\",origin=\"lower\",cmap='seismic',vmin=-1,vmax=1)\n grid[i].set_aspect(ratio)\n grid[i].set_xlabel(\"$x/10$\",size=16)\n grid[0].set_ylabel(\"$y/10$\",size=16)\n pos2 = [0.905,0.25,0.01,0.5]\n position = fig.add_axes(pos2)\n fig.colorbar(im, ax=grid[2],cax=position,extend=\"both\")\n \n for cax in grid.cbar_axes:\n cax.toggle_label(True)\n \n # This affects all axes as share_all = True.\n grid.axes_llc.set_xticks([-2,-1, 0,1])\n #grid[0].set_xticks([-20,-10, 0,10, 20])\n grid.axes_llc.set_yticks([-2, -1, 0, 1,2])", "def clear(self):\n self._x_prev = None\n self._y_prev = None", "def _use_data_bounds_changed_for_axes(self):\n self.update_pipeline()", "def refresh(self):\n\n self.ax.relim()\n self.ax.autoscale_view()\n self.canvas.draw()", "def _figure_3():\n\n dataset_id = 3\n pkl_file = _pkl_file_path(dataset_id)\n with open(pkl_file, 'rb') as f:\n data = pickle.load(f)\n\n cdata = data[:, 33]\n seconds = np.arange(data.shape[0]) * 1. / 250\n\n plt.xlim(right=seconds[-1])\n plt.plot(seconds, cdata, color='black', linestyle=':')\n plt.ticklabel_format(useOffset=False)\n plt.xlabel('Second')\n plt.ylabel('Microstrain')\n plt.savefig('Figure3.png', dpi=300)\n plt.gcf().clear()", "def updatePlot(self):\n self.axes.clear()\n self.axes.plot(self.data[0], self.data[1], linestyle='-', color='blue')\n self.axes.plot(self.data[0], self.data[2], linestyle='-', color='red')\n self.axes.plot(self.data[0], self.data[3], linestyle='-', color='gray')\n self.canvas.draw()", "def plot_xyz():\n plt.subplot(3,1,1) # for x axis\n plt.title('x value v.s. time')\n plt.grid(True)\n plt.ylabel('X')\n plt.xlabel('t')\n plt.plot(x, '-r')\n\n plt.subplot(3,1,2) # for y axis\n plt.title('y value v.s. time')\n plt.grid(True)\n plt.ylabel('Y')\n plt.xlabel('t')\n plt.plot(y, '-g')\n\n plt.subplot(3,1,3) # for z axis\n plt.title('z value v.s. 
time')\n plt.grid(True)\n plt.ylabel('Z')\n plt.xlabel('t')\n plt.plot(z, '-b')", "def _clear(self, event):\n if self.ignore(event) or self._changed_canvas():\n return\n self._background = self.canvas.copy_from_bbox(self.ax.bbox)\n self.ax.draw_artist(self._buttons)\n if hasattr(self, \"_circles\"):\n for circle in self._circles:\n self.ax.draw_artist(circle)", "def clear(self):\n self.canvas = [[self.style] * self.cols for _ in range(self.lines)]", "def tight_layout(self):\n self.fig.tight_layout()\n self.canvas.draw()", "def set_axes_equal_3d(ax):\n\n limits = np.array([ax.get_xlim3d(), ax.get_ylim3d(), ax.get_zlim3d()])\n spans = np.abs(limits[:, 0] - limits[:, 1])\n centers = np.mean(limits, axis=1)\n radius = 0.5 * np.max(spans)\n ax.set_xlim3d([centers[0]-radius, centers[0]+radius])\n ax.set_ylim3d([centers[1]-radius, centers[1]+radius])\n ax.set_zlim3d([centers[2]-radius, centers[2]+radius])", "def clearAllPlots(self):\n self.dataPoints = [[{'x': 0, 'y': 0}]]\n self.sendPreviousDataPoints()", "def clean_axes(axes, left=True, right=False):\n if not type(axes) in [np.ndarray, list]:\n axes = [axes]\n elif type(axes) == np.ndarray:\n axes = axes.flatten()\n for axis in axes:\n axis.tick_params(direction='out')\n axis.spines['top'].set_visible(False)\n if not right:\n axis.spines['right'].set_visible(False)\n if not left:\n axis.spines['left'].set_visible(False)\n axis.get_xaxis().tick_bottom()\n axis.get_yaxis().tick_left()", "def py_apply_ticks(self, plot):\n if self.x_ticks is not None:\n plot.set_xticks(self.x_ticks)\n if self.x_labels is not None:\n plot.set_xticklabels(self.x_labels)\n if self.y_ticks is not None:\n plot.set_yticks(self.y_ticks)\n if self.y_labels is not None:\n plot.set_yticklabels(self.y_labels)", "def cplot(self, figure, i, n):\n xx, yy = np.meshgrid(range(self.L), range(self.L))\n ax = figure.add_subplot(2,2,n)\n plt.setp(ax.get_yticklabels(), visible=False)\n plt.setp(ax.get_xticklabels(), visible=False) \n plt.pcolormesh(xx, yy, self.config, cmap=plt.cm.RdBu);\n plt.title('Time=%d'%i, fontsize=20)\n plt.xlabel('X', fontsize=12)\n plt.ylabel('Y',fontsize=12) \n plt.axis('tight') \n self.ax = ax", "def updatePlot(self):\n self.axes.clear()\n self.axes.plot(self.data[0], self.data[1], linestyle='-', color='gray')\n self.axes.plot(self.data[0], self.data[2], linestyle='-', color='blue')\n self.axes.plot(self.data[0], self.data[3], linestyle='-', color='darkgreen')\n self.canvas.draw()" ]
[ "0.65289444", "0.63922054", "0.6321609", "0.6298944", "0.6190664", "0.61035085", "0.60953176", "0.60531133", "0.6040696", "0.6023394", "0.6004292", "0.5933611", "0.58755267", "0.5860231", "0.58342767", "0.5827443", "0.5751592", "0.5682224", "0.5678112", "0.5671971", "0.5668456", "0.5643409", "0.5637882", "0.5592046", "0.5588504", "0.55839306", "0.55743766", "0.5564773", "0.556454", "0.55615133", "0.55474406", "0.5513909", "0.5513778", "0.55130243", "0.55105823", "0.55100214", "0.55069536", "0.54693544", "0.546934", "0.5459642", "0.5431649", "0.5420438", "0.54191923", "0.5383217", "0.5383111", "0.537514", "0.5365059", "0.53643537", "0.5363024", "0.53613496", "0.5358864", "0.5353781", "0.5351393", "0.53502387", "0.5349769", "0.5340888", "0.5336925", "0.5332484", "0.53286105", "0.53268343", "0.53189117", "0.5317791", "0.5313006", "0.53125805", "0.5308054", "0.5306995", "0.5296294", "0.52902895", "0.5261183", "0.5257222", "0.52527803", "0.5249135", "0.5249135", "0.5245647", "0.5238818", "0.5232226", "0.5225312", "0.52247804", "0.5220471", "0.5216624", "0.5209909", "0.52040535", "0.52035403", "0.5194425", "0.5194175", "0.5192586", "0.518351", "0.518077", "0.5180707", "0.5178596", "0.5177576", "0.51732063", "0.51648504", "0.5162202", "0.5159467", "0.51556635", "0.5155566", "0.5145601", "0.51433396", "0.5130625", "0.51221985" ]
0.0
-1
ffs open, he off, hgcd off, 3x axis clear, guider on
def test_guider_start_arcsOn(self): sopTester.updateModel('mcp', TestHelper.mcpState['arcs']) self._guider_start(8, 20, 0, 0)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def cehs():\n\tcloseEHShutter()", "def zguider():\n gzero.gxoff = camera.status.guider[0] + gzero.gxoff\n gzero.gyoff = camera.status.guider[1] + gzero.gyoff\n guider(0,0)\n f = open('/data/guidezero','w')\n cPickle.dump(gzero,f)\n f.close()", "def reset(self):\n self.position = np.zeros(self.ndegres)\n self.velocity = np.zeros(self.ndegres)\n self.state = np.zeros(2*self.ndegres)\n self.flag = 0\n self.h_ref = np.array([self.ref for _ in range(self.horizon)])\n self.action = np.zeros(self.ACTION_DIM) \n self.h_action = np.zeros(self.ACTION_DIM*self.horizon)", "def h_mode(stdscr):\r\n \r\n unit=0.36 #default\r\n stdscr.clear()\r\n while True:\r\n \r\n #print(\"------------Hand control mode------------\")\r\n #print(\"unit:\", unit)\r\n #print(\"current position: \"+\"( \"+str(_x_degrees)+\" , \"+str(_y_degrees)+\" )\")\r\n stdscr.addstr(0, 0, \"------------Hand control mode------------\")\r\n stdscr.addstr(1, 0, \"unit: {}\".format(unit))\r\n stdscr.addstr(2, 0, \"current position: ({},{})\".format(_x_degrees,_y_degrees))\r\n\r\n ch=stdscr.getch()\r\n stdscr.clear()\r\n \r\n if ch==259:#up\r\n rotate_degree(0,unit)\r\n elif ch==258:#down\r\n rotate_degree(0,-unit)\r\n elif ch==261:#left\r\n rotate_degree(unit,0)\r\n elif ch==260:#right\r\n rotate_degree(-unit,0)\r\n elif ch==43:#+\r\n if unit>=72:\r\n continue\r\n unit=unit+0.36\r\n elif ch==45:#-\r\n if unit<=0.36:\r\n continue\r\n unit=unit-0.36\r\n elif ch==113 or ch==813:\r\n break\r\n stdscr.refresh()", "def disable_cl1(self):\n self.write_versa5(0x10,0xc4) ## Enable xtal and clock\n self.write_versa5(0x21,0x81) ## Use and enable divider\n self.write_versa5(0x13,0x00) ## Use CL1 input instead of xtal\n self.write_versa5(0x10,0x80) ## Enable xtal input only\n self.write_versa5(0x17,0x04) ## Change top multiplier to 0x44\n self.write_versa5(0x18,0x40)", "def cerrar(self):\n self.x0 = np.array(self.x0, dtype=float)\n self.x = np.array(self.x, dtype=float)\n self.tipos = np.array(self.tipos, dtype=int)\n self.mask_fr = self.tipos == 1\n self.mask_in = self.tipos == 2\n self.num_fr = np.sum(self.mask_fr)\n self.num_in = np.sum(self.mask_in)\n self.open = False", "def resetFrame(self):\n with self.lock:\n hbin = self.hbin\n vbin = self.vbin\n if hbin != 1:\n self.setHBin(1)\n if vbin != 1:\n self.setVBin(1)\n with self.lock:\n self.expArea = self.defaultExpArea\n x1, y1, x2, y2 = self.expArea\n self.xsize = x2 - x1\n self.ysize = y2 - y1", "def clear(self):\n row, col = self.selected\n if self.cubes[row][col].value == 0:\n self.cubes[row][col].set_temp(0)", "def clear(self):\n self.np.fill(OFF)\n self.np.show()\n return True", "def reset(self):\r\n # reset Wheel encoders\r\n self.start_time = time.time()\r\n [left_start, right_start] = self.Roomba.Query(43, 44)\r\n self.Motion.reset(left_start, right_start)\r\n # reset bumper\r\n self.bumper.reset()\r\n\r\n #reset grid world data\r\n self.action=[0.0,0.0]\r\n self.grid_state= [0,0,0]\r\n self.real_state = [0.0, 0.0, 0.0]\r\n self.trans_model = None\r\n pass", "def test_flipflop(self):\n circ = FlipFlop(size=2)\n #test basic flip flop functionality\n circ.d = 3\n self.assertSigEq(circ.q, 0)\n circ.clk.pulse()\n self.assertSigEq(circ.q, 3)\n #test reset circuit\n circ.r.set()\n circ.clk.pulse()\n self.assertSigEq(circ.q, 0)\n circ.r.reset()\n #test load\n circ.l.set()\n circ.d = 3\n circ.clk.pulse()\n self.assertSigEq(circ.q, 0)\n #test enable\n circ.l.reset()\n circ.e.set()\n circ.clk.pulse()\n self.assertSigEq(circ.q, 0)", "def 
clearup(self):\n\t\tself.outChannel.clearup()\n\t\tself.inChannel.clearup()", "def initial_h_mode(stdscr):\r\n j=0\r\n unit=0.36 #default\r\n initial_points=[]\r\n stdscr.clear()\r\n while True:\r\n \r\n #print(\"------------Hand control mode------------\")\r\n #print(\"unit:\", unit)\r\n #print(\"current position: \"+\"( \"+str(_x_degrees)+\" , \"+str(_y_degrees)+\" )\")\r\n stdscr.addstr(0, 0, \"------------initialize hand control mode------------\")\r\n stdscr.addstr(1, 0, \"unit: {}\".format(unit))\r\n stdscr.addstr(2, 0, \"current position: ({},{})\".format(_x_degrees,_y_degrees))\r\n if j==0:\r\n stdscr.addstr(3, 0,\"please shoot upper left\")\r\n elif j==1:\r\n stdscr.addstr(3, 0,\"please shoot upper right\")\r\n elif j==2:\r\n stdscr.addstr(3, 0,\"please shoot lower right\")\r\n elif j==3:\r\n stdscr.addstr(3, 0,\"please shoot lower left\")\r\n\r\n ch=stdscr.getch()\r\n stdscr.clear()\r\n \r\n if ch==259:#up\r\n rotate_degree(0,unit)\r\n elif ch==258:#down\r\n rotate_degree(0,-unit)\r\n elif ch==261:#left\r\n rotate_degree(unit,0)\r\n elif ch==260:#right\r\n rotate_degree(-unit,0)\r\n elif ch==43:#+\r\n if unit>=72:\r\n continue\r\n unit=unit+0.36\r\n elif ch==45:#-\r\n if unit<=0.36:\r\n continue\r\n unit=unit-0.36\r\n elif ch==113 or ch==813:\r\n skip_shoot_by_tcp=True\r\n break\r\n elif ch==10: # enter\r\n initial_points.append(_x_degrees)\r\n initial_points.append(_y_degrees)\r\n j=j+1\r\n print(initial_points)\r\n if j==4:\r\n sendAngles(initial_points)\r\n return\r\n elif ch==8: #backspace\r\n if j==0:\r\n continue\r\n else:\r\n j=j-1\r\n initial_points.pop()\r\n initial_points.pop()\r\n stdscr.refresh()", "def setNoZeroColor():\n dislin.nobgd()", "def ToggleAllGizmoLocalMode( self ):\n\n value = self.gizmoMgr.GetGizmoLocal( 'pos' )\n self.gizmoMgr.SetGizmoLocal( 'pos', not value )\n self.gizmoMgr.SetGizmoLocal( 'rot', not value )\n self.gizmoMgr.SetGizmoLocal( 'scl', not value )", "def clear(tft, oled):\n oled.fill(tft.BLACK)", "def _switch(self):\n self.fill= not self.fill", "def clear_crossfilter2(self):\n print ('Trigger clear')\n self.query_dict = {}\n self.plot_data = None\n self.create_figure_new()\n layout_doc.children[4].children[1] = self.p", "def clear_crossfilter1(self):\n print ('Trigger clear')\n self.query_dict = {}\n self.plot_data = None\n self.create_figure_new()\n layout_doc.children[4].children[0] = self.p", "def abrir(self):\n self.x0 = [val for val in self.x0]\n self.x = [val for val in self.x]\n self.tipos = [val for val in self.tipos]\n self.mask_fr = [val for val in self.mask_fr]\n self.mask_in = [val for val in self.mask_in]\n self.open = True", "def clean_graph(self):\n #self.time = 0#\n \n # values of microcontroller\n #if self.graf_t.buffer_info()[1] != 0:\n for a in range(self.graf_t.buffer_info()[1]):\n self.graf_t.pop()\n \n for a in range(self.graf_r.buffer_info()[1]):\n self.graf_r.pop()\n\n for a in range(self.graf_x0.buffer_info()[1]):\n self.graf_x0.pop()\n\n for a in range(self.graf_x1.buffer_info()[1]):\n self.graf_x1.pop()\n\n for a in range(self.graf_u.buffer_info()[1]):\n self.graf_u.pop()\n \n self.referenceLine.set_data(self.graf_t, self.graf_r)\n self.x0Line.set_data(self.graf_t, self.graf_x0)\n self.x1Line.set_data(self.graf_t, self.graf_x1)\n self.uLine.set_data(self.graf_t, self.graf_u)\n \n try:\n #Draw the lines\n if self.checkBox_R.isChecked():\n self.mplWidget.canvas.ax.draw_artist(self.referenceLine)\n if self.checkBox_x0.isChecked():\n self.mplWidget.canvas.ax.draw_artist(self.x0Line)\n if self.checkBox_U.isChecked():\n 
self.mplWidget.canvas.ax.draw_artist(self.uLine)\n if self.checkBox_x1.isChecked():\n self.mplWidget.canvas.ax.draw_artist(self.x1Line)\n except AssertionError:\n pass\n try:\n self.mplWidget.canvas.blit(self.mplWidget.canvas.ax.bbox)\n except AttributeError:\n pass\n \n # force an image redraw\n self.mplWidget.canvas.draw()", "def test_guider_start_ffsClosed(self):\n self._guider_start(6, 20, 0, 0)", "def align(): # open EH and fast shutter\n\t#marAuxiliary.closeMarShield()\n\td2in()\n\td3in()\n\tsh('o')", "def terminatePlane3D():\n dislin.grffin()", "def all_off(self):\n self.fill_off()\n self.update()\n self.fill_off()\n self.update()", "def clear(self):\n self.cmd(0x33) # $33 8-bit mode\n self.cmd(0x32) # $32 8-bit mode\n self.cmd(0x28) # $28 8-bit mode\n self.cmd(0x0C) # $0C 8-bit mode\n self.cmd(0x06) # $06 8-bit mode\n self.cmd(0x01) # $01 8-bit mode", "def fill_single_world():\n if not front_is_clear():\n if not right_is_clear():\n if not left_is_clear():\n put_beeper()", "def proz_manually ():\r\n Check_180turn(left_boundary,right_boundary)\r\n ABS() # Baseline correction \r\n XCMD(\"closeall\",WAIT_TILL_DONE)", "def clear():\n\tglobal _s\n\t_s.screen.fill(_s.back)\n\t_s.tab(0,0)\n\t_flip()", "def full_reset(self):\n self.at_cmd('CFUN=1')", "def ClearDisplay():\n display.fill(0)", "def main():\r\n micropython.kbd_intr(3)\r\n # configure spi interface\r\n spi = SPI(1, baudrate=31250000, sck=Pin(10), mosi=Pin(11))\r\n\r\n # initialize display\r\n tft = st7789.ST7789(\r\n spi,\r\n 240,\r\n 240,\r\n reset=Pin(12, Pin.OUT),\r\n cs=Pin(9, Pin.OUT),\r\n dc=Pin(8, Pin.OUT),\r\n backlight=Pin(13, Pin.OUT),\r\n rotation=1,\r\n buffer_size=64*64*2)\r\n\r\n # enable display and clear screen\r\n tft.init()\r\n tft.fill(st7789.WHITE)\r\n\r\n keyA = Pin(15, Pin.IN, Pin.PULL_UP)\r\n keyB = Pin(17, Pin.IN, Pin.PULL_UP)\r\n keyX = Pin(19, Pin.IN, Pin.PULL_UP)\r\n keyY = Pin(21, Pin.IN, Pin.PULL_UP)\r\n\r\n up = Pin(2, Pin.IN, Pin.PULL_UP)\r\n down = Pin(18, Pin.IN, Pin.PULL_UP)\r\n left = Pin(16, Pin.IN, Pin.PULL_UP)\r\n right = Pin(20, Pin.IN, Pin.PULL_UP)\r\n ctrl = Pin(3, Pin.IN, Pin.PULL_UP)\r\n\r\n while True:\r\n if keyA.value() == 0:\r\n tft.fill_rect(208, 15, 30, 30, st7789.RED)\r\n print(\"A\")\r\n else:\r\n tft.fill_rect(208, 15, 30, 30, st7789.WHITE)\r\n tft.rect(208, 15, 30, 30, st7789.RED)\r\n\r\n if keyB.value() == 0:\r\n tft.fill_rect(208, 75, 30, 30, st7789.RED)\r\n print(\"B\")\r\n else:\r\n tft.fill_rect(208, 75, 30, 30, st7789.WHITE)\r\n tft.rect(208, 75, 30, 30, st7789.RED)\r\n\r\n if keyX.value() == 0:\r\n tft.fill_rect(208, 135, 30, 30, st7789.RED)\r\n print(\"C\")\r\n else:\r\n tft.fill_rect(208, 135, 30, 30, st7789.WHITE)\r\n tft.rect(208, 135, 30, 30, st7789.RED)\r\n\r\n if keyY.value() == 0:\r\n tft.fill_rect(208, 195, 30, 30, st7789.RED)\r\n print(\"D\")\r\n else:\r\n tft.fill_rect(208, 195, 30, 30, st7789.WHITE)\r\n tft.rect(208, 195, 30, 30, st7789.RED)\r\n\r\n if up.value() == 0:\r\n tft.fill_rect(60, 60, 30, 30, st7789.RED)\r\n print(\"UP\")\r\n else:\r\n tft.fill_rect(60, 60, 30, 30, st7789.WHITE)\r\n tft.rect(60, 60, 30, 30, st7789.RED)\r\n\r\n if down.value() == 0:\r\n tft.fill_rect(60, 150, 30, 30, st7789.RED)\r\n print(\"DOWM\")\r\n else:\r\n tft.fill_rect(60, 150, 30, 30, st7789.WHITE)\r\n tft.rect(60, 150, 30, 30, st7789.RED)\r\n\r\n if left.value() == 0:\r\n tft.fill_rect(15, 105, 30, 30, st7789.RED)\r\n print(\"LEFT\")\r\n else:\r\n tft.fill_rect(15, 105, 30, 30, st7789.WHITE)\r\n tft.rect(15, 105, 30, 30, st7789.RED)\r\n\r\n if right.value() == 0:\r\n 
tft.fill_rect(105, 105, 30, 30, st7789.RED)\r\n print(\"RIGHT\")\r\n else:\r\n tft.fill_rect(105, 105, 30, 30, st7789.WHITE)\r\n tft.rect(105, 105, 30, 30, st7789.RED)\r\n\r\n if ctrl.value() == 0:\r\n tft.fill_rect(60, 105, 30, 30, st7789.RED)\r\n print(\"CTRL\")\r\n else:\r\n tft.fill_rect(60, 105, 30, 30, st7789.WHITE)\r\n tft.rect(60, 105, 30, 30, st7789.RED)", "def toggle_zero_grid(self, x):\r\n self.konfig.zero.set_grid(x)\r\n self.zeroGraf.toggle_grid(x)", "def reset_phis ( self ) :\n for f in self.__phis : f.setVal(0)", "def all_off():\n Leds.red_left.brightness = 0\n Leds.red_right.brightness = 0\n Leds.green_left.brightness = 0\n Leds.green_right.brightness = 0\n Leds.blue_left.brightness = 0\n Leds.blue_right.brightness = 0", "def reset(self):\n self.u0.fill(0.)\n self.u1.fill(0.)\n self.u2.fill(0.)\n self.time = 0.", "def fix(hobj):\n\n h.execute('create axon[2]', hobj)\n\n for sec in hobj.axon:\n sec.L = 30\n sec.diam = 1\n hobj.axonal.append(sec=sec)\n hobj.all.append(sec=sec) # need to remove this comment\n\n hobj.axon[0].connect(hobj.soma[0], 0.5, 0)\n hobj.axon[1].connect(hobj.axon[0], 1, 0)\n\n h.define_shape()", "def zeroing(self):\n x_zeroed, y_zeroed, z_zeroed = False, False, False\n self._stepper_x.set_stepper(defines.STEPPER_X_MAX_HZ / 2, -defines.BOARD_X_LENGTH)\n self._stepper_y_left.set_stepper(defines.STEPPER_Y_MAX_HZ / 2, -defines.BOARD_Y_LENGTH)\n self._stepper_y_right.set_stepper(defines.STEPPER_Y_MAX_HZ / 2, -defines.BOARD_Y_LENGTH)\n self._stepper_z.set_stepper(defines.STEPPER_Z_MAX_HZ / 2, -defines.BOARD_Z_LENGTH)\n\n while x_zeroed is False or y_zeroed is False or z_zeroed is False:\n if x_zeroed is False and self._switch_reset_x.get_state() is True:\n self._stepper_x.set_stepper(0, 0)\n x_zeroed = True\n\n if y_zeroed is False and self._switch_reset_y.get_state() is True:\n self._stepper_y_left.set_stepper(0, 0)\n self._stepper_y_right.set_stepper(0, 0)\n y_zeroed = True\n\n if z_zeroed is False and self._switch_reset_z.get_state() is True:\n self._stepper_z.set_stepper(0, 0)\n z_zeroed = True", "def f_fixed(self):\n self.fx_free = self.fy_free = self.fz_free = False\n return self", "def turn_aux_heat_off(self):\n self.set_operation_mode(STATE_HEAT)", "def __clear_all(self,event):\n \n self.CoagS = \\\n self.CoagS_term = \\\n self.GR_term = \\\n self.J_time = \\\n self.J = \\\n self.J_lims = \\\n self.mmd_time = \\\n self.mmd_dp = \\\n self.mmd_time_sr = \\\n self.mmd_dp_sr = np.array([])\n \n # Initialize all np.nan variables\n self.J_peak = \\\n self.J_halfmax = \\\n self.J_median = \\\n self.gr = np.nan\n \n # Clears polygon used to outline particle mode\n self.polyx = \\\n self.polyy = \\\n self.polyx_out = \\\n self.polyy_out = np.array([])\n self.poly.set_xy(np.ones((2,2))*np.nan)\n self.poly_out.set_xy(np.ones((2,2))*np.nan)\n \n self.box_gr.set_val(\"%.2f\" % self.gr)\n self.box_J_peak.set_val(\"%.2f\" % self.J_peak)\n self.box_J_median.set_val(\"%.2f\" % self.J_median)\n self.box_J_halfmax.set_val(\"%.2f\" % self.J_halfmax)\n\n # clear average mode diameters and fit\n self.mmd_plot.set_data(np.nan,np.nan)\n self.mmd_plot_sr.set_data(np.nan,np.nan)\n self.mmd_fit_sr.set_data(np.nan,np.nan)\n self.J_plot.set_data(np.nan,np.nan)\n self.J_fit.set_data(np.nan,np.nan)\n self.J_vertical_line1.set_xdata(np.nan)\n self.J_vertical_line2.set_xdata(np.nan)\n self.gr_term_plot.set_data(np.nan,np.nan)\n self.coags_term_plot.set_data(np.nan,np.nan)\n self.dNdt_term_plot.set_data(np.nan,np.nan)\n\n plt.draw()", "def enable_cl1_direct(self):\n 
self.write_versa5(0x17,0x02) ## Change top multiplier to 0x22\n self.write_versa5(0x18,0x20)\n self.write_versa5(0x10,0xc0) ## Enable xtal and clock\n self.write_versa5(0x13,0x03) ## Switch to clock\n self.write_versa5(0x10,0x44) ## Enable clock input only and refmode\n self.write_versa5(0x21,0x0c) ## Use previous channel, direct input, may have skew", "def event_m20_11_x51(z73=_, z74=_):\n \"\"\"State 0,1: Flag reset\"\"\"\n SetEventFlagsInRange(z73, z74, 0)\n \"\"\"State 2: End state\"\"\"\n return 0", "async def async_turn_aux_heat_off(self) -> None:\n self._aux = False\n self.async_write_ha_state()", "def off(self):", "def reset(self):\n\n self.x = np.random.randint(3, self.grid_size-3, size=1)[0]\n self.y = np.random.randint(3, self.grid_size-3, size=1)[0]\n \n self.h_x = []\n self.h_y = []\n for i in range(self.hunters):\n x = np.random.randint(3, self.grid_size-3, size=1)[0]\n y = np.random.randint(3, self.grid_size-3, size=1)[0]\n self.h_x.append(x)\n self.h_y.append(y)\n\n self.trajectory = np.zeros((self.grid_size,self.grid_size))\n\n bonus = 0.5 * np.random.binomial(1,self.temperature,size=self.grid_size**2)\n bonus = bonus.reshape(self.grid_size,self.grid_size)\n\n malus = -1.0 * np.random.binomial(1,self.temperature,size=self.grid_size**2)\n malus = malus.reshape(self.grid_size, self.grid_size)\n\n self.to_draw = np.zeros((self.max_time+2, self.grid_size*self.scale, self.grid_size*self.scale, 3))\n\n\n malus[bonus>0]=0\n\n self.board = bonus + malus\n\n self.position = np.zeros((self.grid_size, self.grid_size))\n self.position[0:2,:]= -1\n self.position[:,0:2] = -1\n self.position[-2:, :] = -1\n self.position[:, -2:] = -1\n self.board[self.x,self.y] = 0\n self.t = 0\n \n self.board_with_hunters[:,:] = 0\n \n for i in range(self.hunters):\n self.board_with_hunters[self.h_x[i],self.h_y[i]] = -100\n \n\n global_state = np.concatenate((\n self.board.reshape(self.grid_size, self.grid_size,1),\n self.position.reshape(self.grid_size, self.grid_size,1),\n self.trajectory.reshape(self.grid_size, self.grid_size,1),\n self.board_with_hunters.reshape(self.grid_size, self.grid_size,1)),axis=2)\n\n state = global_state[self.x - 2:self.x + 3, self.y - 2:self.y + 3, :]\n return state", "def toggle_fore_mod(self, checked):\n for tile in self.tiles:\n tile.toggle_fore_mod(checked)", "def clear(self):\n\n self.index = 1\n self.degen = 1.\n self.nnnn_out = False\n self.json_out = False\n self.verbose = False\n self.ipol = 0\n self.ellip = 0.\n self.nepts = 0\n self.genfmt_order = 2\n self.genfmt_vers = \"\"\n self.exch_label = \"\"\n self.rs = 0.\n self.vint = 0.\n self.xmu = 0.\n self.edge = 0.\n self.kf = 0.\n self.rnorman = 0.\n self.gamach = 0.\n self.nepts = FEFF_maxpts\n\n dargs = dict(dtype=np.float64, order='F')\n largs = dict(dtype=np.int32, order='F')\n\n self.evec = np.zeros(3, **dargs)\n self.xivec = np.zeros(3, **dargs)\n self.ipot = np.zeros(1+FEFF_maxleg, **largs)\n self.beta = np.zeros(1+FEFF_maxleg, **dargs)\n self.eta = np.zeros(2+FEFF_maxleg, **dargs)\n self.ri = np.zeros(FEFF_maxleg, **dargs)\n self.rat = np.zeros((3, 2+FEFF_maxleg), **dargs)\n self.iz = np.zeros(1+FEFF_maxpot, **largs)\n self.kfeff = np.zeros(FEFF_maxpts, **dargs)\n self.real_phc = np.zeros(FEFF_maxpts, **dargs)\n self.mag_feff = np.zeros(FEFF_maxpts, **dargs)\n self.pha_feff = np.zeros(FEFF_maxpts, **dargs)\n self.red_fact = np.zeros(FEFF_maxpts, **dargs)\n self.lam = np.zeros(FEFF_maxpts, **dargs)\n self.rep = np.zeros(FEFF_maxpts, **dargs)\n self.nleg = 1", "def cb_reset(event):\n axDirichlet.cla()\n # 
Reset Sliders\n sAlpha0.reset() # resetが駄目!一番最初に戻ってしまう\n sAlpha1.reset()\n sAlpha2.reset()\n alpha_update = [sAlpha0.val, sAlpha1.val, sAlpha2.val]\n print('alpha_update=', alpha_update)\n\n # ML\n lambda_ML = CatML.MLinfer(x_cat)\n\n axML.cla()\n drawBarGraph( axML, \"ML\", lambda_ML, bar_y_max, col_ML ) # Draw Bar graph\n\n\n # MAP\n dirichlet.set_param(alpha_update)\n lambda_MAP = CatMAP.MAPinfer(x_cat, dirichlet)\n\n axMAP.cla()\n drawBarGraph( axMAP, \"MAP\", lambda_MAP, bar_y_max, col_MAP ) # Draw Bar Graph\n\n # Bayes\n posteriorDirichlet.set_param(alpha_update)\n posteriorDirichlet.calcPosterior(x_cat)\n lambda_Bayes = np.zeros(3)\n for k in range(3):\n lambda_Bayes[k] = posteriorDirichlet.BayesInfer(k)\n\n axBayes.cla()\n drawBarGraph( axBayes, \"Bayes\", lambda_Bayes, bar_y_max, col_Bayes ) # Draw Bar Graph\n\n draw_pdf_contours(axDirichlet, dirichlet, True) # Draw Dirichlet\n\n print('Reset')\n print('lambda_ML =', lambda_ML)\n print('lambda_MAP =', lambda_MAP)\n print('lambda_Bayes=', lambda_Bayes)\n draw_point(axDirichlet, lambda_ML, col_ML)\n draw_point(axDirichlet, lambda_MAP, col_MAP)\n draw_point(axDirichlet, lambda_Bayes, col_Bayes)\n draw_point(axLikelihood, lambda_ML, col_ML)\n draw_point(axPosteriorDirichlet, lambda_MAP, col_MAP)\n draw_point(axPosteriorDirichlet, lambda_Bayes, col_Bayes)\n\n fig.canvas.draw_idle()", "def inverted_hammer(self):\n self.data['inverted_hammer'] = (((self.data['High'] - self.data['Low']) > ((self.data['Open'] - self.data['Close'])*3)) & \\\n ((self.data['High'] - self.data['Close']) / ((.001 + self.data['High'] - self.data['Low']) > 0.6)) & \\\n ((self.data['High'] - self.data['Open']) / ((.001 + self.data['High'] - self.data['Low']) > 0.6)))", "def reset(self):\n self.F = 0\n self.M = 0\n self.w = np.zeros(self.n)\n self.z = np.zeros(self.n)", "def update_H(self):", "def setSurfaceMeshing(state='off',shading=1):\n sdict = {'off':'OFF','on':'ON'}\n val = sdict[state]\n if not shading:\n val = 'ONLY'\n dislin.surmsh(val)", "def driver(datacube,xstart=0,ystart=0,xr=None,yr=None,xsgn=1,ysgn=1,outfile=None,\n plotxr=None,trackplot=False,noplot=True,silent=False,\n noback=False,backret=True,wander=False,startfile=None,\n savestep=10000,clobber=False):\n\n global BTRACK, GSTRUC, NPIX\n\n savetime = time.time()\n endflag = False\n count = 0\n tstart = time.time() \n\n\n # Load the cube\n if type(datacube) is str:\n datacubefile = datacube\n print('Loading '+datacubefile)\n datacube = Cube.read(datacubefile)\n \n # Setting parameters\n if xr is None:\n xr = [0,datacube.nx-1]\n if yr is None:\n yr = [0,datacube.ny-1] \n if xstart is None:\n if xsgn == 1: \n xstart = xr[0] \n else: \n xstart = xr[1]\n if ystart is None:\n if ysgn == 1 : \n ystart = yr[0] \n else: \n ystart = yr[1]\n if wander:\n backret = False\n if noback:\n backret = False\n\n # No mode selected, using default mode (backret) \n if (backret == False) and (noback == False) and (wander == False): \n print('' )\n print('!!! WARNING !!! 
NO MODE SELECTED -> USING DEFAULT (BACKRET) MODE' )\n print('')\n sleep(3) \n backret = True\n\n\n # Checking the file\n if outfile is None:\n logtime = datetime.now().strftime(\"%Y%m%d%H%M%S\") \n outfile = 'gaussdecomp_'+logtime+'.fits' \n\n # STARTING WITH BTRACK, RESTORING THE LAST STATE\n if startfile is not None:\n print('Starting with last state of input file '+str(startfile))\n with open(startfile,'rb') as f: \n BTRACK = pickle.load(f)\n GSTRUC = pickle.load(f)\n count = BTRACK['count']\n x = BTRACK['x'][count-1]\n y = BTRACK['y'][count-1]\n track = BTRACK['data'][count-1]\n back = track['back']\n redo = track['redo']\n redo_fail = track['redo_fail']\n skip = False\n count += 1\n xstart = x\n ystart = y\n lastx = x \n lasty = y\n # STARTING TRACKING FRESH\n else:\n initialize_tracking(wander,yr)\n redo_fail = False \n redo = False\n back = False\n lastx = None\n lasty = None\n \n # Printing out the inputs\n if silent==False:\n print(' RUNNING GAUSSIAN ANALYSIS WITH THE FOLLOWING PARAMETERS')\n print('-----------------------------------------------------------')\n print(' STARTING POSITION = (%d,%d)' % (xstart,ystart))\n print(' X RANGE = [%d,%d]' % (xr[0],xr[1]))\n print(' Y RANGE = [%d,%d]' % (yr[0],yr[1]))\n print(' X DIRECTION = '+str(xsgn))\n print(' Y DIRECTION = '+str(ysgn))\n print(' OUTFILE = '+outfile)\n print('-----------------------------------------------------------')\n if (backret == 1) : \n print(' USING (BACKRET) MODE')\n if (noback == 1) : \n print(' USING (NOBACK) MODE')\n if (wander == 1) : \n print(' USING (WANDER) MODE')\n print('-----------------------------------------------------------')\n print('')\n \n # Initializing some parameters \n p0 = False\n p1 = False\n p2 = False\n p3 = False\n p4 = False \n # Where are we starting \n x = xstart \n y = ystart \n\n track_dict = {'count':None,'x':None,'y':None,'rms':None,'noise':None,'par':None,\n 'guesspar':None,'guessx':None,'guessy':None,'back':None,'redo':None,\n 'redo_fail':None,'skip':None,'lastx':None,'lasty':None,'npix':None}\n gstruc_dict = {'x':None,'y':None,'rms':None,'noise':None,'par':None,\n 'sigpar':None,'lon':None,'lat':None,'npix':None}\n\n \n \n # STARTING THE LARGE LOOP \n while (endflag == False): \n t00 = time.time() \n skip,guessx,guessy,guesspar = False,None,None,None \n tstr = {'par':None}\n tstr1 = {'par':None}\n tstr2 = {'par':None} \n\n # FIGURE OUT THE NEXT MOVE \n #------------------------- \n if (count > 0):\n lastx,lasty = x,y\n out = nextmove(x,y,xr,yr,count,xsgn,ysgn,backret=backret,noback=noback,\n wander=wander,redo=redo,back=back,redo_fail=redo_fail,silent=silent)\n x,y,guessx,guessy,guesspar,back,redo,skip,endflag = out\n \n # The end\n if endflag:\n break\n \n # Starting the tracking structure, bad until proven good\n track = track_dict.copy()\n track['count'] = count \n track['x'] = x \n track['y'] = y \n track['lastx'] = lastx \n track['lasty'] = lasty \n track['guesspar'] = guesspar \n track['guessx'] = guessx \n track['guessy'] = guessy \n track['back'] = back \n track['redo'] = redo \n track['skip'] = skip \n\n # Minimal structure, in case we skip\n tstr = {'x':x,'y':y,'rms':np.inf,'noise':None,'par':None,\n 'sigpar':None,'lon':None,'lat':None}\n \n # Some bug checking \n if x is None: \n import pdb; pdb.set_trace() \n if (x == lastx) and (y == lasty): \n import pdb; pdb.set_trace() \n \n if skip:\n if silent==False:\n print('SKIP')\n \n # FITTING THE SPECTRUM, UNLESS WE'RE SKIPPING IT \n #------------------------------------------------ \n if skip == False: \n t0 = 
time.time() \n \n # Initial Printing\n if silent==False:\n print('Fitting Gaussians to the HI spectrum at (%d,%d)' % (x,y))\n strout = ''\n if redo:\n strout = strout+'REDO '\n if back:\n strout = strout+'BACK'\n if back is False:\n strout = strout+'FORWARD' \n print(strout) \n \n # Getting the HI spectrum\n spec = datacube(x,y) # Get the new spectrum\n # No good spectrum \n if spec is None or np.sum(spec.flux)==0:\n if silent==False:\n print('No spectrum to fit')\n skip = True\n count += 1\n btrack_add(track)\n continue\n\n lon,lat = datacube.coords(x,y)\n noise = spec.noise\n npts = spec.n\n sigma = np.ones(npts,float)*noise\n NPIX = npts\n \n # Zero-velocity region INCLUDED\n #==================================== \n if np.min(spec.vel) < 0:\n # GETTIING THE VELOCITY RANGE around the zero-velocity MW peak\n if silent==False:\n print('Zero-velocity region INCLUDED. Fitting it separately')\n smspec = dln.savgol(spec.flux,21,2) \n dum,vindcen = dln.closest(spec.vel,0)\n \n # Finding the vel. low point \n lflag = 0 \n i = vindcen\n lo = 0\n while (lflag == 0): \n if smspec[i] <= noise: \n lo = i \n if smspec[i] <= noise: \n lflag = 1\n i -= 1 \n if i < 0: \n lflag = 1 \n lo = np.maximum(0,(lo-20))\n \n # Finding the vel. high point \n hflag = 0 \n i = vindcen\n hi = npts-1\n while (hflag == 0): \n if smspec[i] <= noise : \n hi = i \n if smspec[i] <= noise : \n hflag = 1 \n i += 1 \n if i > npts-1: \n hflag = 1 \n hi = np.minimum((npts-1),(hi+20))\n \n vmin = spec.vel[lo] \n vmax = spec.vel[hi] \n \n # RUNNING GAUSSFITTER ON ZERO VELOCITY REGION, WITH GUESS \n v0results = fitter.gaussfitter(spec,vmin=vmin,vmax=vmax,initpar=guesspar,silent=True,noplot=True) \n \n # FIT WITH NO GUESS (if first time and previous fit above with guess) \n tp0,tres0 = gfind(x,y,xr=xr,yr=yr) \n if (tp0 == 0) and (guesspar is not None):\n v0results_noguess = fitter.gaussfitter(spec,vmin=vmin,vmax=vmax,silent=True,noplot=True)\n b,dbic = gbetter(v0results,v0results_noguess)\n # The fit without the guess is better \n if (dbic>0):\n v0results = v0results_noguess.copy()\n \n # ADDING THE BEST RESULTS TO THE STRUCTURE, TSTR1\n if v0results['par'] is not None:\n ngauss = len(v0results['par'])//3\n tstr1 = gstruc_dict.copy()\n for n in ['par','sigpar','rms','npix']:\n tstr1[n] = v0results[n] \n tstr1['x'] = x \n tstr1['y'] = y\n tstr1['noise'] = spec.noise \n tstr1['lon'] = lon\n tstr1['lat'] = lat\n else:\n tstr1 = {'par':None}\n \n # REMOVING ZERO-VELOCITY parameters and spectrum\n guesspar2 = None\n inspec = spec.copy()\n if v0results['par'] is not None:\n th = utils.gfunc(spec.vel,*v0results['par'])\n inspec = spec.copy()\n inspec.flux -= th\n npts = spec.n\n if guesspar is not None:\n guesspar2 = np.array([],float)\n inpar1 = np.copy(guesspar)\n inpar2 = np.copy(guesspar)\n inpar1 = utils.gremove(inpar1,spec.vel[0:lo],spec.flux[0:lo],noise)\n if inpar1 is not None:\n guesspar2 = np.hstack((guesspar2,inpar1))\n inpar2 = utils.gremove(inpar2,spec.vel[hi:npts],spec.flux[hi:npts],noise)\n if inpar2 is not None:\n guesspar2 = np.hstack((guesspar2,inpar2))\n if len(guesspar2)==0:\n guesspar2 = None\n \n \n # RUNNING GAUSSFITTER ON EVERYTHING WITHOUT THE ZERO-VELOCITY REGION, WITH GUESS \n results = fitter.gaussfitter(inspec,initpar=guesspar2,noplot=True,silent=True)\n \n \n # FIT WITH NO GUESS (if first time and previous fit above with guess) \n if (tp0 == 0) and (guesspar is not None):\n results_noguess = fitter.gaussfitter(inspec,silent=True,noplot=True) \n b,dbic34 = gbetter(results,results_noguess)\n # The fit without 
the guess is better \n if (b == 1):\n results = results_noguess.copy()\n \n # ADDING THE RESULTS TO THE STRUCTURE, TSTR2\n if results['par'] is not None:\n ngauss = len(results['par'])//3 \n tstr2 = gstruc_dict.copy()\n for n in ['par','sigpar','rms','npix']:\n tstr2[n] = results[n] \n tstr2['x'] = x \n tstr2['y'] = y\n tstr2['noise'] = spec.noise\n tstr2['lon'] = lon \n tstr2['lat'] = lat\n else:\n tstr2 = {'par':None}\n \n # ADDING THE STRUCTURES TOGETHER, TSTR = [TSTR1,TSTR2]\n if tstr1['par'] is not None and tstr2['par'] is not None:\n tstr = tstr1.copy()\n tstr['par'] = np.hstack((tstr1['par'],tstr2['par']))\n tstr['sigpar'] = np.hstack((tstr1['sigpar'],tstr2['sigpar']))\n tstr['rms'] = utils.computerms(spec.vel,spec.flux,tstr['par'])\n if tstr1['par'] is not None and tstr2 is None:\n tstr = tstr1.copy()\n if tstr1['par'] is None and tstr2['par'] is not None:\n tstr = tstr2.copy()\n if tstr1['par'] is None and tstr2['par'] is None: # no gaussians\n tstr = gstruc_dict.copy()\n tstr['x'] = x \n tstr['y'] = y \n tstr['lon'] = lon \n tstr['lat'] = lat \n tstr['rms'] = np.inf\n tstr['noise'] = spec.noise \n tstr['npix'] = len(spec.flux)\n \n \n # Does NOT cover zero-velocity region\n #====================================\n else:\n if silent==False:\n print('Zero-velocity NOT covered')\n # RUNNING GAUSSFITTER ON EVERYTHING WITH GUESS \n results = fitter.gaussfitter(spec,initpar=guesspar,noplot=True,silent=True) \n \n # FIT WITH NO GUESS (if first time and previous fit above with guess)\n tp0,res0 = gfind(x,y,xr=xr,yr=yr)\n results2 = None\n if (tp0 == 0) and (guesspar is not None):\n results2 = fitter.gaussfitter(spec,silent=True,noplot=True) \n b,dbic = gbetter(results,results2)\n # The fit without the guess is better \n if (dbic>0): \n results = results2.copy() \n \n # Creating the structure with the results\n if results['par'] is not None:\n ngauss = len(results['par'])//3\n tstr = gstruc_dict.copy()\n for n in ['par','sigpar','rms','npix']:\n tstr[n] = results[n] \n tstr['x'] = x\n tstr['y'] = y\n tstr['noise'] = spec.noise\n tstr['lon'] = lon \n tstr['lat'] = lat\n else:\n tstr = {'par':None}\n \n \n # PLOTTING/PRINTING, IF THERE WAS A FIT \n if tstr['par'] is not None:\n # Getting the rms of all the components of the whole spectrum\n tstr['rms'] = utils.computerms(spec.vel,spec.flux,tstr['par'])\n \n # Printing and plotting\n if noplot==False:\n utils.gplot(spec.vel,spec.flux,tstr['par'],xlim=plotxr)\n if silent==False:\n utils.printgpar(tstr['par'],sigpar=tstr['sigpar'],rms=tstr['rms'],noise=tstr['noise'])\n if trackplot:\n utils.gtrackplot(x,y,lastx,lasty,redo, count,xr=xr,yr=yr,pstr=pstr,xstr=xstr,ystr=ystr)\n else:\n if silent==False:\n print('No gaussians found at this position!')\n\n # ADDING SOLUTION TO GSTRUC\n if tstr['par'] is not None:\n if count == 0: \n gstruc_add(tstr)\n if count > 0: \n old,res1 = gfind(x,y,xr=xr,yr=yr)\n \n # This is a re-decomposition \n if (old==1) and redo: \n # Checking the two decompositions \n b,dbic = gbetter(tstr,res1)\n # New one is better \n if (b == False): \n gstruc_replace(tstr) # replacing the solution\n redo_fail = False\n else: # re-decomposition failed \n redo_fail = True\n if silent==False:\n print('REDO FAILED!')\n \n # This is NOT a re-decomposition, add it \n if (old==0) or (redo == False): \n t1 = time.time()\n gstruc_add(tstr)\n redo_fail = False\n\n\n # SKIP FITTING PART\n else: \n # Creating a dummy structure \n tstr = {'par':None}\n redo_fail = False\n redo = False\n back = False\n \n if trackplot:\n 
utils.gtrackplot(x,y,lastx,lasty,redo,count,xr=xr,yr=yr,pstr=pstr,xstr=xstr,ystr=ystr)\n\n \n # FINISHING UP THE TRACKING STRUCTURE\n if tstr['par'] is not None:\n npar = len(tstr['par'])\n track['par'] = tstr['par']\n track['rms'] = tstr['rms']\n track['noise'] = tstr['noise']\n track['npix'] = tstr['npix']\n else:\n npar = 0\n track['redo_fail'] = redo_fail \n \n # UPDATING THE TRACKING STRUCTURE\n btrack_add(track)\n\n # The end\n if ((x>=xr[1]) and (y>=yr[1])):\n break\n \n count += 1 \n \n # Saving the last position \n lastx = x \n lasty = y \n \n # SAVING THE STRUCTURES, periodically\n if (count % savestep == 0) and (time.time()-savetime) > 1800:\n gstruc = savedata(outfile)\n savetime = time.time()\n \n # FINAL SAVE\n ngauss = GSTRUC['ngauss']\n print(str(ngauss)+' final Gaussians')\n gstruc = savedata(outfile)\n \n # Clean up the tracking structures\n del BTRACK\n del GSTRUC\n\n print('Total time = %.2f sec.' % (time.time()-tstart))\n \n return gstruc", "def wave_clear():\n return _u2i(_pigpio_command(_control, _PI_CMD_WVCLR, 0, 0))", "def __reset_crosshair(self):\n self.lhor.set_ydata(self.y_coord)\n self.lver.set_xdata(self.x_coord)", "def clear(self):\r\n\t\tself.grid.fill(False)", "def plot_clear():\n plt.cla()", "def clear(self):\r\n self.SetPoint = 0.0\r\n\r\n self.PTerm = 0.0\r\n self.ITerm = 0.0\r\n self.DTerm = 0.0\r\n self.last_error = 0.0\r\n\r\n # Windup Guard\r\n self.int_error = 0.0\r\n self.windup_guard = 20.0\r\n\r\n self.output = 0.0", "def __qft_input_state(self, size) -> None:\n\n for i in range(size):\n self.h(i)\n self.u1(-math.pi / float(2 ** i), i)", "def disable_cl2(self):\n self.write_versa5(0x31,0x80) ## Disable divider output for clock2\n self.write_versa5(0x63,0x00) ## Disable clock2 output", "def erase(self):\n\tself.state={}\n\tself.display(update_board=0)", "def clear(self):\n if self.flag == 0:\n for coord in INDICES:\n self.kill(coord)\n self.chart[coord] = DEAD", "def clear(self):\n self._delayvalue = _CFG[\"delay\"]\n self._colormode = _CFG[\"colormode\"]\n self._delete(\"all\")\n self._bgpic = self._createimage(\"\")\n self._bgpicname = \"nopic\"\n self._tracing = 1\n self._updatecounter = 0\n self._turtles = []\n self.bgcolor(\"white\")\n for btn in 1, 2, 3:\n self.onclick(None, btn)\n self.onkeypress(None)\n for key in self._keys[:]:\n self.onkey(None, key)\n self.onkeypress(None, key)\n Myturtle._pen = None", "def turn_aux_heat_on(self):\n self._device.set_mode(self._device.MODE_HEAT_EMERGENCY)\n self._device.set_fan(self._device.FAN_AUTO)", "def reset(self):\n self.vrp = np.matrix([0.5, 0.5, 1])\n self.vpn = np.matrix([0, 0, -1])\n self.vup = np.matrix([0, 1, 0])\n self.u = np.matrix([-1, 0, 0])\n self.extent = [1., 1., 1.]\n self.screen = [400., 400.]\n self.offset = [20., 20.]", "def clear(self):\n\t\tself.PTerm = 0.0\n\t\tself.ITerm = 0.0\n\t\tself.DTerm = 0.0\n\t\tself.clearing = True\n\n\t\tself.output = 0.0", "def toggle_fccm(self, i: int = -1, verbose=True):\n written = self.set_bit(\"d2\", 0, i)\n if verbose:\n print(\"FCCM: \" + written)", "def ToggleOff(self):\n for s in self.sensors:\n self.gSetpt[s.GetID()].Disable()\n\n self.top_sizer.Layout()\n print(\"Toggle off\")", "def pswitchoff(chan) :\n s.phaseSwitching(False, chan)", "def update(self):\n self.active = False\n self.top.update(self.rgb,self.cmyk,self.hsv)\n self.bot.update(self.rgb,self.cmyk,self.hsv)\n self.active = True", "def init_game_setting(self):\n ##################\n # YOUR CODE HERE #\n ##################\n self.state = np.zeros((1, 80, 80))\n 
self.clear_action()", "def hold(self):\n self.dev.write(1, 'H')", "def fluid_reynolds(uu, param, grid, lnrho=list(), shock=list(), nghost=3,\n lmix=True):\n #viscous forces\n th2 = 2./3\n th1 = 1./3\n fvisc = np.zeros_like(uu)\n #molecular viscosity contribution\n ldel2, lshock, lhyper3 = False, False, False\n for ivisc in param.ivisc:\n if not 'shock' in ivisc and not 'hyper' in ivisc\\\n and not '\\n' in ivisc:\n ldel2 = True\n if 'shock' in ivisc:\n lshock = True\n if 'hyper3' in ivisc:\n lhyper3 = True\n \n if ldel2:\n if lhyper3:\n lhyper3 = lhyper3==lmix\n del2u = np.zeros_like(uu)\n for j in range(0,3):\n del2u[j] = del2(uu[j],grid.dx,grid.dy,grid.dz,x=grid.x,y=grid.y,\n coordinate_system=param.coord_system)\n del2u[j, : nghost, nghost:-nghost, nghost:-nghost] = del2u[j,-2*nghost: -nghost, nghost: -nghost, nghost: -nghost]\n del2u[j,-nghost: , nghost:-nghost, nghost:-nghost] = del2u[j, nghost:2*nghost, nghost: -nghost, nghost: -nghost]\n del2u[j, nghost:-nghost, : nghost, nghost:-nghost] = del2u[j, nghost: -nghost,-2*nghost: -nghost, nghost: -nghost]\n del2u[j, nghost:-nghost,-nghost: , nghost:-nghost] = del2u[j, nghost: -nghost, nghost:2*nghost, nghost: -nghost]\n del2u[j, nghost:-nghost, nghost:-nghost, : nghost] = del2u[j, nghost: -nghost, nghost: -nghost,-2*nghost: -nghost]\n del2u[j, nghost:-nghost, nghost:-nghost,-nghost: ] = del2u[j, nghost: -nghost, nghost: -nghost, nghost:2*nghost]\n for ivisc in param.ivisc:\n ivisc = str.strip(ivisc,'\\n')\n if 'nu-const' not in ivisc and 'shock' not in ivisc\\\n and 'hyper' not in ivisc and len(ivisc) > 0:\n print('fluid_reynolds WARNING: '+ivisc+' not implemented\\n'+\n 'terms may be missing from the standard rate of strain tensor')\n fvisc = fvisc + param.nu*del2u\n del(del2u)\n tmp0 = grad(uu[0],grid.dx,grid.dy,grid.dz,x=grid.x,y=grid.y,\n coordinate_system=param.coord_system)\n for j in range(0,3):\n tmp0[j, :nghost,nghost:-nghost,nghost:-nghost] = tmp0[j,-2*nghost:-nghost,nghost:-nghost,nghost:-nghost]\n tmp0[j,-nghost:,nghost:-nghost,nghost:-nghost] = tmp0[j, nghost: 2*nghost,nghost:-nghost,nghost:-nghost]\n tmp0[j,nghost:-nghost, :nghost,nghost:-nghost] = tmp0[j,nghost:-nghost,-2*nghost:-nghost,nghost:-nghost]\n tmp0[j,nghost:-nghost,-nghost:,nghost:-nghost] = tmp0[j,nghost:-nghost, nghost: 2*nghost,nghost:-nghost]\n tmp0[j,nghost:-nghost,nghost:-nghost, :nghost] = tmp0[j,nghost:-nghost,nghost:-nghost,-2*nghost:-nghost]\n tmp0[j,nghost:-nghost,nghost:-nghost,-nghost:] = tmp0[j,nghost:-nghost,nghost:-nghost, nghost: 2*nghost]\n tmp1 = grad(uu[1],grid.dx,grid.dy,grid.dz,x=grid.x,y=grid.y,\n coordinate_system=param.coord_system)\n for j in range(0,3):\n tmp1[j, :nghost,nghost:-nghost,nghost:-nghost] = tmp1[j,-2*nghost:-nghost,nghost:-nghost,nghost:-nghost]\n tmp1[j,-nghost:,nghost:-nghost,nghost:-nghost] = tmp1[j, nghost: 2*nghost,nghost:-nghost,nghost:-nghost]\n tmp1[j,nghost:-nghost, :nghost,nghost:-nghost] = tmp1[j,nghost:-nghost,-2*nghost:-nghost,nghost:-nghost]\n tmp1[j,nghost:-nghost,-nghost:,nghost:-nghost] = tmp1[j,nghost:-nghost, nghost: 2*nghost,nghost:-nghost]\n tmp1[j,nghost:-nghost,nghost:-nghost, :nghost] = tmp1[j,nghost:-nghost,nghost:-nghost,-2*nghost:-nghost]\n tmp1[j,nghost:-nghost,nghost:-nghost,-nghost:] = tmp1[j,nghost:-nghost,nghost:-nghost, nghost: 2*nghost]\n tmp2 = grad(uu[2],grid.dx,grid.dy,grid.dz,x=grid.x,y=grid.y,\n coordinate_system=param.coord_system)\n for j in range(0,3):\n tmp2[j, :nghost,nghost:-nghost,nghost:-nghost] = tmp2[j,-2*nghost:-nghost,nghost:-nghost,nghost:-nghost]\n 
tmp2[j,-nghost:,nghost:-nghost,nghost:-nghost] = tmp2[j, nghost: 2*nghost,nghost:-nghost,nghost:-nghost]\n tmp2[j,nghost:-nghost, :nghost,nghost:-nghost] = tmp2[j,nghost:-nghost,-2*nghost:-nghost,nghost:-nghost]\n tmp2[j,nghost:-nghost,-nghost:,nghost:-nghost] = tmp2[j,nghost:-nghost, nghost: 2*nghost,nghost:-nghost]\n tmp2[j,nghost:-nghost,nghost:-nghost, :nghost] = tmp2[j,nghost:-nghost,nghost:-nghost,-2*nghost:-nghost]\n tmp2[j,nghost:-nghost,nghost:-nghost,-nghost:] = tmp2[j,nghost:-nghost,nghost:-nghost, nghost: 2*nghost]\n #effect of compressibility \n if len(lnrho) > 0:\n divu = div(uu,grid.dx,grid.dy,grid.dz,x=grid.x,y=grid.y,\n coordinate_system=param.coord_system)\n divu[ :nghost,nghost:-nghost,nghost:-nghost] = divu[-2*nghost:-nghost,nghost:-nghost,nghost:-nghost]\n divu[-nghost:,nghost:-nghost,nghost:-nghost] = divu[ nghost: 2*nghost,nghost:-nghost,nghost:-nghost]\n divu[nghost:-nghost, :nghost,nghost:-nghost] = divu[nghost:-nghost,-2*nghost:-nghost,nghost:-nghost]\n divu[nghost:-nghost,-nghost:,nghost:-nghost] = divu[nghost:-nghost, nghost: 2*nghost,nghost:-nghost]\n divu[nghost:-nghost,nghost:-nghost, :nghost] = divu[nghost:-nghost,nghost:-nghost,-2*nghost:-nghost]\n divu[nghost:-nghost,nghost:-nghost,-nghost:] = divu[nghost:-nghost,nghost:-nghost, nghost: 2*nghost]\n gradlnrho = grad(lnrho,grid.dx,grid.dy,grid.dz,x=grid.x,y=grid.y,\n coordinate_system=param.coord_system)\n for j in range(0,3):\n gradlnrho[j, :nghost,nghost:-nghost,nghost:-nghost] = gradlnrho[j,-2*nghost:-nghost,nghost:-nghost,nghost:-nghost]\n gradlnrho[j,-nghost:,nghost:-nghost,nghost:-nghost] = gradlnrho[j, nghost: 2*nghost,nghost:-nghost,nghost:-nghost]\n gradlnrho[j,nghost:-nghost, :nghost,nghost:-nghost] = gradlnrho[j,nghost:-nghost,-2*nghost:-nghost,nghost:-nghost]\n gradlnrho[j,nghost:-nghost,-nghost:,nghost:-nghost] = gradlnrho[j,nghost:-nghost, nghost: 2*nghost,nghost:-nghost]\n gradlnrho[j,nghost:-nghost,nghost:-nghost, :nghost] = gradlnrho[j,nghost:-nghost,nghost:-nghost,-2*nghost:-nghost]\n gradlnrho[j,nghost:-nghost,nghost:-nghost,-nghost:] = gradlnrho[j,nghost:-nghost,nghost:-nghost, nghost: 2*nghost]\n Sglnrho = np.zeros_like(uu)\n Sglnrho[0] = dot(tmp0,gradlnrho) +\\\n (tmp0[0]+tmp1[0]+tmp2[0]-th2*divu)*gradlnrho[0] \n Sglnrho[1] = dot(tmp1,gradlnrho) +\\\n (tmp0[1]+tmp1[1]+tmp2[1]-th2*divu)*gradlnrho[1]\n Sglnrho[2] = dot(tmp2,gradlnrho) +\\\n (tmp0[2]+tmp1[2]+tmp2[2]-th2*divu)*gradlnrho[2]\n graddivu = grad(divu,grid.dx,grid.dy,grid.dz,x=grid.x,y=grid.y,\n coordinate_system=param.coord_system)\n for j in range(0,3):\n graddivu[j, :nghost,nghost:-nghost,nghost:-nghost] = graddivu[j,-2*nghost:-nghost,nghost:-nghost,nghost:-nghost]\n graddivu[j,-nghost:,nghost:-nghost,nghost:-nghost] = graddivu[j, nghost: 2*nghost,nghost:-nghost,nghost:-nghost]\n graddivu[j,nghost:-nghost, :nghost,nghost:-nghost] = graddivu[j,nghost:-nghost,-2*nghost:-nghost,nghost:-nghost]\n graddivu[j,nghost:-nghost,-nghost:,nghost:-nghost] = graddivu[j,nghost:-nghost, nghost: 2*nghost,nghost:-nghost]\n graddivu[j,nghost:-nghost,nghost:-nghost, :nghost] = graddivu[j,nghost:-nghost,nghost:-nghost,-2*nghost:-nghost]\n graddivu[j,nghost:-nghost,nghost:-nghost,-nghost:] = graddivu[j,nghost:-nghost,nghost:-nghost, nghost: 2*nghost]\n fvisc = fvisc + param.nu*(th1*graddivu+Sglnrho)\n del(Sglnrho)\n elif param.ldensity:\n print('fluid_reynolds WARNING: no lnrho provided\\n'+\n 'rate of strain tensor likely incomplete')\n #shock contribution\n if lshock:\n if len(shock) == 0:\n print('fluid_reynolds WARNING: no shock provided\\n'+\n 
'rate of strain tensor likely incomplete')\n else:\n shock[ :nghost,nghost:-nghost,nghost:-nghost] = shock[-2*nghost:-nghost,nghost:-nghost,nghost:-nghost]\n shock[-nghost:,nghost:-nghost,nghost:-nghost] = shock[ nghost: 2*nghost,nghost:-nghost,nghost:-nghost]\n shock[nghost:-nghost, :nghost,nghost:-nghost] = shock[nghost:-nghost,-2*nghost:-nghost,nghost:-nghost]\n shock[nghost:-nghost,-nghost:,nghost:-nghost] = shock[nghost:-nghost, nghost: 2*nghost,nghost:-nghost]\n shock[nghost:-nghost,nghost:-nghost, :nghost] = shock[nghost:-nghost,nghost:-nghost,-2*nghost:-nghost]\n shock[nghost:-nghost,nghost:-nghost,-nghost:] = shock[nghost:-nghost,nghost:-nghost, nghost: 2*nghost]\n divugradlnrho = np.zeros_like(uu)\n gradshock = grad(shock,grid.dx,grid.dy,grid.dz,x=grid.x,y=grid.y,\n coordinate_system=param.coord_system)\n for j in range(0,3):\n gradshock[j, :nghost,nghost:-nghost,nghost:-nghost] = gradshock[j,-2*nghost:-nghost,nghost:-nghost,nghost:-nghost]\n gradshock[j,-nghost:,nghost:-nghost,nghost:-nghost] = gradshock[j, nghost: 2*nghost,nghost:-nghost,nghost:-nghost]\n gradshock[j,nghost:-nghost, :nghost,nghost:-nghost] = gradshock[j,nghost:-nghost,-2*nghost:-nghost,nghost:-nghost]\n gradshock[j,nghost:-nghost,-nghost:,nghost:-nghost] = gradshock[j,nghost:-nghost, nghost: 2*nghost,nghost:-nghost]\n gradshock[j,nghost:-nghost,nghost:-nghost, :nghost] = gradshock[j,nghost:-nghost,nghost:-nghost,-2*nghost:-nghost]\n gradshock[j,nghost:-nghost,nghost:-nghost,-nghost:] = gradshock[j,nghost:-nghost,nghost:-nghost, nghost: 2*nghost]\n for j in range(0,3):\n divugradlnrho[j] = param.nu_shock*divu*gradshock[j] +\\\n param.nu_shock*shock*(divu*gradlnrho[j] + graddivu[j])\n del(divu,gradshock,gradlnrho,graddivu)\n fvisc = fvisc + divugradlnrho\n del(divugradlnrho)\n if lhyper3:\n #deluij5 = np.zeros_like([uu,uu,uu])\n #uij5glnrho to be included\n del6u = np.zeros_like(uu)\n for j in range(0,3):\n del6u[j] = del6(uu[j],grid.dx,grid.dy,grid.dz)\n del6u[j, :nghost,nghost:-nghost,nghost:-nghost] = del6u[j,-2*nghost:-nghost,nghost:-nghost,nghost:-nghost]\n del6u[j,-nghost:,nghost:-nghost,nghost:-nghost] = del6u[j, nghost: 2*nghost,nghost:-nghost,nghost:-nghost]\n del6u[j,nghost:-nghost, :nghost,nghost:-nghost] = del6u[j,nghost:-nghost,-2*nghost:-nghost,nghost:-nghost]\n del6u[j,nghost:-nghost,-nghost:,nghost:-nghost] = del6u[j,nghost:-nghost, nghost: 2*nghost,nghost:-nghost]\n del6u[j,nghost:-nghost,nghost:-nghost, :nghost] = del6u[j,nghost:-nghost,nghost:-nghost,-2*nghost:-nghost]\n del6u[j,nghost:-nghost,nghost:-nghost,-nghost:] = del6u[j,nghost:-nghost,nghost:-nghost, nghost: 2*nghost]\n #del6 for non-cartesian tba\n #del6u[j] = del6(uu[j],grid.dx,grid.dy,grid.dz,x=grid.x,y=grid.y,\n # coordinate_system=param.coord_system)\n fvisc = fvisc + param.nu_hyper3*del6u\n del(del6u)\n fvisc2 = np.sqrt(dot2(fvisc))\n #advective forces\n advec = np.zeros_like(uu)\n advec[0] = dot(uu,tmp0)\n advec[1] = dot(uu,tmp1)\n advec[0] = dot(uu,tmp2)\n del(tmp0,tmp1,tmp2)\n advec2 = np.sqrt(dot2(advec))\n del(advec)\n #avoid division by zero\n if fvisc2.max() > 0:\n fvisc2[np.where(fvisc2==0)] = fvisc2[np.where(fvisc2>0)].min()\n Re = advec2/fvisc2\n #set minimum floor to exclude zero-valued Re \n Re[np.where(Re==0)] = Re[np.where(Re>0)].min()\n else:\n Re = advec2\n print('Re undefined')\n return Re", "def set_bc(self, problem):\n bcs = problem.bcs\n n_bound = cfg.const['N_GHOST_CELLS']\n # Left X-b.c.\n for i in range(0, self.i_min):\n for j in range(self.j_min, self.j_max):\n for k in range(self.k_min, self.k_max): \n if 
bcs[0] == 't': \n self.U[i][j][k] = self.U[self.i_min][j][k]\n elif bcs[0] == 'w':\n for num in [0, 2, 3, 4]: # 0 -> 3, 1 -> 2, i_min-1 -> i_min, i_min-2 -> i_min+1\n self.U[i][j][k][num] = self.U[self.i_min + (self.i_min - i - 1)][j][k][num]\n for num in [1]:\n self.U[i][j][k][num] = - self.U[self.i_min + (self.i_min - i - 1)][j][k][num]\n else:\n print(\"Errof field.set_ics(): only wall-type and transmissive boundaries supported! Bye!\")\n # Right X-b.c.\n for i in range(self.i_max, self.i_max+n_bound):\n for j in range(self.j_min, self.j_max):\n for k in range(self.k_min, self.k_max): \n if bcs[1] == 't':\n self.U[i][j][k] = self.U[self.i_max-1][j][k]\n elif bcs[1] == 'w':\n for num in [0, 2, 3, 4]: # i_max -> i_max-1 , i_max+1-> i_max-2\n self.U[i][j][k][num] = self.U[self.i_max - (i - self.i_max + 1)][j][k][num]\n for num in [1]:\n self.U[i][j][k][num] = - self.U[self.i_max - (i - self.i_max + 1)][j][k][num]\n else:\n print(\"Error field.set_ics(): only wall-type and transmissive boundaries supported! Bye!\")\n # Left Y-b.c.\n for i in range(0, self.i_max+n_bound):\n for j in range(0, self.j_min):\n for k in range(self.k_min, self.k_max): \n if bcs[2] == 't':\n self.U[i][j][k] = self.U[i][self.j_min][k]\n elif bcs[2] == 'w':\n for num in [0, 1, 3, 4]:\n self.U[i][j][k][num] = self.U[i][self.j_min + (self.j_min - j - 1)][k][num]\n for num in [2]:\n self.U[i][j][k][num] = - self.U[i][self.j_min + (self.j_min - j - 1)][k][num]\n else:\n print(\"Error field.set_ics(): only wall-type and transmissive boundaries supported! Bye!\")\n # Right Y-b.c.\n for i in range(0, self.i_max+n_bound):\n for j in range(self.j_max, self.j_max+n_bound):\n for k in range(self.k_min, self.k_max): \n if bcs[3] == 't':\n self.U[i][j][k] = self.U[i][self.j_max-1][k]\n elif bcs[3] == 'w':\n for num in [0, 1, 3, 4]:\n self.U[i][j][k][num] = self.U[i][self.j_max - (j - self.j_max + 1)][k][num]\n for num in [2]:\n self.U[i][j][k][num] = -self.U[i][self.j_max - (j - self.j_max + 1)][k][num]\n else:\n print(\"Error field.set_ics(): only wall-type and transmissive boundaries supported! Bye!\")\n # Left Z-b.c.\n for i in range(0, self.i_max+n_bound):\n for j in range(0, self.j_max+n_bound):\n for k in range(0, self.k_min): \n if bcs[4] == 't':\n self.U[i][j][k] = self.U[i][j][self.k_min]\n elif bcs[4] == 'w':\n for num in [0, 1, 2, 4]:\n self.U[i][j][k][num] = self.U[i][j][self.k_min + (self.k_min - k - 1)][num]\n for num in [3]:\n self.U[i][j][k][num] = - self.U[i][j][self.k_min + (self.k_min - k - 1)][num]\n else:\n print(\"Error field.set_ics(): only wall-type and transmissive boundaries supported! Bye!\")\n # Right Z-b.c.\n for i in range(0, self.i_max+n_bound):\n for j in range(0, self.j_max+n_bound):\n for k in range(self.k_max, self.k_max+n_bound):\n if bcs[5] == 't':\n self.U[i][j][k] = self.U[i][j][self.k_max-1]\n elif bcs[5] == 'w':\n for num in [0, 1, 2, 4]:\n self.U[i][j][k][num] = self.U[i][j][self.k_max - (k - self.k_max + 1)][num]\n for num in [3]:\n self.U[i][j][k][num] = - self.U[i][j][self.k_max - (k - self.k_max + 1)][num]\n else:\n print(\"Error field.set_ics(): only wall-type and transmissive boundaries supported! Bye!\")", "def initialize_system(self):\n self.mfd.set_mesh(self.mesh)\n [[div_data, div_row, div_col], \n [div_t_data, div_t_row, div_t_col]] = self.mfd.build_div()\n [self.m_x_coo_data, \n m_x_coo_row, \n m_x_coo_col] = self.mfd.build_m(save_update_info=True)\n\n self.m_x_coo_length = len(self.m_x_coo_data)\n \n # The data for the bottom right should be zeros. 
\n [c_data, c_row, c_col] = self.mfd.build_bottom_right()\n \n [coupling_data, coupling_row, coupling_col] = self.mfd.build_coupling_terms()\n\n self.div = sparse.coo_matrix((np.array(div_data), \n (np.add(np.array(div_row), \n -self.mesh.get_number_of_faces()), \n np.array(div_col))))\n self.div = self.div.tocsr()\n\n lhs_data = self.m_x_coo_data\n lhs_row = m_x_coo_row\n lhs_col = m_x_coo_col\n \n lhs_data += div_data\n lhs_row += div_row\n lhs_col += div_col\n\n lhs_data += div_t_data\n lhs_row += div_t_row\n lhs_col += div_t_col \n \n self.c_start = len(lhs_data)\n \n lhs_data += c_data\n lhs_row += c_row\n lhs_col += c_col \n\n self.c_end = len(c_data)\n\n lhs_data += coupling_data\n lhs_row += coupling_row\n lhs_col += coupling_col\n\n # Convert m_x_coo_data to numpy array. \n self.m_x_coo_data = np.array(self.m_x_coo_data)\n\n self.lhs_coo = sparse.coo_matrix((np.array(lhs_data), \n (np.array(lhs_row), \n np.array(lhs_col))))\n\n # RHS construction is for Neumann and Dirichlet \n # boundaries specified by the mesh. \n self.rhs_mfd = self.mfd.build_rhs()", "def off(self,ax):\n # remove cell lines if thery are on the plot\n # (if new axes are created the cell lines will be not there)\n for line in self.cell_lines:\n try:\n ax.lines.remove(line)\n except ValueError:\n pass\n # set lines and coordinates to empty lists \n self.cell_lines = []\n self.xx_cells = []", "def front_is_clear(): #py:front_is_clear\n return RUR._front_is_clear_()", "def switch_off():\n app.redis.flushall()\n colour = [0, 0, 0]\n app.data = {\"colour\": colour, \"mode\": \"sector-diverge\"}\n return enqueue_and_return(app.data)", "def reset(self):\n self.mol.RHF(doPrint=False)\n self.dipole = []\n self.angmom = []\n self.Energy = []\n self.shape = []", "def Clear(port):\n\tport.write(\"C\")", "def test_dflipflop(self):\n circ = DFlipFlop(size=2)\n circ.clk.pulse()\n self.assertSigEq(circ.q, 0)\n circ.d = 3\n self.assertSigEq(circ.q, 0)\n circ.clk.set()\n self.assertSigEq(circ.q, 3)\n circ.d = 2\n self.assertSigEq(circ.q, 3)", "def clear(self):\n self.SetPoint = 0.0\n\n self.PTerm = 0.0\n self.ITerm = 0.0\n self.DTerm = 0.0\n self.last_error = 0.0\n\n # Windup Guard\n self.int_error = 0.0\n self.windup_guard = 20.0\n\n self.output = 0.0", "def clear(self):\n self.SetPoint = 0.0\n\n self.PTerm = 0.0\n self.ITerm = 0.0\n self.DTerm = 0.0\n self.last_error = 0.0\n\n # Windup Guard\n self.int_error = 0.0\n self.windup_guard = 20.0\n\n self.output = 0.0", "def clear(self):\n self.SetPoint = 0.0\n\n self.PTerm = 0.0\n self.ITerm = 0.0\n self.DTerm = 0.0\n self.last_error = 0.0\n\n # Windup Guard\n self.int_error = 0.0\n self.windup_guard = 20.0\n\n self.output = 0.0", "def reset(self):\n self.posXY = (0,0)\n self.magXY = (1.0,1.0)\n self.rot = 0.0\n self.trans = 255\n self.isDone = False\n self.isFirst = True\n\n self.kill()\n self.Group = pyglet.graphics.OrderedGroup(self.order)\n self.isReady = self.check()", "def closeHoles3D(imIn, imOut, grid=m3D.DEFAULT_GRID3D):\r\n \r\n imWrk = m3D.image3DMb(imIn)\r\n m3D.negate3D(imIn, imIn)\r\n m3D.drawEdge3D(imWrk)\r\n m3D.logic3D(imIn, imWrk, imWrk, \"inf\")\r\n build3D(imIn, imWrk, grid=grid)\r\n m3D.negate3D(imIn, imIn)\r\n m3D.negate3D(imWrk, imOut)", "def __init__(self,datamask , h, w, upper, lower):\n self.datamask = datamask\n self.imgidmask = datamask[datamask.type == 'global'].reset_index(drop = True)\n self.h = h\n self.w = w\n self.upper = upper\n self.lower = lower\n self._birads_to_idxs = get_birad()\n self._densities_to_idxs = get_dens()\n self.tfms = 
get_transform(height = self.h, width =self.w)", "def test_reset(self):\n self.p.C[0][0] = np.inf\n self.step()", "def toggle_axis(self):\n # cycle through three axis states\n states = [False, 'world', 'all']\n # the state after toggling\n index = (states.index(self.view['axis']) + 1) % len(states)\n # update state to next index\n self.view['axis'] = states[index]\n # perform gl actions\n self.update_flags()", "async def async_turn_aux_heat_off(self) -> None:\n await self._set_aux_heat(False)", "def destroy(self,hc,hr):\n c = self.level.destroy(hc,hr)\n if c:\n self.cells[hc,hr] = 0 # blank\n self.prepare()\n return c", "def __init__(self, active: bool):\n self.clahe = cv2.createCLAHE(clipLimit=3.0, tileGridSize=(8, 8))\n self.active = active", "def clear_strip(self):\n self.spi.write(bytearray([0] * 4 +\n [255, 0, 0, 0] * self.bmp2led.num_pixels +\n [255] * ((self.bmp2led.num_pixels + 15) //\n 16)))", "def correct(self):\n self.parent.copyCurrentWinState(self.pltw)\n self.pltw.blklst[self.blkno][self.ypos] = self.data[1] - self.data[2]\n self.pltw.updatePlot()\n self.pltw.dirty = True\n self.pltw.activecurv = self.cpos\n self.parent.updateUI()\n self.hide()", "def glclear(self):\n self.pixels = [\n [color(self.r, self.g, self.b) for x in range(self.width)]\n for y in range(self.height)\n ]", "def __init__(self, resolution=None, colour='multi', cs_pin=CS0_PIN, dc_pin=DC_PIN, reset_pin=RESET_PIN, busy_pin=BUSY_PIN, h_flip=False, v_flip=False, spi_bus=None, i2c_bus=None, gpio=None): # noqa: E501\n self._spi_bus = spi_bus\n self._i2c_bus = i2c_bus\n self.eeprom = eeprom.read_eeprom(i2c_bus=i2c_bus)\n\n # Check for supported display variant and select the correct resolution\n # Eg: 600x480 and 640x400\n if resolution is None:\n if self.eeprom is not None and self.eeprom.display_variant in (14, 15, 16):\n resolution = [_RESOLUTION_7_3_INCH, None, _RESOLUTION_7_3_INCH][self.eeprom.display_variant - 14]\n else:\n resolution = _RESOLUTION_7_3_INCH\n\n if resolution not in _RESOLUTION.keys():\n raise ValueError('Resolution {}x{} not supported!'.format(*resolution))\n\n self.resolution = resolution\n self.width, self.height = resolution\n self.border_colour = WHITE\n self.cols, self.rows, self.rotation, self.offset_x, self.offset_y, self.resolution_setting = _RESOLUTION[resolution]\n\n if colour not in ('multi'):\n raise ValueError('Colour {} is not supported!'.format(colour))\n\n self.colour = colour\n self.lut = colour\n\n self.buf = numpy.zeros((self.rows, self.cols), dtype=numpy.uint8)\n\n self.dc_pin = dc_pin\n self.reset_pin = reset_pin\n self.busy_pin = busy_pin\n self.cs_pin = cs_pin\n try:\n self.cs_channel = [8, 7].index(cs_pin)\n except ValueError:\n self.cs_channel = 0\n self.h_flip = h_flip\n self.v_flip = v_flip\n\n self._gpio = gpio\n self._gpio_setup = False\n\n self._luts = None", "def test_goto_field_boss_hartmann(self):\n sopTester.updateModel('mcp', TestHelper.mcpState['all_off'])\n cmdState = self.actorState.gotoField\n cmdState.reinitialize(self.cmd)\n cmdState.doSlew = False\n cmdState.doCalibs = False\n cmdState.arcTime = 0\n cmdState.flatTime = 0\n cmdState.doGuider = False\n self._goto_field_boss(5, 29, 0, 0, cmdState)", "def flip(self):", "def View_Preorder( self ):\r\n cb.order = 0\r\n self.system.Draw( )", "def setup(self, flags):\n self.figure = pylab.figure(1)\n self.axes = {}\n self.stream_data = {}\n self.flags = flags" ]
[ "0.5735864", "0.5659085", "0.54460454", "0.5380752", "0.53681165", "0.5359119", "0.5346913", "0.53467923", "0.5345186", "0.53185624", "0.53087205", "0.5306554", "0.5283048", "0.5248694", "0.52321434", "0.5206659", "0.52050847", "0.51976866", "0.5184278", "0.5164785", "0.5163862", "0.5160837", "0.5130912", "0.5125229", "0.5122257", "0.5100941", "0.5092517", "0.50866395", "0.5083514", "0.5078332", "0.5073586", "0.50712323", "0.5063014", "0.505337", "0.50497514", "0.5031588", "0.5021878", "0.5021613", "0.5014389", "0.49799636", "0.49756312", "0.49667153", "0.49662048", "0.49654177", "0.49583092", "0.49534765", "0.4943622", "0.49258924", "0.49193874", "0.4914128", "0.4910045", "0.49081504", "0.4905574", "0.49028543", "0.48971102", "0.4895191", "0.48948765", "0.48917326", "0.48906767", "0.4890428", "0.4889542", "0.4888936", "0.48868757", "0.48838726", "0.48828128", "0.48722678", "0.48721963", "0.4869312", "0.48671585", "0.48622757", "0.48621157", "0.48620453", "0.48617938", "0.48488626", "0.48407906", "0.4839583", "0.4839457", "0.48389086", "0.48371607", "0.483514", "0.4829912", "0.4825995", "0.4822763", "0.4822763", "0.4822763", "0.4800081", "0.47962645", "0.47869554", "0.47868323", "0.4776469", "0.47728628", "0.47722125", "0.47705832", "0.47681785", "0.47669384", "0.47654927", "0.47653967", "0.47549942", "0.4754956", "0.4753135", "0.4752947" ]
0.0
-1
ffs open, flat off, 3x axis clear, guider on
def test_guider_start_flatsOn(self):
        sopTester.updateModel('mcp', TestHelper.mcpState['flats'])
        self._guider_start(7, 20, 0, 0)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def clear(self):\n self.np.fill(OFF)\n self.np.show()\n return True", "def cerrar(self):\n self.x0 = np.array(self.x0, dtype=float)\n self.x = np.array(self.x, dtype=float)\n self.tipos = np.array(self.tipos, dtype=int)\n self.mask_fr = self.tipos == 1\n self.mask_in = self.tipos == 2\n self.num_fr = np.sum(self.mask_fr)\n self.num_in = np.sum(self.mask_in)\n self.open = False", "def abrir(self):\n self.x0 = [val for val in self.x0]\n self.x = [val for val in self.x]\n self.tipos = [val for val in self.tipos]\n self.mask_fr = [val for val in self.mask_fr]\n self.mask_in = [val for val in self.mask_in]\n self.open = True", "def clear_crossfilter1(self):\n print ('Trigger clear')\n self.query_dict = {}\n self.plot_data = None\n self.create_figure_new()\n layout_doc.children[4].children[0] = self.p", "def clear_crossfilter2(self):\n print ('Trigger clear')\n self.query_dict = {}\n self.plot_data = None\n self.create_figure_new()\n layout_doc.children[4].children[1] = self.p", "def all_off(self):\n self.fill_off()\n self.update()\n self.fill_off()\n self.update()", "def clean_graph(self):\n #self.time = 0#\n \n # values of microcontroller\n #if self.graf_t.buffer_info()[1] != 0:\n for a in range(self.graf_t.buffer_info()[1]):\n self.graf_t.pop()\n \n for a in range(self.graf_r.buffer_info()[1]):\n self.graf_r.pop()\n\n for a in range(self.graf_x0.buffer_info()[1]):\n self.graf_x0.pop()\n\n for a in range(self.graf_x1.buffer_info()[1]):\n self.graf_x1.pop()\n\n for a in range(self.graf_u.buffer_info()[1]):\n self.graf_u.pop()\n \n self.referenceLine.set_data(self.graf_t, self.graf_r)\n self.x0Line.set_data(self.graf_t, self.graf_x0)\n self.x1Line.set_data(self.graf_t, self.graf_x1)\n self.uLine.set_data(self.graf_t, self.graf_u)\n \n try:\n #Draw the lines\n if self.checkBox_R.isChecked():\n self.mplWidget.canvas.ax.draw_artist(self.referenceLine)\n if self.checkBox_x0.isChecked():\n self.mplWidget.canvas.ax.draw_artist(self.x0Line)\n if self.checkBox_U.isChecked():\n self.mplWidget.canvas.ax.draw_artist(self.uLine)\n if self.checkBox_x1.isChecked():\n self.mplWidget.canvas.ax.draw_artist(self.x1Line)\n except AssertionError:\n pass\n try:\n self.mplWidget.canvas.blit(self.mplWidget.canvas.ax.bbox)\n except AttributeError:\n pass\n \n # force an image redraw\n self.mplWidget.canvas.draw()", "def clear(self):\n row, col = self.selected\n if self.cubes[row][col].value == 0:\n self.cubes[row][col].set_temp(0)", "def clear(self):\n if self.flag == 0:\n for coord in INDICES:\n self.kill(coord)\n self.chart[coord] = DEAD", "def f_fixed(self):\n self.fx_free = self.fy_free = self.fz_free = False\n return self", "def off(self,ax):\n # remove cell lines if thery are on the plot\n # (if new axes are created the cell lines will be not there)\n for line in self.cell_lines:\n try:\n ax.lines.remove(line)\n except ValueError:\n pass\n # set lines and coordinates to empty lists \n self.cell_lines = []\n self.xx_cells = []", "def clear(tft, oled):\n oled.fill(tft.BLACK)", "def toggle_zero_grid(self, x):\r\n self.konfig.zero.set_grid(x)\r\n self.zeroGraf.toggle_grid(x)", "def full_reset(self):\n self.at_cmd('CFUN=1')", "def reset(self):\n self.u0.fill(0.)\n self.u1.fill(0.)\n self.u2.fill(0.)\n self.time = 0.", "def _switch(self):\n self.fill= not self.fill", "def reset(self):\r\n # reset Wheel encoders\r\n self.start_time = time.time()\r\n [left_start, right_start] = self.Roomba.Query(43, 44)\r\n self.Motion.reset(left_start, right_start)\r\n # reset bumper\r\n self.bumper.reset()\r\n\r\n #reset grid world 
data\r\n self.action=[0.0,0.0]\r\n self.grid_state= [0,0,0]\r\n self.real_state = [0.0, 0.0, 0.0]\r\n self.trans_model = None\r\n pass", "def setNoZeroColor():\n dislin.nobgd()", "def clearup(self):\n\t\tself.outChannel.clearup()\n\t\tself.inChannel.clearup()", "def front_wheel_from_axis():", "def clear_graphs(self):\n for ax in (self.master_plot, self.time_velocity, self.time_power, self.power_velocity):\n ax.cla()", "def toggle_fore_mod(self, checked):\n for tile in self.tiles:\n tile.toggle_fore_mod(checked)", "def cells_off(self):\n self.plotter.cells_off(self.ax)\n self.fig.canvas.draw()", "def clear(self):\r\n\t\tself.grid.fill(False)", "def off(self):", "def cells_off(self,ax):\n self.cells.off(ax)", "def toggle_axis(self):\n # cycle through three axis states\n states = [False, 'world', 'all']\n # the state after toggling\n index = (states.index(self.view['axis']) + 1) % len(states)\n # update state to next index\n self.view['axis'] = states[index]\n # perform gl actions\n self.update_flags()", "def all_off():\n Leds.red_left.brightness = 0\n Leds.red_right.brightness = 0\n Leds.green_left.brightness = 0\n Leds.green_right.brightness = 0\n Leds.blue_left.brightness = 0\n Leds.blue_right.brightness = 0", "def disable_cl1(self):\n self.write_versa5(0x10,0xc4) ## Enable xtal and clock\n self.write_versa5(0x21,0x81) ## Use and enable divider\n self.write_versa5(0x13,0x00) ## Use CL1 input instead of xtal\n self.write_versa5(0x10,0x80) ## Enable xtal input only\n self.write_versa5(0x17,0x04) ## Change top multiplier to 0x44\n self.write_versa5(0x18,0x40)", "def onSetToCustDims(self, evt):\n\t\tself.halfResampleZ.Enable(0)\n\t\tself.fourthResampleZ.Enable(0)\n\t\t\n\t\tfor obj in [self.factorLabel, self.dimLabel, self.newDimX, self.newDimY, self.newDimZ, self.factorX, self.factorY, self.factorZ]:\n\t\t\tobj.Enable(1)\n\t\ttry:\n\t\t\trx = int(self.newDimX.GetValue())\n\t\t\try = int(self.newDimY.GetValue())\n\t\t\trz = int(self.newDimZ.GetValue())\n\t\t\tself.currSize = (rx, ry, rz)\n\t\texcept:\n\t\t\tpass", "def reset(self) -> None:\n self.f1.reset()", "def reset(self):\n self.position = np.zeros(self.ndegres)\n self.velocity = np.zeros(self.ndegres)\n self.state = np.zeros(2*self.ndegres)\n self.flag = 0\n self.h_ref = np.array([self.ref for _ in range(self.horizon)])\n self.action = np.zeros(self.ACTION_DIM) \n self.h_action = np.zeros(self.ACTION_DIM*self.horizon)", "def plot_clear():\n plt.cla()", "def clickClearReferences(self, event):\n self.whiteReference = None\n self.lightBtn.color = '0.85'\n self.darkReference = None\n self.darkBtn.color = '0.85'\n plt.pause(0.3)\n self.axes.autoscale_view()", "def reset(self):\n super(PolygonTool, self).reset()\n # self.__nsides = None\n # self.__increment = None\n # self.__external = False # make this adjustable?\n self.__center = None\n for _i in range(self.__nsides):\n self.__xpts[_i] = 0.0\n self.__ypts[_i] = 0.0", "def reset(self):\n self.F = 0\n self.M = 0\n self.w = np.zeros(self.n)\n self.z = np.zeros(self.n)", "def clear_strip(self):\r\n wlogger.log_info(\"Clear Strip\")\r\n for led in range(self.num_led):\r\n self.set_pixel(led, 0, 0, 0)\r\n self.show()", "def zeroing(self):\n x_zeroed, y_zeroed, z_zeroed = False, False, False\n self._stepper_x.set_stepper(defines.STEPPER_X_MAX_HZ / 2, -defines.BOARD_X_LENGTH)\n self._stepper_y_left.set_stepper(defines.STEPPER_Y_MAX_HZ / 2, -defines.BOARD_Y_LENGTH)\n self._stepper_y_right.set_stepper(defines.STEPPER_Y_MAX_HZ / 2, -defines.BOARD_Y_LENGTH)\n 
self._stepper_z.set_stepper(defines.STEPPER_Z_MAX_HZ / 2, -defines.BOARD_Z_LENGTH)\n\n while x_zeroed is False or y_zeroed is False or z_zeroed is False:\n if x_zeroed is False and self._switch_reset_x.get_state() is True:\n self._stepper_x.set_stepper(0, 0)\n x_zeroed = True\n\n if y_zeroed is False and self._switch_reset_y.get_state() is True:\n self._stepper_y_left.set_stepper(0, 0)\n self._stepper_y_right.set_stepper(0, 0)\n y_zeroed = True\n\n if z_zeroed is False and self._switch_reset_z.get_state() is True:\n self._stepper_z.set_stepper(0, 0)\n z_zeroed = True", "def reset(self, fullreset=True):\n self.controlpoints = []\n self.contour = []\n self.ext_energies = []\n self.update()\n if fullreset:\n self.optimized = False", "def resetFrame(self):\n with self.lock:\n hbin = self.hbin\n vbin = self.vbin\n if hbin != 1:\n self.setHBin(1)\n if vbin != 1:\n self.setVBin(1)\n with self.lock:\n self.expArea = self.defaultExpArea\n x1, y1, x2, y2 = self.expArea\n self.xsize = x2 - x1\n self.ysize = y2 - y1", "def ToggleAllGizmoLocalMode( self ):\n\n value = self.gizmoMgr.GetGizmoLocal( 'pos' )\n self.gizmoMgr.SetGizmoLocal( 'pos', not value )\n self.gizmoMgr.SetGizmoLocal( 'rot', not value )\n self.gizmoMgr.SetGizmoLocal( 'scl', not value )", "def clear(self):\n lines = self._lines\n image, bkg_image = self.image, self._image\n for line in lines: line.clear(image, bkg_image) #prej bkg_img\n self._cursor = 0", "def clear(self):\n self.cmd(0x33) # $33 8-bit mode\n self.cmd(0x32) # $32 8-bit mode\n self.cmd(0x28) # $28 8-bit mode\n self.cmd(0x0C) # $0C 8-bit mode\n self.cmd(0x06) # $06 8-bit mode\n self.cmd(0x01) # $01 8-bit mode", "def reset_mask(self):\n\n self.mask = np.ones(self.dispersion.shape, dtype=bool)", "def clearLineshape(self):\n self.x = np.arange(self.start,self.stop,round(self.step,4))\n self.lineshape = np.zeros(len(self.x))", "def clear_strip(self):\n self.spi.write(bytearray([0] * 4 +\n [255, 0, 0, 0] * self.bmp2led.num_pixels +\n [255] * ((self.bmp2led.num_pixels + 15) //\n 16)))", "def fix(hobj):\n\n h.execute('create axon[2]', hobj)\n\n for sec in hobj.axon:\n sec.L = 30\n sec.diam = 1\n hobj.axonal.append(sec=sec)\n hobj.all.append(sec=sec) # need to remove this comment\n\n hobj.axon[0].connect(hobj.soma[0], 0.5, 0)\n hobj.axon[1].connect(hobj.axon[0], 1, 0)\n\n h.define_shape()", "def clean(self):\n\n if (self.clean_level == 'dusty') | (self.clean_level == 'clean'):\n idx, = np.where(self['B_flag'] == 0)\n self.data = self[idx, :]\n\n return", "def fast_off(self, *args, **kwargs):\n return self.set(0,0,0,fade=False)", "def modes_off(self):\n bm = self.fitsimage.get_bindmap()\n bm.reset_mode(self.fitsimage)", "def cygx3IndFlux(self):\n # --------------------------------------------------------------------------------------------- #\n # Read data\n fitsNnam = os.path.join(self.workpath, 'LCresults.fits')\n lcTab = Table.read(fitsNnam)\n detect = lcTab['ts'] >= self.tsmin\n lcTab = lcTab[detect] \n\n ind08 = (lcTab['mjd'] > 54700) & (lcTab['mjd'] < 54900) \n flux08 = lcTab['flux'][ind08]\n fluxerr08 = lcTab['fluxerr'][ind08]\n index08 = lcTab['index'][ind08]\n indexerr08 = lcTab['indexerr'][ind08]\n\n ind09 = (lcTab['mjd'] > 54900) & (lcTab['mjd'] < 55100) \n flux09 = lcTab['flux'][ind09]\n fluxerr09 = lcTab['fluxerr'][ind09]\n index09 = lcTab['index'][ind09]\n indexerr09 = lcTab['indexerr'][ind09]\n\n scale = 10**int(np.floor(np.log10( np.mean( np.concatenate( (flux08, flux09), axis=0) ) ))) \n\n # 
--------------------------------------------------------------------------------------------- #\n # Plot\n indplt = FermiPlot(savepath='', xsize=8.5, ysize=6)\n indplt.figname = os.path.join(self.workpath, 'IndvsFlux.pdf')\n indplt.xlabel = r'Flux ($10^{%d}$ ph\\,cm$^{-2}$\\,s$^{-1}$)'%(int(np.log10(scale)))\n indplt.ylabel = r'Index'\n indplt.mksize = 2\n indplt.color = self.lblue\n indplt.label = r'2008'\n indplt.plot(x=flux08/scale, xerr=fluxerr08/scale, y=index08, yerr=indexerr08)\n indplt.color = self.loran\n indplt.label = r'2009'\n indplt.plot(x=flux09/scale, xerr=fluxerr09/scale, y=index09, yerr=indexerr09)\n indplt.save()\n\n print(\"\\t=== Figure '{}' created ===\".format(indplt.figname)) \n return", "def clear(self):\n self._tiff = np.array([], dtype=\"uint8\")\n self._offset = 0", "def clear(self):\n for i in range(len(self.canvas)):\n self.canvas[i] = 0", "def cb_reset(event):\n axDirichlet.cla()\n # Reset Sliders\n sAlpha0.reset() # resetが駄目!一番最初に戻ってしまう\n sAlpha1.reset()\n sAlpha2.reset()\n alpha_update = [sAlpha0.val, sAlpha1.val, sAlpha2.val]\n print('alpha_update=', alpha_update)\n\n # ML\n lambda_ML = CatML.MLinfer(x_cat)\n\n axML.cla()\n drawBarGraph( axML, \"ML\", lambda_ML, bar_y_max, col_ML ) # Draw Bar graph\n\n\n # MAP\n dirichlet.set_param(alpha_update)\n lambda_MAP = CatMAP.MAPinfer(x_cat, dirichlet)\n\n axMAP.cla()\n drawBarGraph( axMAP, \"MAP\", lambda_MAP, bar_y_max, col_MAP ) # Draw Bar Graph\n\n # Bayes\n posteriorDirichlet.set_param(alpha_update)\n posteriorDirichlet.calcPosterior(x_cat)\n lambda_Bayes = np.zeros(3)\n for k in range(3):\n lambda_Bayes[k] = posteriorDirichlet.BayesInfer(k)\n\n axBayes.cla()\n drawBarGraph( axBayes, \"Bayes\", lambda_Bayes, bar_y_max, col_Bayes ) # Draw Bar Graph\n\n draw_pdf_contours(axDirichlet, dirichlet, True) # Draw Dirichlet\n\n print('Reset')\n print('lambda_ML =', lambda_ML)\n print('lambda_MAP =', lambda_MAP)\n print('lambda_Bayes=', lambda_Bayes)\n draw_point(axDirichlet, lambda_ML, col_ML)\n draw_point(axDirichlet, lambda_MAP, col_MAP)\n draw_point(axDirichlet, lambda_Bayes, col_Bayes)\n draw_point(axLikelihood, lambda_ML, col_ML)\n draw_point(axPosteriorDirichlet, lambda_MAP, col_MAP)\n draw_point(axPosteriorDirichlet, lambda_Bayes, col_Bayes)\n\n fig.canvas.draw_idle()", "def __reset_crosshair(self):\n self.lhor.set_ydata(self.y_coord)\n self.lver.set_xdata(self.x_coord)", "def wave_clear():\n return _u2i(_pigpio_command(_control, _PI_CMD_WVCLR, 0, 0))", "def zguider():\n gzero.gxoff = camera.status.guider[0] + gzero.gxoff\n gzero.gyoff = camera.status.guider[1] + gzero.gyoff\n guider(0,0)\n f = open('/data/guidezero','w')\n cPickle.dump(gzero,f)\n f.close()", "def fill_single_world():\n if not front_is_clear():\n if not right_is_clear():\n if not left_is_clear():\n put_beeper()", "def __clear_all(self,event):\n \n self.CoagS = \\\n self.CoagS_term = \\\n self.GR_term = \\\n self.J_time = \\\n self.J = \\\n self.J_lims = \\\n self.mmd_time = \\\n self.mmd_dp = \\\n self.mmd_time_sr = \\\n self.mmd_dp_sr = np.array([])\n \n # Initialize all np.nan variables\n self.J_peak = \\\n self.J_halfmax = \\\n self.J_median = \\\n self.gr = np.nan\n \n # Clears polygon used to outline particle mode\n self.polyx = \\\n self.polyy = \\\n self.polyx_out = \\\n self.polyy_out = np.array([])\n self.poly.set_xy(np.ones((2,2))*np.nan)\n self.poly_out.set_xy(np.ones((2,2))*np.nan)\n \n self.box_gr.set_val(\"%.2f\" % self.gr)\n self.box_J_peak.set_val(\"%.2f\" % self.J_peak)\n self.box_J_median.set_val(\"%.2f\" % self.J_median)\n 
self.box_J_halfmax.set_val(\"%.2f\" % self.J_halfmax)\n\n # clear average mode diameters and fit\n self.mmd_plot.set_data(np.nan,np.nan)\n self.mmd_plot_sr.set_data(np.nan,np.nan)\n self.mmd_fit_sr.set_data(np.nan,np.nan)\n self.J_plot.set_data(np.nan,np.nan)\n self.J_fit.set_data(np.nan,np.nan)\n self.J_vertical_line1.set_xdata(np.nan)\n self.J_vertical_line2.set_xdata(np.nan)\n self.gr_term_plot.set_data(np.nan,np.nan)\n self.coags_term_plot.set_data(np.nan,np.nan)\n self.dNdt_term_plot.set_data(np.nan,np.nan)\n\n plt.draw()", "def _set_None(self):\n for obj in self.axes:\n obj._set_None()\n self.normalizations = None\n self.FTparameters = None\n self.values = None\n # Set to None the properties inherited from Data\n super(DataND, self)._set_None()", "def test_flipflop(self):\n circ = FlipFlop(size=2)\n #test basic flip flop functionality\n circ.d = 3\n self.assertSigEq(circ.q, 0)\n circ.clk.pulse()\n self.assertSigEq(circ.q, 3)\n #test reset circuit\n circ.r.set()\n circ.clk.pulse()\n self.assertSigEq(circ.q, 0)\n circ.r.reset()\n #test load\n circ.l.set()\n circ.d = 3\n circ.clk.pulse()\n self.assertSigEq(circ.q, 0)\n #test enable\n circ.l.reset()\n circ.e.set()\n circ.clk.pulse()\n self.assertSigEq(circ.q, 0)", "def reset(self):\n self.vrp = np.matrix([0.5, 0.5, 1])\n self.vpn = np.matrix([0, 0, -1])\n self.vup = np.matrix([0, 1, 0])\n self.u = np.matrix([-1, 0, 0])\n self.extent = [1., 1., 1.]\n self.screen = [400., 400.]\n self.offset = [20., 20.]", "def clear(self) -> None:\n for y in range(self.width):\n for x in range(self.height):\n self.set_value(Point(y, x), FieldState.EMPTY)", "def plot3d(self):\n plot_rupture_wire3d(self)", "def reset(self):\n self.z[:] = 0", "def _clear(self, event):\n if self.ignore(event) or self._changed_canvas():\n return\n self._background = self.canvas.copy_from_bbox(self.ax.bbox)\n self.ax.draw_artist(self._checks)\n if hasattr(self, '_lines'):\n for l1, l2 in self._lines:\n self.ax.draw_artist(l1)\n self.ax.draw_artist(l2)", "def correct(self):\n self.parent.copyCurrentWinState(self.pltw)\n self.pltw.blklst[self.blkno][self.ypos] = self.data[1] - self.data[2]\n self.pltw.updatePlot()\n self.pltw.dirty = True\n self.pltw.activecurv = self.cpos\n self.parent.updateUI()\n self.hide()", "def float_to_front(qtile):\n for window in qtile.current_group.windows:\n if window.floating:\n window.cmd_bring_to_front()", "def reset(self):\n self.tile=\"\"", "def toggle_culling(self):\n self.view['cull'] = not self.view['cull']\n self.update_flags()", "def reset(self):\n try:\n self.ax.cla()\n except Exception as e:\n print 'Exception BasePlot:', e\n raise e\n \n self._plotbuffer = { pat: [0 for _ in range(self._plotlength)] for pat in self._patterns }\n self._timestampbuffer = { pat: [0 for _ in range(self._plotlength)] for pat in self._patterns }\n self.ax.set_axis_bgcolor('black')\n self.ax.set_xticks([])\n self.ax.set_yticks([])", "def reset(self):\n self._x = 0\n self._y = 0", "def sink_floats(qtile):\n for window in qtile.current_group.windows:\n if window.floating:\n window.toggle_floating()", "def remove_all(self):\n self.initial = None\n self.contour = None\n self.control_points = []", "def update_flags(self):\n # view mode, filled vs wirefrom\n if self.view['wireframe']:\n gl.glPolygonMode(gl.GL_FRONT_AND_BACK, gl.GL_LINE)\n else:\n gl.glPolygonMode(gl.GL_FRONT_AND_BACK, gl.GL_FILL)\n\n # set fullscreen or windowed\n self.set_fullscreen(fullscreen=self.view['fullscreen'])\n\n # backface culling on or off\n if self.view['cull']:\n 
gl.glEnable(gl.GL_CULL_FACE)\n else:\n gl.glDisable(gl.GL_CULL_FACE)\n\n # case where we WANT an axis and NO vertexlist\n # is stored internally\n if self.view['axis'] and self._axis is None:\n from .. import creation\n # create an axis marker sized relative to the scene\n axis = creation.axis(origin_size=self.scene.scale / 100)\n # create ordered args for a vertex list\n args = rendering.mesh_to_vertexlist(axis)\n # store the axis as a reference\n self._axis = self.batch.add_indexed(*args)\n\n # case where we DON'T want an axis but a vertexlist\n # IS stored internally\n elif not self.view['axis'] and self._axis is not None:\n # remove the axis from the rendering batch\n self._axis.delete()\n # set the reference to None\n self._axis = None", "def flip(self):", "def reset(self):\n\t\tself.offsets = self.start_off.copy()", "def emit_reset(self):\n for name in self.layout.axes:\n params = self.layout.axes_options.get(name, DEFAULT_AXIS_OPTIONS)\n self.write_event(ecodes.EV_ABS, name, int(sum(params[1:3]) / 2))\n\n for name in self.layout.buttons:\n self.write_event(ecodes.EV_KEY, name, False)\n\n for name in self.layout.hats:\n self.write_event(ecodes.EV_ABS, name, 0)\n\n self.device.syn()", "def reset(self):\n self.sample['masked'] = [False]*len(self.sample.index)\n self.sample['colour'] = ['undefined']*len(self.sample.index)", "def toggle_wireframe(self):\n self.view['wireframe'] = not self.view['wireframe']\n self.update_flags()", "def reset(self) -> list:\n self.x1 = 0\n self.y1 = -1\n self.z = 3\n self.x2 = 0\n self.y2 = 0\n self.frame = 0\n return self.get_state()", "def clear(self):\n self._delayvalue = _CFG[\"delay\"]\n self._colormode = _CFG[\"colormode\"]\n self._delete(\"all\")\n self._bgpic = self._createimage(\"\")\n self._bgpicname = \"nopic\"\n self._tracing = 1\n self._updatecounter = 0\n self._turtles = []\n self.bgcolor(\"white\")\n for btn in 1, 2, 3:\n self.onclick(None, btn)\n self.onkeypress(None)\n for key in self._keys[:]:\n self.onkey(None, key)\n self.onkeypress(None, key)\n Myturtle._pen = None", "def clear():\n\tglobal _s\n\t_s.screen.fill(_s.back)\n\t_s.tab(0,0)\n\t_flip()", "def toggle_fccm(self, i: int = -1, verbose=True):\n written = self.set_bit(\"d2\", 0, i)\n if verbose:\n print(\"FCCM: \" + written)", "def clear(self):\r\n self.SetPoint = 0.0\r\n\r\n self.PTerm = 0.0\r\n self.ITerm = 0.0\r\n self.DTerm = 0.0\r\n self.last_error = 0.0\r\n\r\n # Windup Guard\r\n self.int_error = 0.0\r\n self.windup_guard = 20.0\r\n\r\n self.output = 0.0", "def top_clearing(fieldlist):\n fieldlist[int((width+1)/2)][0] = plain\n fieldlist[int((width+1)/2)][1] = plain\n fieldlist[int((width+1)/2)][2] = plain\n fieldlist[int(((width+1)/2)+1)][0] = plain\n fieldlist[int(((width+1)/2)+1)][1] = plain\n fieldlist[int(((width+1)/2)+1)][2] = plain\n fieldlist[int((width-1)/2)][0] = plain\n fieldlist[int((width-1)/2)][1] = plain\n fieldlist[int((width-1)/2)][2] = plain\n fieldlist[int(((width-1)/2)-1)][0] = plain\n fieldlist[int(((width-1)/2)-1)][1] = plain\n fieldlist[int(((width-1)/2)-1)][2] = plain", "def drawAxes(t):\r\n t.speed(0)\r\n t.pd()\r\n t.forward(500)\r\n t.back(500)", "def clear(self):\n black = neo.Color(0,0,0)\n self.set_all(black)\n self.draw()", "def terminatePlane3D():\n dislin.grffin()", "def fullLatticeCheckChanged(self, val):\n if val == QtCore.Qt.Unchecked:\n self.writeFullLattice = False\n else:\n self.writeFullLattice = True", "def off_all(self):\n self._set_status(\"off\", \"11111111\")", "def plot(self,ax):\n if self.show_cells_flag:\n self.cells_on(ax)\n 
else:\n self.cells.off(ax)", "def View_Preorder( self ):\r\n cb.order = 0\r\n self.system.Draw( )", "def down(self):\r\n self.brush_on = True", "def ToggleOff(self):\n for s in self.sensors:\n self.gSetpt[s.GetID()].Disable()\n\n self.top_sizer.Layout()\n print(\"Toggle off\")", "def clear(self):\n\t\tself.PTerm = 0.0\n\t\tself.ITerm = 0.0\n\t\tself.DTerm = 0.0\n\t\tself.clearing = True\n\n\t\tself.output = 0.0", "def setup(self, flags):\n self.figure = pylab.figure(1)\n self.axes = {}\n self.stream_data = {}\n self.flags = flags", "def turn_aux_heat_off(self):\n self.set_operation_mode(STATE_HEAT)", "def ClearDisplay():\n display.fill(0)", "def postSI(self):\n # for cell in self.cells:\n # cell.resetTotOrdFlux()\n self.depth = 0", "def _clear(self, event):\n if self.ignore(event) or self._changed_canvas():\n return\n self._background = self.canvas.copy_from_bbox(self.ax.bbox)\n self.ax.draw_artist(self._buttons)\n if hasattr(self, \"_circles\"):\n for circle in self._circles:\n self.ax.draw_artist(circle)" ]
[ "0.5862193", "0.5839169", "0.58179474", "0.5817385", "0.58068", "0.57821304", "0.5691695", "0.5675337", "0.5528641", "0.55042607", "0.5428167", "0.53599954", "0.5344104", "0.5337998", "0.5293606", "0.5292733", "0.5271016", "0.5258116", "0.5246498", "0.524269", "0.5237372", "0.5225428", "0.5220303", "0.5203837", "0.52036947", "0.52034456", "0.52013886", "0.5200847", "0.5185239", "0.51820904", "0.5177874", "0.51753616", "0.5173084", "0.51728207", "0.5159168", "0.5143096", "0.51428205", "0.5139978", "0.51398593", "0.51227844", "0.5115814", "0.5102671", "0.5100959", "0.50994486", "0.5096747", "0.5095979", "0.5084057", "0.5074998", "0.50742203", "0.50706124", "0.5070293", "0.5066537", "0.5053755", "0.5049005", "0.5048013", "0.504097", "0.50355285", "0.5031374", "0.5030477", "0.5028928", "0.502329", "0.5022385", "0.50112766", "0.50108874", "0.50095564", "0.5008981", "0.5008644", "0.50069445", "0.4995234", "0.49839142", "0.49793163", "0.4976686", "0.4972148", "0.49685583", "0.49645516", "0.49623454", "0.49554816", "0.4952276", "0.49507558", "0.49409944", "0.4940246", "0.4940235", "0.49383023", "0.49358943", "0.49287397", "0.49270687", "0.49265027", "0.4926365", "0.49254096", "0.49220663", "0.49196577", "0.4918332", "0.49139038", "0.4913082", "0.49114993", "0.49091053", "0.49079755", "0.49013704", "0.4894006", "0.48910108", "0.48834673" ]
0.0
-1
Helper for running guider flat tests.
def _guider_flat(self, nCall, nInfo, nWarn, nErr, finish=False, didFail=False): cmdState = self.actorState.gotoField cmdState.reinitialize(self.cmd) result = masterThread.guider_flat(self.cmd, cmdState, myGlobals.actorState, 'guider') self.assertEqual(result, not didFail) self._check_cmd(nCall, nInfo, nWarn, nErr, finish, didFail=didFail)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def runTests(self):\n \n pass", "def main():\n run_test_all()", "def tests():", "def runtest(self):", "def test_single_test_case():\n pass", "def __main() :\n launchTests()", "def test_basic_execution(self):", "def test_generate_all_testing(self):\n pass", "def spec_tests():\n pass", "def test(self):\n for arch, python in self.python:\n self.run(f\"{python} -m pytest\")", "def run(self):\n if self.all:\n cmd = self.apply_options(self.test_all_cmd)\n self.call_and_exit(cmd)\n else:\n cmds = (self.apply_options(self.unit_test_cmd, (\"coverage\",)),)\n if self.coverage:\n cmds += (self.apply_options(self.coverage_cmd),)\n self.call_in_sequence(cmds)", "def test_1():", "def run_tests(self, test_labels):\n import pytest\n\n argv = []\n if self.verbosity == 0:\n argv.append('--quiet')\n if self.verbosity == 2:\n argv.append('--verbose')\n if self.verbosity == 3:\n argv.append('-vv')\n if self.failfast:\n argv.append('--exitfirst')\n if self.keepdb:\n argv.append('--reuse-db')\n\n argv.extend(test_labels)\n return pytest.main(argv)", "def test_5():", "def unitary_test():", "def _run_local_tests(self, *args, **kwargs):\n pass", "def inner_test():\n pass", "def inner_test():\n pass", "def test_4():", "def test_script(self) -> None:\n main()", "def run_tests(self):\n raise NotImplementedError", "def run_main(): # pragma: no cover\n RunTestsCLI.run()", "def run(self, test, env):\n\n raise NotImplementedError", "def TestOneStep(self):\n pass", "def startTestRun(self):", "def test(self):\n # -- Test --\n\n # (1)\n\n # (2)\n\n # (3)\n\n # (4)\n # -- Test --", "def local_test():\n pass", "def runTest(self):\n\t\tself.setUp()\n\t\tself.test_postopProgramming1()", "def tests():\n api.local('nosetests')", "def run_test_suite():\n local('. fabric_factory/ve/bin/activate; fabric_factory/src/project/manage.py test')", "def test_2():", "def test_3():", "def main():\n test_merge_quick_sort()\n test_compare()", "def task_test(argv):\n run_tests(\"python2\", argv)\n run_tests(\"python3\", argv)", "def do_test():\n for x in execute_helper(test_info,crossmap_tests):\n yield x", "def _run_ci_test():\n _run_install(False)\n _run_coverage_html(False)\n _run_typecheck_xml(False)\n _run_lint(True)", "def test():", "def test():", "def run_tests(tests):\n return [test(t) for t in tests]", "def task_test():\n return {\n \"actions\": [[\"pytest\", \"--cov=mad_gui\", \"--cov-config=.coveragerc\", \"-vv\"]],\n \"verbosity\": 2,\n }", "def test():\n pass", "def runTest(self):\n return True", "def runTest(self):\n unittest.main()\n ChoreTest.clean_up()", "def test_main():\n # Setup\n # Exercise\n # Verify", "def test(self):\n pass", "def test_dummy():", "def test_something():", "def run_test(self):\n raise NotImplementedError", "def runAll():\n\n loader = unittest.TestLoader()\n test_dir = pkg_resources.resource_filename('frvcpy.test','.')\n suite = loader.discover(test_dir)\n\n runner = unittest.TextTestRunner(verbosity=2)\n runner.run(suite)", "def collectTests(self, global_ctx):\n pass", "def test_T1():", "def test_T1():", "def _test(self):", "def _test(self):", "def _test(self):", "def _test(self):", "def _test(self):", "def pytest_can_run_together(item1, item2):", "def test():\n for cmd in [\n \"pytest --verbose --cov pike/ --cov-report term --cov-report html tests/\",\n ]:\n _run_in_venv(shlex.split(cmd))\n for linter in [[\"black\", \"--check\"], [\"flake8\"], [\"isort\", \"--check\"]]:\n _run_in_venv(linter + TEST_FILES)\n\n _run_in_venv(\n [\"mypy\", \"pike/\", \"tests/\", \"setup.py\", \"pikefile.py\", 
\"--show-error-codes\"]\n )\n _run_in_venv([\"mypy\", \"examples/\"])\n _run_in_venv([\"bandit\", \"-r\", \"pike/\"])", "def test_T01():", "def runTest(self):\n self.setUp()\n self.test_FiberDistance1()", "def runTest(self):\n self.setUp()\n self.test_NeuroPath1()", "def runTest(self):\n self.setUp()\n self.test_MarkupsInViewsSelfTest1()", "def test_example_runs(self):\n run_example(\n verbose=False,\n testapp=self.testapp,\n )", "def run_tests():\n \n test_constructor_positive()\n test_constructor_negative()\n test_game_move_positive()\n test_game_move_negative()\n test_game_move_edge()\n print(\"Congratulations ! You passed all the game test cases.\")", "def task_test():\n return {\n 'actions': ['py.test tests/'],\n }", "def runTest(self):\n self.setUp()\n self.test_FiducialTransform1()", "def _test(self):\n pass", "def main():\n fix_sys_path()\n result = unittest.TextTestRunner(verbosity=2).run(createTestSuite())\n\n if result.testsRun != EXPECTED_TEST_COUNT:\n raise Exception(\n 'Expected %s tests to be run, not %s.' % (EXPECTED_TEST_COUNT, result.testsRun))\n\n if len(result.errors) != 0 or len(result.failures) != 0:\n raise Exception(\n \"Functional test suite failed: %s errors, %s failures of %s tests run.\" % (\n len(result.errors), len(result.failures), result.testsRun))", "def _test(self):\n pass", "def _test(self):\n pass", "def test_testutils():\n build()\n sh(\"%s psutil\\\\tests\\\\test_testutils.py\" % PYTHON)", "def run(cls): \n tests_to_run = cls.config.TESTS # A list of 5-tuple elements specifying the tests to run. See the\n # 'Test Setup' section in config.py.template for more info.\n test_group_name = \"Alchemist Tests\" # A short string identifier for this test run.\n output_dir = cls.config.OUTPUT_DIR # The output file where we write results.\n \n try:\n os.makedirs(output_dir,0o777)\n except:\n pass\n num_tests_to_run = len(tests_to_run)\n\n print(OUTPUT_DIVIDER_STRING)\n if num_tests_to_run == 1:\n print(\"Running %d test in %s\" % (num_tests_to_run, test_group_name))\n else:\n print(\"Running %d tests in %s\" % (num_tests_to_run, test_group_name))\n failed_tests = []\n\n cls.before_run_tests()\n \n spark_settings = []\n for i in cls.config.SPARK_SETTINGS:\n spark_settings.append(i.to_array()[0])\n \n output_settings = []\n for i in cls.config.OUTPUT_SETTINGS:\n output_settings.append(i.to_array()[0])\n \n main_class = \"altest.AlTest\"\n\n for meta_data, opt_sets in tests_to_run:\n print(OUTPUT_DIVIDER_STRING + '\\n')\n# print(\"Running test command: '%s' ... 
\" % main_class)\n \n meta = {}\n meta_pairs = [i.to_tuple() for i in meta_data]\n for mp in meta_pairs:\n meta[mp[0].replace('-', '_')] = mp[1].replace('0x20', ' ')\n \n meta_settings = []\n for i in meta_data:\n meta_settings.append(i.to_array()[0])\n \n# stdout_filename = \"%s/%s.out\" % (output_dir, meta['short_name'])\n# stderr_filename = \"%s/%s.err\" % (output_dir, meta['short_name'])\n# \n# out_file = open(output_dir + \"/\" + meta['short_name'] + \".out\", 'w')\n\n # Run a test for all combinations of the OptionSets given, then capture\n # and print the output.\n opt_set_arrays = [i.to_array() for i in opt_sets]\n for opt_list in itertools.product(*opt_set_arrays):\n\n cmd = cls.get_spark_submit_cmd(spark_settings, main_class, output_settings, meta_settings, opt_list)\n# print(\"\\nSetting env var SPARK_SUBMIT_OPTS: %s\" % java_opts_str)\n# test_env[\"SPARK_SUBMIT_OPTS\"] = java_opts_str\n print(\"Running command:\")\n print(\"%s\\n\" % cmd)\n Popen(cmd, shell=True, env=test_env).wait()\n\n try:\n src = output_dir + meta['short_name'] + '_latest/'\n src_files = os.listdir(src)\n src_file = src_files[0][:-4]\n new_dir = output_dir + src_file\n os.makedirs(new_dir)\n for file_name in src_files:\n full_file_name = os.path.join(src, file_name)\n if (os.path.isfile(full_file_name)):\n shutil.copy(full_file_name, new_dir)\n except:\n pass\n \n# result_string = cls.process_output(config, meta['short_name'], opt_list,\n# stdout_filename, stderr_filename)\n# print(OUTPUT_DIVIDER_STRING)\n# print(\"\\nResult: \" + result_string)\n# print(OUTPUT_DIVIDER_STRING)\n# if \"FAILED\" in result_string:\n# failed_tests.append(meta['short_name'])\n# \n# \n# out_file.write(result_string + \"\\n\")\n# out_file.flush()\n\n if num_tests_to_run == 1:\n print(\"Finished running %d test in %s.\" % (num_tests_to_run, test_group_name))\n else:\n print(\"Finished running %d tests in %s.\" % (num_tests_to_run, test_group_name))\n# print(\"\\nNumber of failed tests: %d, failed tests: %s\" %\n# (len(failed_tests), \",\".join(failed_tests)))\n print(OUTPUT_DIVIDER_STRING)", "def test_standard_tap_tests():\n tests = get_standard_tap_tests(TapPartoo, config=SAMPLE_CONFIG)\n for test in tests:\n test()", "def test_run_started(self):", "def test_batch(self):\n pass", "def _run_tests(self):\n for pyunit_testcase in self.cfg.testcases:\n yield self._run_testsuite(pyunit_testcase)", "def main():\n run_test_summary1a()\n run_test_summary1c()\n run_test_summary1c()", "def test_T4():", "def test_T4():", "def test():\n with lcd(BASEDIR):\n local('virtenv/bin/coverage run runtests.py -v2')\n local('virtenv/bin/coverage report -m')", "def test_multiple_commands_at_same_time(self):", "def run_tests():\n test_command = \"pytest -s \" + os.path.join(root_path, \"cases\", \"test_cases.py::TestCases::test_cases\") + \" --html=\" + os.path.join(root_path, \"reports\", \"qa_testing_report.html\")\n\n subprocess.run(test_command, shell=True)", "def run_combined(self):\n self.runtest_autokey()\n self.runtest_mediaresource()\n self.runtest_composite_slug()\n self.runtest_all_types()\n self.runtest_complex_types()\n self.runtest_only_key()\n self.runtest_compound_key()\n self.runtest_simple_select()\n self.runtest_paging()\n self.runtest_nav_o2o()\n self.runtest_nav_o2o_1()\n self.runtest_nav_zo2o()\n self.runtest_nav_zo2o_f()\n self.runtest_nav_zo2o_b()\n self.runtest_nav_many2o()\n self.runtest_nav_many2o_f()\n self.runtest_nav_many2o_b()\n self.runtest_nav_many2zo()\n self.runtest_nav_many2zo_f()\n self.runtest_nav_many2zo_b()\n 
self.runtest_nav_many2zo_r()\n self.runtest_nav_many2zo_rf()\n self.runtest_nav_many2zo_rb()\n self.runtest_nav_many2many()\n self.runtest_nav_many2many_1()\n self.runtest_nav_many2many_r()\n self.runtest_nav_many2many_r1()", "def test_get_scenarios(self):\n pass", "def main_test():\n full = unittest.TestSuite()\n full.addTest(unittest.makeSuite(TestToolOptions))\n full.addTest(unittest.makeSuite(TestBadConfiguration))\n full.addTest(unittest.makeSuite(TestBasicEndpoints))\n full.addTest(unittest.makeSuite(TestMultipleEPG))\n full.addTest(unittest.makeSuite(TestBasicExistingEndpoints))\n full.addTest(unittest.makeSuite(TestBasicExistingEndpointsAddPolicyLater))\n full.addTest(unittest.makeSuite(TestExportPolicyRemoval))\n full.addTest(unittest.makeSuite(TestBasicEndpointsWithContract))\n full.addTest(unittest.makeSuite(TestBasicEndpointMove))\n full.addTest(unittest.makeSuite(TestPolicyChangeProvidedContract))\n full.addTest(unittest.makeSuite(TestChangeL3Out))\n full.addTest(unittest.makeSuite(TestDuplicates))\n full.addTest(unittest.makeSuite(TestDuplicatesTwoL3Outs))\n full.addTest(unittest.makeSuite(TestDeletions))\n\n unittest.main()", "def runalltests():\n doctest.testmod()", "def run_tests():\n fail = []\n okay = []\n for i in os.listdir(\".\"):\n if i.find(\"_test_\") > -1 and i.endswith(\".py\"):\n if 0 != subprocess.call(\"python \" + i, shell=True):\n fail.append(i)\n else:\n okay.append(i)\n if fail:\n print(\"[ERROR] The following %u tests failed: %r\" % (len(fail), fail))\n return False\n print(\"[DONE] All %u tests completely successfully!\" % (len(okay)))\n return True", "def runTest(self):\n self.setUp()\n self.test_ExtendSpine1()", "def run_tests():\n def print_result(result, correct):\n if result == correct:\n print(\" OK!\")\n else:\n print(f\" Failed ({result} != {correct})!\")\n for n, test in enumerate(_tests, start=1):\n print(f\"Running test {n}...\")\n nums = line2ints(test[\"in\"])\n try:\n correct = test[\"part1\"]\n except KeyError:\n pass\n else:\n print(\" Testing part 1...\", end=\"\")\n result = part1(nums, steps=test.get(\"phases1\", 100))\n print_result(result, correct)\n try:\n correct = test[\"part2\"]\n except KeyError:\n pass\n else:\n print(\" Testing part 2...\", end=\"\")\n result = part2(nums, steps=test.get(\"phases2\", 100))\n print_result(result, correct)", "def runTest(self):\r\n self.setUp()", "def test_passed():\n pass", "def test(command, options=\"\"):\n\n print(\n \"\"\"\nRunning pytest the test framework\n=================================\n\"\"\"\n )\n for dir_ in TEST_DIRECTORIES:\n test_dir(command, options=options, dir_=dir_)\n # command.run(f\"python -m pytest {options} {' '.join(dir_ for dir_ in TEST_DIRECTORIES)}\", echo=True, pty=POSIX)\n\n print(\n \"\"\"\nAll Testing Directories Passed Successfully\n===========================================\n\"\"\"\n )", "def test_T2():", "def test_T2():", "def test_step(self):\n # allow to escape testing by setting runtest to False\n if not self.cfg['runtest'] and not isinstance(self.cfg['runtest'], bool):\n\n # make very sure OMP_NUM_THREADS is set to 1, to avoid hanging GROMACS regression test\n env.setvar('OMP_NUM_THREADS', '1')\n\n self.cfg['runtest'] = 'check'\n if self.cfg['parallel']:\n # run 'make check' in parallel since it involves more compilation\n self.cfg.update('runtest', \"-j %s\" % self.cfg['parallel'])\n super(EB_GROMACS, self).test_step()", "def do_test(self):\n\t\tshutit_global.shutit_global_object.yield_to_draw()\n\t\tif not self.build['dotest']:\n\t\t\tself.log('Tests 
configured off, not running',level=logging.DEBUG)\n\t\t\treturn\n\t\t# Test in reverse order\n\t\tself.log('PHASE: test', level=logging.DEBUG)\n\t\tself.stop_all()\n\t\tself.start_all()\n\t\tfor module_id in self.module_ids(rev=True):\n\t\t\t# Only test if it's installed.\n\t\t\tif self.is_installed(self.shutit_map[module_id]):\n\t\t\t\tself.log('RUNNING TEST ON: ' + module_id, level=logging.DEBUG)\n\t\t\t\tself.login(prompt_prefix=module_id,command=shutit_global.shutit_global_object.bash_startup_command,echo=False)\n\t\t\t\tif not self.shutit_map[module_id].test(self):\n\t\t\t\t\tself.fail(module_id + ' failed on test', shutit_pexpect_child=self.get_shutit_pexpect_session_from_id('target_child').pexpect_child) # pragma: no cover\n\t\t\t\tself.logout(echo=False)", "def main():\n test_runner = TestRunner(\n FLAGS.workspace, FLAGS.bench_home, imagenet_dir=FLAGS.train_data_dir)\n test_runner.run_tests(FLAGS.test_list.split(','))", "def test1(self):\n\n log.info('This is a test')\n self.assertTrue((random.randint(0,9) % 2) == 0)#! /usr/bin/env python", "def runtests():\r\n\r\n app_abspath = os.path.dirname(os.path.dirname(__file__))\r\n models_abspath = os.path.join(app_abspath, 'models.py')\r\n models_exists = os.path.isfile(models_abspath)\r\n urls_abspath = os.path.join(app_abspath, 'urls.py')\r\n urls_exists = os.path.isfile(urls_abspath)\r\n views_abspath = os.path.join(app_abspath, 'views')\r\n views_exists = os.path.isdir(views_abspath)\r\n tpls_abspath = os.path.join(app_abspath, 'templates')\r\n tpls_exists = os.path.isdir(tpls_abspath)\r\n\r\n for f in [models_abspath, urls_abspath]:\r\n if os.path.isfile(f):\r\n subprocess.call('cp {} {}.orig'.format(f, f), shell=True)\r\n\r\n if views_exists:\r\n subprocess.call('cp -r {} {}.orig'.format(views_abspath, views_abspath), shell=True)\r\n\r\n if tpls_exists:\r\n subprocess.call('cp -r {} {}.orig'.format(tpls_abspath, tpls_abspath), shell=True)\r\n\r\n overwrite_project_language('ja')\r\n subprocess.call('python manage.py generatescaffold test_app I18nModel title:string', shell=True)\r\n time.sleep(1)\r\n overwrite_project_language('en-us')\r\n time.sleep(1)\r\n\r\n subprocess.call('python manage.py generatescaffold test_app GeneratedNoTimestampModel title:string description:text --no-timestamps', shell=True)\r\n time.sleep(2) # Give time for Django's AppCache to clear\r\n\r\n subprocess.call('python manage.py generatescaffold test_app GeneratedModel title:string description:text', shell=True)\r\n\r\n test_status = subprocess.call('python manage.py test --with-selenium --with-selenium-fixtures --with-cherrypyliveserver --noinput', shell=True)\r\n\r\n if models_exists:\r\n subprocess.call('mv {}.orig {}'.format(models_abspath, models_abspath), shell=True)\r\n else:\r\n subprocess.call('rm {}'.format(models_abspath), shell=True)\r\n\r\n if urls_exists:\r\n subprocess.call('mv {}.orig {}'.format(urls_abspath, urls_abspath), shell=True)\r\n else:\r\n subprocess.call('rm {}'.format(urls_abspath), shell=True)\r\n\r\n if views_exists:\r\n subprocess.call('rm -rf {}'.format(views_abspath), shell=True)\r\n subprocess.call('mv {}.orig {}'.format(views_abspath, views_abspath), shell=True)\r\n else:\r\n subprocess.call('rm -rf {}'.format(views_abspath), shell=True)\r\n\r\n if tpls_exists:\r\n subprocess.call('rm -rf {}'.format(tpls_abspath), shell=True)\r\n subprocess.call('mv {}.orig {}'.format(tpls_abspath, tpls_abspath), shell=True)\r\n else:\r\n subprocess.call('rm -rf {}'.format(tpls_abspath), shell=True)\r\n\r\n subprocess.call('rm 
{}/*.pyc'.format(app_abspath), shell=True)\r\n\r\n sys.exit(test_status)", "def runner(app):\n\n return app.test_cli_runner()" ]
[ "0.7352913", "0.72175664", "0.7153499", "0.71123314", "0.69343376", "0.6914146", "0.68364465", "0.68047154", "0.67976254", "0.6754427", "0.6737129", "0.6672995", "0.666052", "0.662404", "0.6613215", "0.6601137", "0.6594231", "0.6594231", "0.65821177", "0.6561161", "0.65609705", "0.65496105", "0.65379", "0.65282136", "0.6523428", "0.651125", "0.6504933", "0.64796484", "0.64694756", "0.6446504", "0.6424444", "0.6420083", "0.64081895", "0.640534", "0.6391357", "0.63712215", "0.63697773", "0.63697773", "0.6368389", "0.63669837", "0.6358539", "0.6351993", "0.63432777", "0.6341282", "0.6332429", "0.6326245", "0.6322308", "0.6304771", "0.6298688", "0.62855214", "0.62702185", "0.62702185", "0.62698126", "0.62698126", "0.62698126", "0.62698126", "0.62698126", "0.62645227", "0.6255347", "0.62400466", "0.6234838", "0.6228745", "0.6226527", "0.62260324", "0.62237066", "0.6221379", "0.62204164", "0.62145436", "0.62145436", "0.62145436", "0.62145436", "0.6212517", "0.6211584", "0.6199178", "0.6195916", "0.61855334", "0.6183107", "0.61754864", "0.61667484", "0.61667484", "0.6165504", "0.6164898", "0.6161687", "0.6158848", "0.6150749", "0.6148883", "0.61464214", "0.6145058", "0.6145048", "0.61403215", "0.61392874", "0.61202705", "0.61144143", "0.611364", "0.611364", "0.61043644", "0.60984325", "0.6093116", "0.60840607", "0.6082019", "0.60756195" ]
0.0
-1
Helper for running guider flat tests that check the APOGEE shutter.
def _guider_flat_apogeeShutter(self, nCall, nInfo, nWarn, nErr, finish=False, didFail=False): cmdState = self.actorState.gotoField cmdState.reinitialize(self.cmd) result = masterThread.guider_flat( self.cmd, cmdState, myGlobals.actorState, 'guider', apogeeShutter=True) self.assertEqual(result, not didFail) self._check_cmd(nCall, nInfo, nWarn, nErr, finish, didFail=didFail)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def runTest(self):\n E = main()\n self.assertInside(E, energy, 1e-5)", "def main():\n # run_test_go_straight_inches()\n # run_test_turn_degrees()\n # run_test_spin_degrees()\n beep_if_blob_is_bigger_than(3000)", "def test_01_lighting(self):", "def test_run(self):\n rig_analysis_dir = \"rig_analysis\"\n analysis_root = os.path.join(self.io_args.output_root, rig_analysis_dir)\n os.makedirs(analysis_root, exist_ok=True)\n\n self.io_args.output_obj = os.path.join(analysis_root, \"final.obj\")\n self.io_args.output_equirect = os.path.join(analysis_root, \"equirect.ppm\")\n self.io_args.output_camera = os.path.join(analysis_root, \"camera.ppm\")\n self.io_args.output_camera_id = \"0\"\n self.io_args.output_cross_section = os.path.join(analysis_root, \"cross.ppm\")\n\n self.run_app(\"RigAnalyzer\")\n self.check_against_truth(\n truth=os.path.join(self.io_args.truth_dir, rig_analysis_dir),\n output=analysis_root,\n )", "def test_theft_and_stealing(self):", "def do_it(args):\n\n #force = args.force\n #testing = args.testing\n #verbose = args.verbose\n #regions = args.regions\n\n # XXX WORKING HERE", "def main():\n run_test_draw_upside_down_wall()", "def test_arc_smear(self):", "def runtest(self):", "def test_art_from_taste_space(self):", "def tests():", "def test_run_simplega():\n WRFga_winner = run_simplega(pop_size=100, n_generations=1, testing=True)\n assert WRFga_winner.Fitness >= 0", "def run_tests():\n \n test_constructor_positive()\n test_constructor_negative()\n test_game_move_positive()\n test_game_move_negative()\n test_game_move_edge()\n print(\"Congratulations ! You passed all the game test cases.\")", "def main():\n run_test_all()", "def test_run_homer(self):\n \n #foreground = clipper.test_file(\"clip_analysis_test_peak_results.bed.all.real.fa\")\n #background = clipper.test_file(\"clip_analysis_test_peak_results.bed.all.random.fa\")\n #run_homer(foreground, background)", "async def test_routine(self):\n print('Running test routine...')\n print('Waiting for axes to initialize...')\n await self.robot.wait_until_initialized()\n print('Synchronizing robot state with peripheral...')\n await self.robot.synchronize_values()\n print('Loading calibration data...')\n await self.robot.load_calibrations()\n await self.robot.go_to_alignment_hole()\n\n print('Starting 96-well plate test...')\n await self.robot.go_to_96_well_plate(1, 'a')\n await self.robot.dispense('96-well plate', 'far above')\n for height in ['bottom', 'low', 'mid', 'high', 'top', 'above', 'far above']:\n print('Testing with height {}...'.format(height))\n for (row, volume) in [('a', 20), ('b', 30), ('c', 40), ('d', 50), ('e', 100)]:\n print(\n ' Testing precise with row {} and volume {} mL...'\n .format(row, volume)\n )\n await self.test_individual_precise(row, height, volume / 1000)\n await self.robot.dispense('96-well plate', height)\n for (row, volume) in [\n ('f', 100), ('g', 150), ('h', 200), ('a', 300), ('b', 400),\n ('c', 500), ('d', 600), ('e', 700), ('g', 800), ('h', 900)\n ]:\n print(\n ' Testing rough with row {} and volume {} mL...'\n .format(row, volume / 1000)\n )\n await self.test_individual_rough(row, height, volume / 1000)\n await self.robot.z.go_to_high_end_position()\n await self.robot.y.go_to_low_end_position()\n\n print(batch.OUTPUT_FOOTER)\n print('Quitting...')", "def main():\n coverage = calculate_code_coverage()\n platform = os.uname()[0]\n if coverage < CODE_COVERAGE_GOAL[platform]:\n data = {\n 'expected': CODE_COVERAGE_GOAL[platform],\n 'observed': coverage,\n }\n print '\\033[91mFAIL: 
%(observed).2f%% does not meet goal of %(expected).2f%%\\033[0m' % data\n sys.exit(1)", "def given_test_cases(self):\n self.assertTrue(anagram_finder(\"listen\", \"silent\"))\n self.assertTrue(anagram_finder(\"triangle\", \"integral\"))\n self.assertFalse(anagram_finder(\"apple\", \"pabble\"))", "def runTest(self):\n return True", "def test_5():", "def test_step(self):\n # allow to escape testing by setting runtest to False\n if not self.cfg['runtest'] and not isinstance(self.cfg['runtest'], bool):\n\n # make very sure OMP_NUM_THREADS is set to 1, to avoid hanging GROMACS regression test\n env.setvar('OMP_NUM_THREADS', '1')\n\n self.cfg['runtest'] = 'check'\n if self.cfg['parallel']:\n # run 'make check' in parallel since it involves more compilation\n self.cfg.update('runtest', \"-j %s\" % self.cfg['parallel'])\n super(EB_GROMACS, self).test_step()", "def runTests():\r\n\r\n print(\"running a few tests\")\r\n\r\n average = compute .gpsAverage (4, 5)\r\n print(\"average = \", average)\r\n \r\n print (\"hello!\")", "def test_bed(self):\n #TODO write bed tests", "def test_qasm_simulator_single_shot(self):\n shots = 1\n self.qobj.config.shots = shots\n result = self.backend.run(self.qobj).result()\n self.assertEqual(result.success, True)", "def runTests(self):\n \n pass", "def test_alien_data(self):", "def test_T4():", "def test_T4():", "def runTest(self):\n self.setUp()\n self.test_FiberDistance1()", "def test_quick_build1(self):\n pass", "def _test(self):\n pass", "def _test(self):\n pass", "def _test(self):\n pass", "def test_basic_execution(self):", "def main():\n # random_peeler() # Run one or the other by uncommenting/commenting\n peel_digits_test_suite() # Run one or the other by uncommenting/commenting", "def test_4():", "def runTest(self):\n\t\tself.setUp()\n\t\tself.test_postopProgramming1()", "def runTest(self):\r\n self.setUp()\r\n self.test_sceneImport24281()", "def test_deviceX_1():\n assert 0", "def test_outE_traverals(self):\r\n results = self.blake.outE()\r\n assert len(results) == 1\r\n assert self.blake_in_theoretics in results", "def test_T01():", "def test_generate_all_testing(self):\n pass", "def testStart(self):\n self.assert_(self.visionSystem.downwardSafeDetector)\n #self.assertCurrentMotion(motion.search.ForwardZigZag)", "def TestOneStep(self):\n pass", "def test_take_apple(self):\n manager = DummyLevelManager()\n game = Game(manager)\n game.move(GameMoves.PASS)\n state = game.move(GameMoves.UP)\n self.assertEqual(state, LevelState.RUNNING)\n self.assertEqual(game.current_level.snake_length, 7)\n self.assertEqual(game.current_level.snake_direction, (-1, 0))\n self.assertEqual(game.current_level.snake,\n [(0, 9), (1, 9), (1, 8), (1, 7), (1, 6), (1, 5), (1, 4)])", "def test_quick_build(self):\n pass", "def _test(self):", "def _test(self):", "def _test(self):", "def _test(self):", "def _test(self):", "def runTest(self):\n self.setUp()\n self.test_FlexCrop1()", "def test_standard_tap_tests():\n tests = get_standard_tap_tests(TapPartoo, config=SAMPLE_CONFIG)\n for test in tests:\n test()", "def test_1_xc_run(self) :\n self.banner(\"Testing if extra credit simulation gives the right percentages.\")\n filename = self.find_file('project9_xc.py')\n doors = random.randrange(10, 100)\n switch_target = 100 * ((doors - 1) / doors)\n stay_target = 100 * (1 / doors)\n target_range = 2\n \n with open('logs/test_extra_credit.out', 'a') as log :\n test = pexpect.spawnu('python ' + filename.as_posix() + ' {}'.format(doors), logfile=log, encoding='utf-8')\n turns = random.randrange(200, 
500)\n test.sendline(str(turns))\n try : \n got = test.expect([pexpect.EOF, '(\\d+\\.\\d+)\\s*%'], timeout=5)\n switch_percent = float(test.match.group(1))\n got = test.expect([pexpect.EOF, '(\\d+\\.\\d+)\\s*%'], timeout=5)\n stay_percent = float(test.match.group(1))\n except :\n self.fail(\"The grader program failed to parse the output of your project.\")\n\n if not (switch_target-target_range < switch_percent < switch_target+target_range) :\n self.fail('Your switch percentage ({}) is out of range. It should be between {} and {}'.format(switch_percent, switch_target-target_range, switch_target+target_range))\n if not (stay_target-target_range < stay_percent < stay_target+target_range) :\n self.fail('Your stay percentage ({}) is out of range. It should be between {} and {}'.format(stay_percent, stay_target-target_range, stay_target+target_range))\n test.close()", "def test_emirp_check():\r\n pass", "def run_tests():\n good_car = UnreliableCar(\"Good Car\", 100, 90)\n bad_car = UnreliableCar(\"Bad Car\", 100, 10)\n\n for i in range(1, 15):\n print(\"Attempting to drive {}km:\".format(i))\n print(\"{:12} drove {:2}km\".format(good_car.name, good_car.drive(i)))\n print(\"{:12} drove {:2}km\".format(bad_car.name, bad_car.drive(i)))\n\n \"\"\"final states of the cars\"\"\"\n print(good_car)\n print(bad_car)", "def test_run_alpha_rarefaction(self):\r\n\r\n run_alpha_rarefaction(\r\n self.test_data['biom'][0],\r\n self.test_data['map'][0],\r\n self.test_out,\r\n call_commands_serially,\r\n self.params,\r\n self.qiime_config,\r\n tree_fp=self.test_data['tree'][0],\r\n num_steps=5,\r\n parallel=False,\r\n min_rare_depth=3,\r\n max_rare_depth=18,\r\n status_update_callback=no_status_updates)\r\n\r\n html_fp = join(self.test_out, 'alpha_rarefaction_plots',\r\n 'rarefaction_plots.html')\r\n pd_averages_fp = join(self.test_out, 'alpha_rarefaction_plots',\r\n 'average_tables', 'PD_whole_treeSampleType.txt')\r\n pd_collated_fp = join(self.test_out, 'alpha_div_collated',\r\n 'PD_whole_tree.txt')\r\n\r\n # Confirm that palm and gut alpha diversities are different,\r\n # and suggestive of statistical significance (we only have a\r\n # few sequences, so we don't get significant results)\r\n ttest_res, alpha_avg = compare_alpha_diversities(open(pd_collated_fp),\r\n open(\r\n self.test_data[\r\n 'map'][0]),\r\n 'SampleType',\r\n 18,\r\n test_type='parametric')\r\n feces_palm_t = ttest_res[('feces', 'L_palm')][0]\r\n self.assertTrue(feces_palm_t < 0,\r\n \"t-statistic too high: %1.3f, but should be less than 0\"\r\n % feces_palm_t)\r\n\r\n # check that final output files have non-zero size\r\n self.assertTrue(getsize(html_fp) > 0)\r\n\r\n # Check that the log file is created and has size > 0\r\n log_fp = glob(join(self.test_out, 'log*.txt'))[0]\r\n self.assertTrue(getsize(log_fp) > 0)", "def test_foo(self):\n self.ran = True\n 1 / 0", "def inner_test():\n pass", "def inner_test():\n pass", "def test_gethardwares(self):\n pass", "def runTest(self):\n self.setUp()\n self.test_BiplaneRegistration1()", "def test_one_arm(self):\n bandit = self.bandit_class(self.one_arm_test_case)\n self._test_one_arm(bandit)", "def test_households_in_admin_unit(self):", "def checkAccuracy():\n\tcmd = \"{}/bin/augustus --species={} {}/output/testSet.gb\"\\\n\t.format(path_to_aug, species, testfile)\n\te = subprocess.check_call(cmd, shell=True)", "def test_if(self):", "def test(self):\n pass", "def test1(self):\n\n log.info('This is a test')\n self.assertTrue((random.randint(0,9) % 2) == 0)#! 
/usr/bin/env python", "def test():\n pass", "def runTest(self):\r\n self.setUp()\r\n self.test_CreateROI1()", "def test_func(debug: bool) -> None:\n click.echo(debug)", "def test_run_full(mk_tmp_dirs):\n tmp_current_path, tmp_data_path, tmp_config_path = mk_tmp_dirs\n\n cfg_dir = path.join(tmp_data_path, 'cfgs')\n collect_pipeline_cfgs(cfg_dir)\n\n asn_path = path.join(DATAPATH, 'mosaic_long_asn.json')\n args = [\n path.join(cfg_dir, 'calwebb_image3.cfg'),\n asn_path,\n ]\n\n Step.from_cmdline(args)\n\n # Check for the CRF files\n with open(asn_path) as fh:\n asn = load_asn(fh)\n expfilenames = [\n path.split(path.splitext(member['expname'])[0])[1]\n for member in asn['products'][0]['members']\n ]\n crffilenames = []\n for expfilename in expfilenames:\n name = remove_suffix(path.splitext(expfilename)[0])[0]\n crffilenames.append(name + '_a3001_crf.fits')\n for crffilename in crffilenames:\n assert path.isfile(crffilename)\n\n # Check for the level3 products\n product_name = asn['products'][0]['name']\n assert path.isfile(product_name + '_cat.ecsv')\n assert path.isfile(product_name + '_i2d.fits')", "def main():\n run_test_summary1a()\n run_test_summary1c()\n run_test_summary1c()", "def spec_tests():\n pass", "def test_allup(self):\n\n args = [\"--clusters\", clipper.test_file(\"clip_analysis_test_peak_results.bed\"),\n \"-s\", \"mm9\",\n \"--bam\", clipper.test_file(\"allup_test.bam\"),\n \"--AS_Structure\", os.path.join(clipper.test_dir(), \"mm9data4\"),\n '--genome_location', clipper.test_file('mm9.fa'), \n #'--regions_location', clipper.test_file(\"knownGene_sample.gtf\"),\n \"--regions_location\", os.path.join(clipper.test_dir(), \"regions\"),\n '--phastcons_location', clipper.test_file(\"allup_test.bam\"),\n '--motifs', 'AAAAAA',\n '--nrand', '1',\n #'--runPhast',\n '--runMotif',\n '--bw_pos', clipper.test_file(\"allup_test.pos.bw\"),\n '--bw_neg', clipper.test_file(\"allup_test.neg.bw\"),\n ] \n (options, args) = self.parser.parse_args(args)\n #self.assertTrue(False, \"allup test is slow and has been removed for now\")\n main(options)", "def runTest(self):\n self.setUp()\n self.test_visuThreeD1()", "def test_let(self):", "def test_water_regulation(self):\n\n for action in self.controller.actions.values():\n for water_level in range(90, 110, 2):\n\n # measure water level\n self.controller.sensor.measure = MagicMock(return_value=water_level)\n\n # get the state of the pump\n self.controller.pump.get_state = \\\n MagicMock(return_value=self.decider.decide(water_level, action, \\\n self.controller.actions)) \\\n\n self.controller.tick()", "def test_something():", "def test_e2e_general_mode(self):\n\n cli = \"--balance 1 offline --test\"\n deal = self._run_bot_offine(cli)\n\n self.assertEqual(float(deal.data_row[\"balance\"]) * float(deal.data_row[\"_config_share_balance_to_bid\"]),\n float(deal.data_row[\"start-qty\"]))\n\n self.assertEqual(0.8, float(deal.data_row[\"start-qty\"]))\n self.assertEqual(0.03883667000000002, float(deal.data_row[\"result-fact-diff\"]))", "def test_conditions(self):\n if not CalculatorUtils.clear_calc(self.device):\n Utils.start_home(self.serial)\n AppUtils.kill_app(self.serial, self.package)\n AppUtils.open_app(self.device, self.serial, self.app)\n Utils.wait_short()", "def test_too_far_scenario():\n start_too_far_scenario(config.MERAKI_CAMERAS[0][\"serial\"])\n time.sleep(WARN_EVENT_THRESHOLD)\n start_too_far_scenario(config.MERAKI_CAMERAS[0][\"serial\"])\n return \"ok\"", "def main():\r\n _evaluative_test(5)\r\n _fuzz_test(1)\r\n _fuzz_test(1, 512)\r\n 
_fuzz_test(1, 1512)\r\n _fuzz_test(1000)\r\n _fuzz_test(1000, 512)\r\n _fuzz_test(1000, 4077)", "def main():\n if input(\"Mode 0=raw / 1=wrapper: ? [0]\") == \"1\":\n # Wrapped mode\n try:\n number = os.environ[\"ROBOT_ID\"]\n except KeyError:\n number = int(input(\"Enter robot number (1-5):\"))\n robot = PiBot.PiBot(robot_nr=number, directory=\"../\")\n suite = get_suite(robot, number)\n gripper = input(\"Include gripper tests (0=no, 1=yes)? [1]\")\n if gripper != \"0\":\n suite.add(\"Clear gripper space... testing gripper up-down\",\n \"gripper up-down\", robot.set_grabber_height,\n [60, 10], [5, 5], [], [], 1)\n suite.add(\"Clear gripper space... testing gripper open-close\",\n \"gripper open-close\", robot.close_grabber,\n [80, 5], [5, 5], [], [], 1)\n else:\n # Raw mode\n robot = commRaspMain.PiBot()\n while not all(map(lambda fn: fn(), [robot._motors_enable,\n robot._encoders_enable,\n robot._servo_enable])):\n time.sleep(0.05)\n robot._tof_init()\n robot._gyro_start()\n robot._adc_conf(3)\n number = int(input(\"Enter robot number (1-5):\"))\n suite = get_suite(robot, number)\n\n robot._motorL_set(0)\n robot._motorR_set(0)\n\n suite.execute()", "def test5():\n setLogLevel(\"info\")\n info(\"Configuracion Unidad experimental\")\n \"\"\" 1 -> Definicion de la topologia \"\"\"\n t1 = Topologia1()\n ue1 = UnidadExperimental()\n ue1.setTopo(t1)\n ue1.definirNodosClaves(A = 'h1', C='h2', V='h3') # Caso solo para trafico normal\n ue1.setController('ryu', 'simple_switch_13.py,ofctl_rest.py')\n info(\"Configuracion del experimento\")\n \"\"\" 3. Confiracion del experimento \"\"\"\n exp1 = Experimento()\n exp1.configureParams(ue1)\n exp1.configurarTrafico('ataque')\n \"\"\" 4. Inicio del experimento \"\"\"\n exp1.startTest()\n exp1.pingAllTest() # **************** Parece que es necesario que se de un arranque al controlador\n # **************** para que aprenda las reglas antes del ataque.\n\n \"\"\" 5. Aplicacion de pruebas \"\"\"\n exp1.trafico.pingMeasure()\n #exp1.trafico.pingMeasure(filename='ping_ataque_test.log')\n \"\"\" 6. 
Fin del experimento \"\"\"\n exp1.endTest()\n info(\"Removiendo la topologia\\n\")\n exp1.killTest()\n info(\"Removiendo el controlador\\n\")\n exp1.killController() # Si no se pone no se finaliza el controlador", "def main():\n lake_drivers = Dynamic_Lake_Drivers()\n #lake_drivers.prepare_orography_ICE5G_0k_uncorrected()\n #lake_drivers.prepare_orography_ICE5G_0k_corrected()\n #lake_drivers.prepare_orography_ICE6G_21k_corrected()\n #lake_drivers.prepare_river_directions_with_depressions_from_glac1D()\n #lake_drivers.evaluate_glac1D_ts1900_basins()\n #import time\n # start = time.time()\n #lake_drivers.evaluate_ICE6G_lgm_basins()\n # end = time.time()\n # print(end - start)\n #lake_drivers.prepare_basins_from_glac1D()\n #lake_drivers.extract_lake_volumes_from_glac1D_basins()\n #lake_drivers.connect_catchments_for_glac1D()\n lake_drivers.connect_catchments_for_transient_run()\n #lake_drivers.extract_volumes_for_transient_run()\n #lake_drivers.add_10min_rmouth_to_transient_data()\n #lake_drivers.expand_transient_data_catchments_to_include_rmouth()\n #lake_drivers.remove_no_data_values_from_upscaled_MERIT_correction_set()\n #lake_drivers.remove_disconnected_points_from_slm()", "def test():", "def test():", "def test4():\n setLogLevel(\"info\")\n info(\"Configuracion Unidad experimental\")\n \"\"\" 1 -> Definicion de la topologia \"\"\"\n t1 = Topologia1()\n ue1 = UnidadExperimental()\n ue1.setTopo(t1)\n ue1.definirNodosClaves(C='h2', V='h3') # Caso solo para trafico normal\n ue1.setController('ryu', 'simple_switch_13.py,ofctl_rest.py')\n info(\"Configuracion del experimento\")\n \"\"\" 3. Confiracion del experimento \"\"\"\n exp1 = Experimento()\n exp1.configureParams(ue1)\n exp1.configurarTrafico('normal')\n \"\"\" 4. Inicio del experimento \"\"\"\n exp1.startTest()\n \"\"\" 5. Aplicacion de pruebas \"\"\"\n exp1.trafico.iperfMeasure()\n exp1.trafico.iperfMeasure(filename='iperf_normal_test.log')\n \"\"\" 6. Fin del experimento \"\"\"\n exp1.endTest()\n info(\"Removiendo la topologia\\n\")\n exp1.killTest()\n info(\"Removiendo el controlador\\n\")\n exp1.killController() # Si no se pone no se finaliza el controlador", "def test_workbench_scenarios(self):\n result_title = 'Adaptive Numeric Input XBlock'\n basic_scenario = \"<adaptivenumericinput />\"\n test_result = self.xblock.workbench_scenarios()\n self.assertEquals(result_title, test_result[0][0])\n self.assertIn(basic_scenario, test_result[0][1])", "def test_uparforvarg(self):", "def __main() :\n launchTests()", "def test_check_sun_above_horizon():\n pass", "def test(self):\n for arch, python in self.python:\n self.run(f\"{python} -m pytest\")", "def test_visualize_equipment(self):\n pass", "async def test_thermostat_raise_repair_issue_and_warning_when_setting_fan_preset(\n hass: HomeAssistant,\n client,\n climate_airzone_aidoo_control_hvac_unit,\n integration,\n caplog: pytest.LogCaptureFixture,\n) -> None:\n client.async_send_command.return_value = {\"result\": {\"status\": 1}}\n state = hass.states.get(CLIMATE_AIDOO_HVAC_UNIT_ENTITY)\n assert state\n\n await hass.services.async_call(\n CLIMATE_DOMAIN,\n SERVICE_SET_PRESET_MODE,\n {\n ATTR_ENTITY_ID: CLIMATE_AIDOO_HVAC_UNIT_ENTITY,\n ATTR_PRESET_MODE: \"Fan\",\n },\n blocking=True,\n )\n\n issue_id = f\"dry_fan_presets_deprecation_{CLIMATE_AIDOO_HVAC_UNIT_ENTITY}\"\n issue_registry = ir.async_get(hass)\n\n assert issue_registry.async_get_issue(\n domain=DOMAIN,\n issue_id=issue_id,\n )\n assert (\n \"Dry and Fan preset modes are deprecated and will be removed in Home Assistant 2024.2. 
Please use the corresponding Dry and Fan HVAC modes instead\"\n in caplog.text\n )", "def test_shd_should_not_crash_executed_heal_info(self):\n # pylint: disable=too-many-statements\n bricks_list = get_all_bricks(self.mnode, self.volname)\n # Setting options\n g.log.info('Setting options...')\n options = {\"metadata-self-heal\": \"off\",\n \"entry-self-heal\": \"off\",\n \"data-self-heal\": \"off\"}\n ret = set_volume_options(self.mnode, self.volname, options)\n self.assertTrue(ret, 'Failed to set options %s' % options)\n g.log.info(\"Successfully set %s for volume %s\",\n options, self.volname)\n\n # Creating files on client side\n for mount_obj in self.mounts:\n g.log.info(\"Generating data for %s:%s\",\n mount_obj.client_system, mount_obj.mountpoint)\n # Create files\n g.log.info('Creating files...')\n command = (\"python %s create_files -f 10 --fixed-file-size 1M %s\"\n % (self.script_upload_path, mount_obj.mountpoint))\n\n proc = g.run_async(mount_obj.client_system, command,\n user=mount_obj.user)\n self.all_mounts_procs.append(proc)\n self.io_validation_complete = False\n\n # Validate IO\n self.assertTrue(\n validate_io_procs(self.all_mounts_procs, self.mounts),\n \"IO failed on some of the clients\"\n )\n self.io_validation_complete = True\n\n # Bring brick0 offline\n g.log.info('Bringing bricks %s offline', bricks_list[0])\n ret = bring_bricks_offline(self.volname, bricks_list[0])\n self.assertTrue(ret, 'Failed to bring bricks %s offline'\n % bricks_list[0])\n\n ret = are_bricks_offline(self.mnode, self.volname,\n [bricks_list[0]])\n self.assertTrue(ret, 'Bricks %s are not offline'\n % bricks_list[0])\n g.log.info('Bringing bricks %s offline is successful',\n bricks_list[0])\n\n # Creating files on client side\n number_of_files_one_brick_off = '1000'\n self.all_mounts_procs = []\n for mount_obj in self.mounts:\n g.log.info(\"Generating data for %s:%s\",\n mount_obj.client_system, mount_obj.mountpoint)\n # Create files\n g.log.info('Creating files...')\n command = (\"python %s create_files \"\n \"-f %s \"\n \"--fixed-file-size 1k \"\n \"--base-file-name new_file \"\n \"%s\"\n % (self.script_upload_path,\n number_of_files_one_brick_off,\n mount_obj.mountpoint))\n\n proc = g.run_async(mount_obj.client_system, command,\n user=mount_obj.user)\n self.all_mounts_procs.append(proc)\n self.io_validation_complete = False\n\n # Validate IO\n self.assertTrue(\n validate_io_procs(self.all_mounts_procs, self.mounts),\n \"IO failed on some of the clients\"\n )\n self.io_validation_complete = True\n\n # Get heal info\n g.log.info(\"Getting heal info...\")\n heal_info_data = get_heal_info_summary(self.mnode, self.volname)\n self.assertIsNotNone(heal_info_data, 'Failed to get heal info.')\n g.log.info('Success in getting heal info')\n\n # Check quantity of file pending heal\n for brick in bricks_list[1:]:\n self.assertEqual(heal_info_data[brick]['numberOfEntries'],\n str(int(number_of_files_one_brick_off)+1),\n 'Number of files pending heal is not correct')\n\n # Setting options\n g.log.info('Setting options...')\n options = {\"performance.enable-least-priority\": \"enable\"}\n ret = set_volume_options(self.mnode, self.volname, options)\n self.assertTrue(ret, 'Failed to set options %s' % options)\n g.log.info(\"Successfully set %s for volume %s\",\n options, self.volname)\n\n # Bring brick1 offline\n g.log.info('Bringing bricks %s offline', bricks_list[1])\n ret = bring_bricks_offline(self.volname, bricks_list[1])\n self.assertTrue(ret, 'Failed to bring bricks %s offline'\n % bricks_list[1])\n\n ret 
= are_bricks_offline(self.mnode, self.volname,\n [bricks_list[1]])\n self.assertTrue(ret, 'Bricks %s are not offline'\n % bricks_list[1])\n g.log.info('Bringing bricks %s offline is successful',\n bricks_list[1])\n\n # Setting options\n g.log.info('Setting options...')\n options = {\"quorum-type\": \"fixed\"}\n ret = set_volume_options(self.mnode, self.volname, options)\n self.assertTrue(ret, 'Failed to set options %s' % options)\n g.log.info(\"Successfully set %s for volume %s\",\n options, self.volname)\n\n # Creating files on client side\n number_of_files_two_brick_off = '100'\n self.all_mounts_procs = []\n for mount_obj in self.mounts:\n g.log.info(\"Generating data for %s:%s\",\n mount_obj.client_system, mount_obj.mountpoint)\n # Create files\n g.log.info('Creating files...')\n command = (\"python %s create_files \"\n \"-f %s \"\n \"--fixed-file-size 1k \"\n \"--base-file-name new_new_file \"\n \"%s\"\n % (self.script_upload_path,\n number_of_files_two_brick_off,\n mount_obj.mountpoint))\n\n proc = g.run_async(mount_obj.client_system, command,\n user=mount_obj.user)\n self.all_mounts_procs.append(proc)\n self.io_validation_complete = False\n\n # Validate IO\n self.assertTrue(\n validate_io_procs(self.all_mounts_procs, self.mounts),\n \"IO failed on some of the clients\"\n )\n self.io_validation_complete = True\n\n # Get heal info\n g.log.info(\"Getting heal info...\")\n heal_info_data = get_heal_info_summary(self.mnode, self.volname)\n self.assertIsNotNone(heal_info_data, 'Failed to get heal info.')\n g.log.info('Success in getting heal info')\n\n # Check quantity of file pending heal\n number_of_files_to_check = str(int(number_of_files_one_brick_off) +\n int(number_of_files_two_brick_off) + 1)\n self.assertEqual(heal_info_data[bricks_list[-1]]['numberOfEntries'],\n number_of_files_to_check,\n 'Number of files pending heal is not correct')", "def test_runGame(self):\n # this is tested by playing the game. No good way to unit test this.\n pass", "def selfTest():\n try:\n loop = asyncio.get_event_loop()\n r34 = Rule34(loop)\n data = loop.run_until_complete(r34.getImageURLS(\"gay\", singlePage=True))\n if data is not None and len(data) != 0:\n print(\"self test passed\")\n exit(0)\n else:\n raise SelfTest_Failed(\"Automated self test failed to gather images\")\n except Exception as e:\n raise SelfTest_Failed(\"Automated self test failed with this error:\\n{}\".format(e))", "def test_different_workflows_and_cross_check_the_results(self):\n # Testcase 1. End-to-end report generation using coverage.py script. This is\n # the workflow of a regular user.\n cmd = [\n self.COVERAGE_SCRIPT,\n 'crypto_unittests',\n 'libpng_read_fuzzer',\n '-v',\n '-b',\n self.BUILD_DIR,\n '-o',\n self.REPORT_DIR_1,\n '-c'\n '%s/crypto_unittests' % self.BUILD_DIR,\n '-c',\n '%s/libpng_read_fuzzer -runs=0 third_party/libpng/' % self.BUILD_DIR,\n ]\n self.run_cmd(cmd)\n\n output_dir = os.path.join(self.REPORT_DIR_1, self.PLATFORM)\n self.verify_component_view(\n os.path.join(output_dir, 'component_view_index.html'))\n self.verify_directory_view(\n os.path.join(output_dir, 'directory_view_index.html'))\n self.verify_file_view(os.path.join(output_dir, 'file_view_index.html'))\n\n # Also try generating a report without components view. 
Useful for cross\n # checking with the report produced in the testcase #3.\n cmd = [\n self.COVERAGE_SCRIPT,\n 'crypto_unittests',\n 'libpng_read_fuzzer',\n '-v',\n '-b',\n self.BUILD_DIR,\n '-o',\n self.REPORT_DIR_1_NO_COMPONENTS,\n '-c'\n '%s/crypto_unittests' % self.BUILD_DIR,\n '-c',\n '%s/libpng_read_fuzzer -runs=0 third_party/libpng/' % self.BUILD_DIR,\n '--no-component-view',\n ]\n self.run_cmd(cmd)\n\n output_dir = os.path.join(self.REPORT_DIR_1_NO_COMPONENTS, self.PLATFORM)\n self.verify_directory_view(\n os.path.join(output_dir, 'directory_view_index.html'))\n self.verify_file_view(os.path.join(output_dir, 'file_view_index.html'))\n self.assertFalse(\n os.path.exists(os.path.join(output_dir, 'component_view_index.html')))\n\n # Testcase #2. Run the script for post processing in Chromium tree. This is\n # the workflow of the code coverage bots.\n instr_profile_path = os.path.join(self.REPORT_DIR_1, self.PLATFORM,\n 'coverage.profdata')\n\n cmd = [\n self.COVERAGE_SCRIPT,\n 'crypto_unittests',\n 'libpng_read_fuzzer',\n '-v',\n '-b',\n self.BUILD_DIR,\n '-p',\n instr_profile_path,\n '-o',\n self.REPORT_DIR_2,\n ]\n self.run_cmd(cmd)\n\n # Verify that the output dirs are the same except of the expected diff.\n report_1_listing = set(_RecursiveDirectoryListing(self.REPORT_DIR_1))\n report_2_listing = set(_RecursiveDirectoryListing(self.REPORT_DIR_2))\n logs_subdir = os.path.join(self.PLATFORM, 'logs')\n self.assertEqual(\n set([\n os.path.join(self.PLATFORM, 'coverage.profdata'),\n os.path.join(logs_subdir, 'crypto_unittests_output.log'),\n os.path.join(logs_subdir, 'libpng_read_fuzzer_output.log'),\n ]), report_1_listing - report_2_listing)\n\n output_dir = os.path.join(self.REPORT_DIR_2, self.PLATFORM)\n self.verify_component_view(\n os.path.join(output_dir, 'component_view_index.html'))\n self.verify_directory_view(\n os.path.join(output_dir, 'directory_view_index.html'))\n self.verify_file_view(os.path.join(output_dir, 'file_view_index.html'))\n\n # Verify that the file view pages are binary equal.\n report_1_file_view_data = _ReadFile(\n os.path.join(self.REPORT_DIR_1, self.PLATFORM, 'file_view_index.html'))\n report_2_file_view_data = _ReadFile(\n os.path.join(self.REPORT_DIR_2, self.PLATFORM, 'file_view_index.html'))\n self.assertEqual(report_1_file_view_data, report_2_file_view_data)\n\n # Testcase #3, run coverage_utils.py on manually produced report and summary\n # file. 
This is the workflow of OSS-Fuzz code coverage job.\n objects = [\n '-object=%s' % os.path.join(self.BUILD_DIR, 'crypto_unittests'),\n '-object=%s' % os.path.join(self.BUILD_DIR, 'libpng_read_fuzzer'),\n ]\n\n cmd = [\n self.PYTHON,\n self.COVERAGE_UTILS,\n '-v',\n 'shared_libs',\n '-build-dir=%s' % self.BUILD_DIR,\n ] + objects\n\n shared_libraries = self.run_cmd(cmd)\n objects.extend(shared_libraries.split())\n\n instr_profile_path = os.path.join(self.REPORT_DIR_1_NO_COMPONENTS,\n self.PLATFORM, 'coverage.profdata')\n cmd = [\n self.LLVM_COV,\n 'show',\n '-format=html',\n '-output-dir=%s' % self.REPORT_DIR_3,\n '-instr-profile=%s' % instr_profile_path,\n ] + objects\n if self.PLATFORM in ['linux', 'mac']:\n cmd.extend(['-Xdemangler', 'c++filt', '-Xdemangler', '-n'])\n self.run_cmd(cmd)\n\n cmd = [\n self.LLVM_COV,\n 'export',\n '-summary-only',\n '-instr-profile=%s' % instr_profile_path,\n ] + objects\n summary_output = self.run_cmd(cmd)\n\n summary_path = os.path.join(self.REPORT_DIR_3, 'summary.json')\n with open(summary_path, 'wb') as f:\n f.write(summary_output)\n\n cmd = [\n self.PYTHON,\n self.COVERAGE_UTILS,\n '-v',\n 'post_process',\n '-src-root-dir=%s' % self.CHROMIUM_SRC_DIR,\n '-summary-file=%s' % summary_path,\n '-output-dir=%s' % self.REPORT_DIR_3,\n ]\n self.run_cmd(cmd)\n\n output_dir = os.path.join(self.REPORT_DIR_3, self.PLATFORM)\n self.verify_directory_view(\n os.path.join(output_dir, 'directory_view_index.html'))\n self.verify_file_view(os.path.join(output_dir, 'file_view_index.html'))\n self.assertFalse(\n os.path.exists(os.path.join(output_dir, 'component_view_index.html')))\n\n # Verify that the file view pages are binary equal.\n report_1_file_view_data_no_component = _ReadFile(\n os.path.join(self.REPORT_DIR_1_NO_COMPONENTS, self.PLATFORM,\n 'file_view_index.html'))\n report_3_file_view_data = _ReadFile(\n os.path.join(self.REPORT_DIR_3, self.PLATFORM, 'file_view_index.html'))\n self.assertEqual(report_1_file_view_data_no_component,\n report_3_file_view_data)\n\n # Testcase 4. Export coverage data in lcov format using coverage.py script.\n cmd = [\n self.COVERAGE_SCRIPT,\n 'crypto_unittests',\n 'libpng_read_fuzzer',\n '--format',\n 'lcov',\n '-v',\n '-b',\n self.BUILD_DIR,\n '-o',\n self.REPORT_DIR_4,\n '-c'\n '%s/crypto_unittests' % self.BUILD_DIR,\n '-c',\n '%s/libpng_read_fuzzer -runs=0 third_party/libpng/' % self.BUILD_DIR,\n ]\n self.run_cmd(cmd)\n\n output_dir = os.path.join(self.REPORT_DIR_4, self.PLATFORM)\n self.verify_lcov_file(os.path.join(output_dir, 'coverage.lcov'))" ]
[ "0.6602023", "0.65421003", "0.62224287", "0.62081254", "0.6206044", "0.5991344", "0.5950729", "0.59440666", "0.5933885", "0.5919975", "0.5912514", "0.5885823", "0.5882865", "0.5820897", "0.581793", "0.5800086", "0.5794324", "0.57891035", "0.578905", "0.57890207", "0.57865036", "0.5782528", "0.57785404", "0.57627034", "0.57579255", "0.5742589", "0.5727967", "0.5727967", "0.5727036", "0.57056844", "0.5697101", "0.5697101", "0.5697101", "0.56942266", "0.5693399", "0.56930214", "0.5692873", "0.56881946", "0.568669", "0.5679702", "0.5675384", "0.56597835", "0.56449294", "0.5642378", "0.5639108", "0.5637868", "0.5636497", "0.5636497", "0.5636497", "0.5636497", "0.5636497", "0.5624262", "0.5622288", "0.562205", "0.56169266", "0.56169176", "0.56150204", "0.5604626", "0.56028414", "0.56028414", "0.5598135", "0.55970883", "0.5595181", "0.55925524", "0.55877966", "0.5582788", "0.5574614", "0.55649877", "0.55567485", "0.5551307", "0.55462044", "0.5545987", "0.5545064", "0.5541892", "0.55416375", "0.5538375", "0.553469", "0.553072", "0.55252886", "0.5518188", "0.551229", "0.55111367", "0.5505523", "0.55027664", "0.55021375", "0.54973775", "0.5495872", "0.5495872", "0.5493625", "0.5492698", "0.54924023", "0.54890615", "0.54884285", "0.548598", "0.5484674", "0.5483309", "0.54792285", "0.5478427", "0.5472585", "0.5450437" ]
0.5492133
91
decenter off will give cmd.error and stage=failed, but won't fail the command.
def test_deactivate_guider_decenter_fails(self): sopTester.updateModel('guider', TestHelper.guiderState['guiderOnDecenter']) self.cmd.failOn = 'guider decenter off' self._deactivate_guider_decenter(1, 9, 0, 2, didFail=True)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def halt_cmd(ctx):\n pass", "def command_failed_error(cmd):\n\n output_1 = colored(' - Error: Failed to run command ', 'red')\n output_2 = command(cmd)\n return output_1 + output_2 + '\\n'", "def assert_cli_fails_properly(response, caplog):\n # don't exit successfully\n assert response.exit_code != 0\n\n # end the logs with a record on an error\n assert caplog.records[-1].levelname == \"ERROR\"", "def fail(msg):\n\n # Not sure if simply raising the exception is clearer.\n raise CommandFailed(msg)", "def fail():\n sys.stdout.write('%s[ fail ]%s\\n' % (colors.RED, colors.RESET))", "def test_run_cmd_simple_negative(caplog):\n caplog.set_level(logging.DEBUG)\n cmd = \"ls /tmp/this/file/isindeednotthereatall\"\n with pytest.raises(CommandFailed) as excinfo:\n utils.run_cmd(cmd)\n assert \"No such file or directory\" in str(excinfo.value)\n # check that run_cmd logged the run as expected\n assert caplog.records[0].levelname == 'INFO'\n assert caplog.records[0].message == f'Executing command: {cmd}'\n assert caplog.records[1].levelname == 'DEBUG'\n assert caplog.records[1].message == 'Command stdout is empty'\n assert caplog.records[2].levelname == 'WARNING'\n assert caplog.records[2].message.startswith(\"Command stderr: ls:\")\n assert \"No such file or directory\" in caplog.records[2].message\n assert caplog.records[3].levelname == 'DEBUG'\n assert caplog.records[3].message == 'Command return code: 2'", "def state_failsafe_exit(cfg, app, win):", "def test_stratis_bad_subcommand(self):\n for command_line in [\n [\"notasub\"],\n [\"daemon\", \"notasub\"],\n [\"pool\", \"notasub\"],\n [\"blockdev\", \"notasub\"],\n [\"filesystem\", \"notasub\"],\n ]:\n for prefix in [[], [\"--propagate\"]]:\n self.check_system_exit(prefix + command_line, _PARSE_ERROR)", "def handle_unsuccessful_cmd(out, error_template, missing_resource_template):\n if MISSING_RESOURCE in out.lower() or UNRECOGNIZED_RESOURCE in out.lower():\n logger.info(missing_resource_template)\n else:\n logger.warning(error_template.format(out.rstrip()))", "def error_check(command):\r\n\r\n # TODO\r", "def error_throw(self,stage):\n if self.is_table_info == False:\n print(\"please enter table info by table_info()\")\n sys.exit(0)\n if stage == 'rank':\n if self.import_method == 'none':\n self.error_output_import()\n elif stage == 'output':\n if self.import_method == 'none':\n self.error_output_import()\n else: \n if self.rank_method == 'none':\n self.error_output_rank()", "def office_clean_failed(parser, args, params):\n parser.parse_known_args(args)\n control.clean_failed(params)", "def shell_fail_server(self, cmd):\n self.shell_cmd = cmd\n raise ConnectionResetError", "def test_run_cmd_simple_negative_ignoreerror(caplog):\n caplog.set_level(logging.DEBUG)\n cmd = \"ls /tmp/this/file/isindeednotthereatall\"\n assert utils.run_cmd(cmd, ignore_error=True) == \"\"\n # check that run_cmd logged the run as expected\n assert caplog.records[0].levelname == 'INFO'\n assert caplog.records[0].message == f'Executing command: {cmd}'\n assert caplog.records[1].levelname == 'DEBUG'\n assert caplog.records[1].message == 'Command stdout is empty'\n assert caplog.records[2].levelname == 'WARNING'\n assert caplog.records[2].message.startswith(\"Command stderr: ls:\")\n assert \"No such file or directory\" in caplog.records[2].message\n assert caplog.records[3].levelname == 'DEBUG'\n assert caplog.records[3].message == 'Command return code: 2'", "def test_hide_failed():\n cmd_list = [NETMIKO_GREP] + ['--hide-failed', 'interface', 'all']\n (output, std_err) = 
subprocess_handler(cmd_list)\n assert \"Failed devices\" not in output", "def rollback(self, stage, enodes, exception):", "def procFail(proc):\n\tif 'a' in proc.config._notify.when['pipeline']:\n\t\tlogger.debug('Notifying process fails')\n\t\tEMAIL.send('proc', proc, 'abort')", "def delcomperrbypassed(self) :\n\t\ttry :\n\t\t\treturn self._delcomperrbypassed\n\t\texcept Exception as e:\n\t\t\traise e", "def fail_local_operation(operation, node, environment):\n run_operation(operation, node, environment, succeed=False)", "def run_cmd(cmd, verbose=1, target=None):\n cmd = \"set -u pipefail; \" + cmd\n if verbose == 2:\n sys.stderr.write(\"\\nRunning command:\\n%s\\n\" % cmd)\n stdout = open(\"/dev/stdout\", \"w\")\n stderr = open(\"/dev/stderr\", \"w\")\n elif verbose == 1:\n sys.stderr.write(\"\\nRunning command:\\n%s\\n\" % cmd)\n stdout = open(\"/dev/null\", \"w\")\n stderr = open(\"/dev/null\", \"w\")\n else:\n stdout = open(\"/dev/null\", \"w\")\n stderr = open(\"/dev/null\", \"w\")\n\n res = subprocess.call(cmd, shell=True, stderr=stderr, stdout=stdout)\n stderr.close()\n if res != 0:\n print(\"Command Failed! Please Check!\")\n exit(1)", "def test_cmd2_argparse_exception(capsys):\n app = PluggedApp()\n app.register_postcmd_hook(app.postcmd_hook)\n app.register_cmdfinalization_hook(app.cmdfinalization_hook)\n\n # First generate no exception and make sure postcmd_hook, postcmd, and cmdfinalization_hook run\n app.onecmd_plus_hooks('argparse_cmd arg_val')\n out, err = capsys.readouterr()\n assert out == 'arg_val\\n'\n assert not err\n assert app.called_postcmd == 2\n assert app.called_cmdfinalization == 1\n\n app.reset_counters()\n\n # Next cause an argparse exception and verify no postcmd stuff runs but cmdfinalization_hook still does\n app.onecmd_plus_hooks('argparse_cmd')\n out, err = capsys.readouterr()\n assert not out\n assert \"Error: the following arguments are required: my_arg\" in err\n assert app.called_postcmd == 0\n assert app.called_cmdfinalization == 1", "def test_option_unhandled(self):\n cmd, output = runCmdOutput(['--__unhandled__'])\n self.assertEqual(cmd.returncode, os.EX_USAGE)", "def failure_cmd(self) -> str:\n return \"{} --enable=all -f -q {}\".format(\n self.conf.get_executable(), constants.ROOT_PATH + \"/data/cppcheck-152/trial-fail.cpp\"\n )", "def run_check_errors(cmd):\n if type(cmd) == str:\n cmd = cmd.split()\n output = subprocess.run(cmd, capture_output=True, text=True)\n if output.stderr != \"\":\n print_cmd = \" \".join(map(str, cmd))\n sys.exit(\n f\"The error {output.stderr} was generated when running {print_cmd}. Exiting.\"\n )\n return", "def _cmd_exit(self):\n raise EOFError()", "def stop_err(msg):\n sys.stderr.write('%s\\n' % msg)\n sys.exit(-1)", "def test_state_after_failure(self):\n pass", "def cmd_error_check(self, cmd_out):\n for err in self.err_strings:\n if re.search('\\\\b%s\\\\b' % (err), cmd_out, re.I):\n _log.info(cmd_out)\n _log.info(\n \"Cmd execution failed! with this Return Error: \\n%s\" % (\n cmd_out))\n return 0", "def testFailure():\n run(\"chariot-me\") #Start management-engine without initial deplflag\n egress()", "def assert_cmd_fail(self, cmds):\n print('@ %s [supposed to fail]' % cmds)\n try:\n self.cc.batch_command(cmds)\n except CommandFailed:\n pass\n else:\n msg = 'Command %r did not fail.' 
% cmds\n raise Exception(msg)", "def test_failed():\n build()\n sh(\"%s %s --last-failed\" % (PYTHON, RUNNER_PY))", "def test_shell_bad_command():\n out, err = shell_command(\"ls adasdasdas\")\n assert out is None\n assert \"adasdasdas\" in err", "def halt_cmd(cmd, cnt, args):\n log(\"halt\") # need an interrupt handler to do this\n cpu.halt()", "def evaluateCommand(self, cmd):\n if cmd.rc != 0:\n return FAILURE\n if self.errors:\n\t if halt_on_lintian_error:\n\t return FAILURE\n\t else:\n\t\treturn WARNINGS\n if self.warnings:\n return WARNINGS\n return SUCCESS", "def _ssh_quiet(self, cmd, allow_fail=False):\n try:\n if self.logger is not None:\n self.logger.debug(\"Trying to run remote command: \" + cmd)\n subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT)\n return 0\n except subprocess.CalledProcessError as e:\n # Depending on the user, failure of remote commands may be part of normal usage patterns (e.g. if used in\n # a \"wait_until\" loop). So, log it, but don't make it too scary.\n if self.logger is not None:\n self.logger.debug(\"Error running remote command: \" + cmd)\n self.logger.debug(e.output)\n\n if allow_fail:\n return e.returncode\n raise e", "def stderr_lbs():\n\n # Make a (deep) copy\n cmd_orig = list(kivaloo.servers.Server_lbs.cmd)\n\n # Deliberately introduce an error\n kivaloo.servers.Server_lbs.cmd.append(\"--badarg\")\n\n try:\n lbs = kivaloo.servers.Server_lbs()\n lbs.shutdown()\n raise Exception(\"Server should not have started!\")\n except kivaloo.servers.StartError as err:\n # We wanted this exception. Now we check stderr.\n lines = str(err).splitlines()[3]\n assert \"lbs: illegal option -- --badarg\" in lines\n\n # Restore the original\n kivaloo.servers.Server_lbs.cmd = cmd_orig", "def reset_stage():\n return set_stage('')", "def evaluateCommand(self, cmd):\n\n if cmd.rc == 127:\n return WARNINGS\n elif cmd.rc != 0:\n if halt_on_unittest_error:\n return FAILURE\n else:\n return WARNINGS\n\n if self.warnings or self.errors:\n return WARNINGS\n return SUCCESS", "def teardown():\n enstools.core.set_behavior(log_level=\"ERROR\")", "def _fail(self, msg, err=None):\n if self.session:\n self.session.cleanup()\n\n if err:\n self.module.fail_json(msg=msg + \"\\n\" + str(err), **self.result)\n else:\n self.module.fail_json(msg=msg, **self.result)", "def error_exit(self, msg):\n wrappedmsg = textwrap.fill(msg, 78)\n fullmsg = \"%s\\n%s\" % (wrappedmsg, self.get_usage_command())\n raise SBToolError(fullmsg, True)", "def run_and_handle_error(cmd, print_cmd=True):\n stdout, stderr = run_subprocess_cmd(cmd, print_cmd=print_cmd, print_stdout_stderr=False)\n if stderr:\n import sys; sys.exit('Standard Errors:\\n%s\\n' % stderr)\n return stdout, stderr", "def _checkCommandStatus(self, lastCommand=False):\n p = self.spawnProc\n p.sendline('echo $?')\n regex = re.compile('^[0-9]+',re.M)\n p.expect(regex, 2)\n msg = '_checkCommandStatus : Execution of command FAILED'\n \tif lastCommand:\n \t msg = '_checkCommandStatus :Execution of command : \"%s\" FAILED' %lastCommand\n if p.after != '0' and p.after != '99':\n raise AssertionError(msg)", "def move_invalid():\n check50.run(run_command).stdin(\"EAST\").stdout(\"Invalid command.\")", "def run_cmd_wait_nofail(*args, **kwargs):\n try:\n return run_cmd_wait(*args, **kwargs)\n except CommandError as e:\n return e.code, e.out, e.err", "def tear_down(self):\n verdict, msg = TestStepEngine.tear_down(self)\n self._device.inject_device_log(\"i\", \"ACS_TESTCASE\", \"TEARDOWN: %s\" % self._name)\n return verdict, msg", "def 
_handle_failure(self, proc, test_case):\n if proc.returncode != 0:\n print('ERROR: Test execution failed: {}'.format(test_case.get_name()))\n stdout, stderr = proc.communicate()\n raise TestCaseFailure('Test case {} failed. stdout: {}, stderr: {}, '\n 'return code: {}.'.format(test_case.get_name(),\n stdout, stderr,\n proc.returncode))", "def test_pipeline_downgrade(sdc_executor):\n builder = sdc_executor.get_pipeline_builder()\n\n generator = builder.add_stage(label='Dev Data Generator')\n trash = builder.add_stage(label='Trash')\n generator >> trash\n pipeline = builder.build()\n # We manually alter the pipeline version to some really high number\n # TLKT-561: PipelineInfo doesn't seem to be exposed in the APIs\n pipeline._data['pipelineConfig']['info']['sdcVersion'] = '99.99.99'\n\n sdc_executor.add_pipeline(pipeline)\n\n with pytest.raises(Exception) as e:\n sdc_executor.validate_pipeline(pipeline)\n\n assert 'VALIDATION_0096' in e.value.issues", "def _cancelCommands(self, reason):\n while self._current:\n cmd = self._current.pop(0)\n cmd.fail(reason)", "def stop_err(msg, error_level=1):\n sys.stderr.write(\"%s\\n\" % msg)\n sys.exit(error_level)", "def run_or_die(command):\n (status, stdio) = commands.getstatusoutput(command)\n if status != 0:\n raise Exception(\"command '%s' failed with exit status %d and output '%s'\" % (command, status, stdio))\n return stdio", "def test_cmd_error(self):\n task = Task(\"uid\", False, False, \"does_not_exist\", None, \".\")\n task._checkpoint_dir = tmp_checkpoint_dir()\n with self.assertRaisesRegexp(RuntimeError, \".*executing Task's command:.*\"):\n task.run()\n task.shell = True\n with self.assertRaisesRegexp(RuntimeError, \".*executing Task's command:.*\"):\n task.run()\n task._dry_run = True\n task.run() # No longer raises RuntimeError", "def test_stratis_no_subcommand(self):\n for command_line in [[], [\"daemon\"]]:\n for prefix in [[], [\"--propagate\"]]:\n self.check_system_exit(prefix + command_line, _PARSE_ERROR)", "def __handle_except(inst):\n return __standardize_result(\n False,\n \"Docker-compose command {} failed\".format(inspect.stack()[1][3]),\n \"{}\".format(inst),\n None,\n )", "def test_halt_status_supersedes_completed(get_pipe_manager, raise_error, test_type):\n\n # Create manager and completion flag.\n pm = get_pipe_manager(name=\"halt-status-flag\")\n pm._set_status_flag(COMPLETE_FLAG)\n path_complete_flag = pm._flag_file_path(COMPLETE_FLAG)\n assert os.path.isfile(path_complete_flag)\n\n # Perform the halt.\n try:\n pm.halt(raise_error=raise_error)\n except PipelineHalt:\n # We don't care about exceptionality here, just that the flag files\n # are adjusted regardless of the halt type.\n pass\n\n # Check either the presence of the halt flag or the absence of the\n # completion flag, depending on test parameterization.\n if test_type == \"halt_flag\":\n path_halt_flag = pm._flag_file_path(PAUSE_FLAG)\n assert os.path.isfile(path_halt_flag)\n elif test_type == \"complete_flag\":\n assert not os.path.isfile(path_complete_flag)\n else:\n raise ValueError(\"Unknown test type: '{}'\".format(test_type))", "def _fake_execute(self, *cmd, **kwargs):\n cmdlist = list(cmd)\n exe = cmdlist.pop(0)\n if exe == 'vgc-cluster':\n exe = cmdlist.pop(0)\n if exe == \"request-cancel\":\n self._request_cancel = True\n if self._return_blocked > 0:\n return 'Request cancelled', ''\n else:\n raise processutils.ProcessExecutionError(exit_code=1)\n elif self._fail_vgc_cluster:\n raise processutils.ProcessExecutionError(exit_code=1)\n elif exe == 
\"--version\":\n return \"HGST Solutions V2.5.0.0.x.x.x.x.x\", ''\n elif exe == \"space-list\":\n return self._parse_space_list(cmdlist)\n elif exe == \"space-create\":\n self._parse_space_create(cmdlist)\n if self._return_blocked > 0:\n self._return_blocked = self._return_blocked - 1\n out = \"VGC_CREATE_000002\\nBLOCKED\\n\"\n raise processutils.ProcessExecutionError(stdout=out,\n exit_code=1)\n return '', ''\n elif exe == \"space-delete\":\n return self._parse_space_delete(cmdlist)\n elif exe == \"space-extend\":\n return self._parse_space_extend(cmdlist)\n elif exe == \"host-storage\":\n if self._fail_host_storage:\n raise processutils.ProcessExecutionError(exit_code=1)\n return HGST_HOST_STORAGE, ''\n elif exe == \"domain-list\":\n return self._parse_domain_list()\n elif exe == \"network-list\":\n return self._parse_network_list()\n elif exe == \"space-set-apphosts\":\n if self._fail_set_apphosts:\n raise processutils.ProcessExecutionError(exit_code=1)\n return '', ''\n else:\n raise NotImplementedError\n elif exe == 'ip':\n if self._fail_ip:\n raise processutils.ProcessExecutionError(exit_code=1)\n else:\n return IP_OUTPUT, ''\n elif exe == 'dd':\n self.dd_count = -1\n for p in cmdlist:\n if 'count=' in p:\n self.dd_count = int(p[6:])\n elif 'bs=' in p:\n self.bs = p[3:]\n return DD_OUTPUT, ''\n else:\n return '', ''", "async def on_command_error(ctx, error):\n await send_block(\n ctx,\n \"\".join(\n traceback.format_exception(\n etype=type(error), value=error, tb=error.__traceback__\n )\n ),\n )", "def test_finished_no_vm(self):\n self.command.finished()", "def cleanup_on_failure(self) -> pulumi.Input[bool]:\n return pulumi.get(self, \"cleanup_on_failure\")", "def _failed(self, msg):\n self.log(msg)\n self.result.passed = False\n self.result.add_error(msg)\n self.log(u\"Failed\")", "def test_command(self):\r\n with self.assertRaises(SystemExit) as ex:\r\n with self.assertRaisesRegexp(CommandError, 'This script requires.*'):\r\n call_command('git_export', 'blah', 'blah', 'blah',\r\n stderr=StringIO.StringIO())\r\n self.assertEqual(ex.exception.code, 1)\r\n\r\n with self.assertRaises(SystemExit) as ex:\r\n with self.assertRaisesRegexp(CommandError, 'This script requires.*'):\r\n call_command('git_export', stderr=StringIO.StringIO())\r\n self.assertEqual(ex.exception.code, 1)\r\n\r\n # Send bad url to get course not exported\r\n with self.assertRaises(SystemExit) as ex:\r\n with self.assertRaisesRegexp(CommandError, GitExportError.URL_BAD):\r\n call_command('git_export', 'foo/bar/baz', 'silly',\r\n stderr=StringIO.StringIO())\r\n self.assertEqual(ex.exception.code, 1)\r\n # Send bad course_id to get course not exported\r\n with self.assertRaises(SystemExit) as ex:\r\n with self.assertRaisesRegexp(CommandError, GitExportError.BAD_COURSE):\r\n call_command('git_export', 'foo/bar:baz', 'silly',\r\n stderr=StringIO.StringIO())\r\n self.assertEqual(ex.exception.code, 1)", "async def on_command_error(self, ctx: Context, e: errors.CommandError) -> None:\n command = ctx.command\n\n if hasattr(e, \"handled\"):\n log.trace(f\"Command {command} had its error already handled locally; ignoring.\")\n return\n\n debug_message = (\n f\"Command {command} invoked by {ctx.message.author} with error \"\n f\"{e.__class__.__name__}: {e}\"\n )\n\n if isinstance(e, errors.CommandNotFound) and not getattr(ctx, \"invoked_from_error_handler\", False):\n if await self.try_silence(ctx):\n return\n if await self.try_run_fixed_codeblock(ctx):\n return\n await self.try_get_tag(ctx) # Try to look for a tag with the 
command's name\n elif isinstance(e, errors.UserInputError):\n log.debug(debug_message)\n await self.handle_user_input_error(ctx, e)\n elif isinstance(e, errors.CheckFailure):\n log.debug(debug_message)\n await self.handle_check_failure(ctx, e)\n elif isinstance(e, errors.CommandOnCooldown | errors.MaxConcurrencyReached):\n log.debug(debug_message)\n await ctx.send(e)\n elif isinstance(e, errors.CommandInvokeError):\n if isinstance(e.original, ResponseCodeError):\n await self.handle_api_error(ctx, e.original)\n elif isinstance(e.original, LockedResourceError):\n await ctx.send(f\"{e.original} Please wait for it to finish and try again later.\")\n elif isinstance(e.original, InvalidInfractedUserError):\n await ctx.send(f\"Cannot infract that user. {e.original.reason}\")\n else:\n await self.handle_unexpected_error(ctx, e.original)\n elif isinstance(e, errors.ConversionError):\n if isinstance(e.original, ResponseCodeError):\n await self.handle_api_error(ctx, e.original)\n else:\n await self.handle_unexpected_error(ctx, e.original)\n elif isinstance(e, errors.DisabledCommand):\n log.debug(debug_message)\n else:\n # ExtensionError\n await self.handle_unexpected_error(ctx, e)", "def test_fail_pipeline_stage():\n fail_stage = FailStage()\n df = _test_df()\n with pytest.raises(FailedPreconditionError):\n fail_stage.apply(df, verbose=True)", "def test_error(self) -> None:\n context: Dict[str, ArtifactDescriptor] = dict()\n cmd = ModuleCommand(\n package_id='error', \n command_id='error',\n arguments=[],\n packages=None\n )\n controller = FakeWorkflowController()\n self.backend.execute_async(\n task=TaskHandle(\n task_id='000',\n project_id=self.PROJECT_ID,\n controller=controller\n ),\n command=cmd,\n artifacts=context\n )\n time.sleep(2)\n self.assertEqual(controller.task_id, '000')\n self.assertEqual(controller.state, 'ERROR')\n self.assertEqual(len(controller.outputs.stdout), 0)\n self.assertNotEqual(len(controller.outputs.stderr), 0)", "def test_cli_exit_emessage_to_stderr():\n\n # SystemExit print's to stdout by default\n with pytest.raises(SystemExit) as excinfo:\n Packager.from_cli(['-d', 'data/output.csv', '-f', 'sdfa'])\n\n excinfo.match('Field sdfa not found in file list.')", "def shell_command(context, cmd, err_msg=\"Shell command error\"):\n try:\n\n context.last_cmd = cmd\n output = check_output(cmd, shell=True, cwd=os.getcwd())\n context.output = output\n\n except:\n raise Exception(err_msg)", "def execute_failure(self, *args, **kwargs):\n return 1, \"\", None", "def exit_failure():\n\tglobal state\n\tglobal c1, c2\n\tglobal a1, a2\n\n\t# Tell clients to close connections\n\tc1.send(\"CLOSE\")\n\tc2.send(\"CLOSE\")\n\n\tprint \"EXIT FAILURE\"\n\tstate += 1 # increment state to 10", "def exit(status=None): # real signature unknown; restored from __doc__\n pass", "def fail_with(s):\n print \"[FAILURE] %s\" % s\n sys.exit(1)", "def __exit__(self, type, value, traceback):\n log.Print('Shutting down metadata credentials')\n subprocess.check_call(['docker', 'rm', '-f', self._name],\n stdin=None, stdout=None, stderr=None)", "def test__clean_status(self):\n assert not dockerprettyps._clean_status(\"Exited (1) 22 minutes ago\")\n assert dockerprettyps._clean_status(\"Up 12 minutes\")", "def state_failsafe_do(cfg, app, win, events):", "def test_option_unrecognized(self):\n cmd, output = runCmdOutput(['--unrecognized'])\n self.assertEqual(cmd.returncode, os.EX_USAGE)", "def test_halt_state(get_pipe_manager):\n pm = get_pipe_manager(name=\"test-pipe\")\n assert pm._active\n 
pm.halt(raise_error=False)\n assert pm.halted\n assert not pm._active", "def abortAndBrake(self):\n return self.set_command(\"B\")", "def fail(msg, exit_code=1):\n sys.stderr.write(\"{}\\n\".format(msg))\n sys.exit(exit_code)", "def test_failedCommandProvidesOutput(self):\n bookTeX = FilePath(self.mktemp() + \".tex\")\n builder = BookBuilder()\n inputState = bookTeX.parent().children()\n exc = self.assertRaises(\n CommandFailed,\n builder.buildPDF,\n bookTeX, self.howtoDir, FilePath(self.mktemp()))\n self.assertTrue(exc.output)\n newOutputState = set(bookTeX.parent().children()) - set(inputState)\n self.assertEqual(len(newOutputState), 1)\n workPath = newOutputState.pop()\n self.assertTrue(\n workPath.isdir(),\n \"Expected work path %r was not a directory.\" % (workPath.path,))", "def error(self, msg):\n fullmsg = \"Subcommand '%s': %s\\n%s\" % (self.tool.get_command(), msg,\n self.get_usage_command())\n raise SBToolError(fullmsg, True)", "def test_bad_input():\n\n for arg in ['5', 'ch']:\n rv, out = getstatusoutput('{} {}'.format(prg, arg))\n assert rv == 0\n expected = 'I do not know \"{}\".'.format(arg)\n assert out.strip() == expected", "def print_failure_msg(msg):\n click.secho(msg, fg='red', file=sys.stderr)", "def test_ComputerPartition_error_CleanupResource_StoppedState(self):\n sequence_list = SequenceList()\n sequence_string = self.prepare_confirmed_cleanup_resource_packing_list + '\\\n LoginDefaultUser \\\n StartSalePackingList \\\n CleanTic \\\n StopSalePackingList \\\n Tic \\\n Logout \\\n SlapLoginCurrentComputer \\\n CheckSuccessComputerPartitionErrorCall \\\n CleanTic \\\n SlapLogout \\\n LoginDefaultUser \\\n CheckSalePackingListErrorText \\\n CheckStoppedSalePackingList \\\n Logout \\\n LoginERP5TypeTestCase \\\n CheckSiteConsistency \\\n Logout \\\n '\n sequence_list.addSequenceString(sequence_string)\n sequence_list.play(self)", "def cleanup_exit(self,submode='qsub'):\n foo='cleanup_exit () {\\n'\n foo+=' shared_temporal_libraries _CLEANUP_\\n'\n foo+=' hostname && echo $wd\\n'\n foo+=' basename $LOCAL_LOG\\n'\n foo+=' echo \"exit mode = $1\"\\n'\n foo+=' /bin/mkdir -p `dirname $REMOTE_LOG`\\n'\n if submode=='sub':\n foo+=' /bin/mv $LOCAL_LOG $REMOTE_LOG\\n'\n foo+=' /bin/rm -rf $wd\\n'\n foo+=' exit $1\\n'\n foo+='}' \n return foo", "def ConsoleExit(self, errorcode=200):\n pass", "def fail(self):\n self.cleanup()\n self.runner.report_job_fail(self.id)", "def __try_command(cmd, description):\n try:\n out = subprocess.check_output(cmd, stderr=subprocess.STDOUT);\n return (True, out.decode(\"utf-8\")) # success\n except subprocess.CalledProcessError as e:\n print(\"Error while {:s}, return code is non-zero ({:d})\".format(description, e.returncode))\n print(\"Command: {:s}\".format(\" \".join(e.cmd)))\n if e.output:\n print(\"Output: {:s}\".format(e.output.decode(\"utf-8\").strip()))\n\n return (False, None) # error", "def test_on_record_error(sdc_builder, sdc_executor, stage_attributes):\n\n DATA = {'name': 'Al Gore', 'birthplace': 'Washington, D.C.'}\n on_record_error = stage_attributes['on_record_error']\n\n pipeline_builder = sdc_builder.get_pipeline_builder()\n\n dev_raw_data_source = pipeline_builder.add_stage('Dev Raw Data Source')\n dev_raw_data_source.data_format = 'JSON'\n dev_raw_data_source.raw_data = json.dumps(DATA)\n dev_raw_data_source.stop_after_first_batch = True\n\n field_replacer = pipeline_builder.add_stage('Field Replacer')\n field_replacer.set_attributes(replacement_rules=[{'setToNull': False, 'fields': '/age'}],\n field_does_not_exist='TO_ERROR',\n 
**stage_attributes)\n\n wiretap = pipeline_builder.add_wiretap()\n\n dev_raw_data_source >> field_replacer >> wiretap.destination\n\n pipeline = pipeline_builder.build()\n\n sdc_executor.add_pipeline(pipeline)\n\n if on_record_error == 'DISCARD':\n sdc_executor.start_pipeline(pipeline).wait_for_finished()\n\n assert not wiretap.error_records and not wiretap.output_records\n\n elif on_record_error == 'STOP_PIPELINE':\n try:\n sdc_executor.start_pipeline(pipeline).wait_for_status('RUN_ERROR')\n\n assert False, 'An exception should have been thrown'\n except RunError:\n\n assert not wiretap.error_records and not wiretap.output_records\n\n elif on_record_error == 'TO_ERROR':\n sdc_executor.start_pipeline(pipeline).wait_for_finished()\n\n record = wiretap.error_records[0]\n assert record.field == DATA and not wiretap.output_records", "def identify_result_exit(self, record):\n return [\"exit\"]", "def test_no_options(cli_runner: CliRunner, base_context: CGConfig):\n # GIVEN\n # WHEN dry running\n result = cli_runner.invoke(workflow_cmd, obj=base_context)\n\n # THEN command should have returned all pipelines that is supported\n assert result.exit_code == EXIT_SUCCESS\n assert \"balsamic\" in result.output\n assert \"balsamic-qc\" in result.output\n assert \"balsamic-umi\" in result.output\n assert \"microsalt\" in result.output\n assert \"mip-dna\" in result.output\n assert \"mip-rna\" in result.output\n assert \"rnafusion\" in result.output", "def clean_exit(code: int) -> None:\n raise StopJupyterExecution", "def test_norn_textfsm(task, result, cmd):\n if type(result) != list or type(result[0]) != dict:\n c_print(f'*** {task.host}: ERROR running \"{cmd}\" ***')", "def test_no_args(self):\r\n errstring = \"export requires two arguments\"\r\n with self.assertRaisesRegexp(CommandError, errstring):\r\n self.command.handle()", "def halt(*_, **kwargs):\n raise ExecutionFinished(\"Reached halt\")", "def mark_failure(self):\n LOGGER.debug('Marking current_state as: %s', self.States.FAILED)\n self.current_state = self.States.FAILED", "def finalize_error():\n print('')\n exit(-1)", "async def on_command_error(self, ctx, error):\n if self._error_cd.get_bucket(ctx.message).update_rate_limit():\n return self.dispatch('global_cooldown', ctx, '_warn_cd', '⚠️') \n \n error = getattr(error, 'original', error)\n \n is_owner = await ctx.is_owner()\n e_args = (type(error), error, error.__traceback__, 4) \n \n if not isinstance(error, (HTTPException, ClientException, CommandOnCooldown)):\n print_exception(*e_args)\n \n # Cooldown bypass \n if (isinstance(error, CommandOnCooldown) # there must be a better way\n and (is_owner or ctx.permissions_for(ctx.author).manage_messages)):\n return await ctx.reinvoke()\n \n if is_owner:\n lines = ''.join(format_exception(*e_args)) \n else:\n lines = str(error)\n \n await ctx.display(embed=ColoredEmbed(title='Error',\n description='```py\\n' + lines + '```'))", "def set_test_failed(self):\n self.set_result(Status.FAILED)", "async def on_command_error(self, ctx, err):\n if type(err) is CommandNotFound:\n await self.send_message(ctx, 'I don\\'t know how to do that.')", "def tc_fail(self, msg):\n self.recover()\n tc_fail(msg)", "def test_use_exit_status(self): # suppress(no-self-use)\n subprocess.call.return_value = 1\n GreenTestCommand(Distribution()).run()\n sys.exit.assert_called_with(1)" ]
[ "0.63274103", "0.59832925", "0.58065456", "0.5717935", "0.57014954", "0.56815827", "0.56410784", "0.5618887", "0.5596389", "0.5563665", "0.55591875", "0.5529268", "0.5520761", "0.55194193", "0.550422", "0.54478794", "0.5444886", "0.54415536", "0.5407555", "0.5348784", "0.5331414", "0.5328514", "0.52958626", "0.52953565", "0.5294402", "0.52738404", "0.5272428", "0.5267076", "0.52580595", "0.52576786", "0.52566475", "0.52496123", "0.52322865", "0.52312356", "0.521878", "0.5206138", "0.518883", "0.5188588", "0.51861286", "0.51806", "0.51768804", "0.51620907", "0.51500887", "0.5141884", "0.5141158", "0.51332474", "0.5123592", "0.5118905", "0.51110566", "0.5109225", "0.5105772", "0.5102047", "0.5090687", "0.50902843", "0.5087626", "0.5081077", "0.5075905", "0.50746906", "0.5073088", "0.5061488", "0.5058896", "0.50531286", "0.50514674", "0.50371146", "0.50370973", "0.5030129", "0.50290954", "0.50286", "0.50273997", "0.5018589", "0.50121385", "0.5010668", "0.50050557", "0.5004603", "0.49987963", "0.4994592", "0.49860057", "0.49842307", "0.4979492", "0.49757764", "0.49756068", "0.49732372", "0.49685705", "0.49685583", "0.496801", "0.496801", "0.49670598", "0.4962017", "0.4961806", "0.49577266", "0.49562523", "0.49548876", "0.495337", "0.49479207", "0.49470744", "0.4945704", "0.49439356", "0.49429917", "0.49415407", "0.49358675" ]
0.57412744
3
FF on, axis status, axis init, slew, FF on, guider flat, FF off, open FFS, 3x guider axes off, guider on
def test_goto_field_apogee(self):
    cmdState = self.actorState.gotoField
    cmdState.reinitialize(self.cmd)
    self._goto_feld_apogee(13, 46, 0, 0, cmdState)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def setup(self, flags):\n self.figure = pylab.figure(1)\n self.axes = {}\n self.stream_data = {}\n self.flags = flags", "def update_focal_axes(self):\n #self.update_sigma()\n self.updateGL()", "def update_flags(self):\n # view mode, filled vs wirefrom\n if self.view['wireframe']:\n gl.glPolygonMode(gl.GL_FRONT_AND_BACK, gl.GL_LINE)\n else:\n gl.glPolygonMode(gl.GL_FRONT_AND_BACK, gl.GL_FILL)\n\n # set fullscreen or windowed\n self.set_fullscreen(fullscreen=self.view['fullscreen'])\n\n # backface culling on or off\n if self.view['cull']:\n gl.glEnable(gl.GL_CULL_FACE)\n else:\n gl.glDisable(gl.GL_CULL_FACE)\n\n # case where we WANT an axis and NO vertexlist\n # is stored internally\n if self.view['axis'] and self._axis is None:\n from .. import creation\n # create an axis marker sized relative to the scene\n axis = creation.axis(origin_size=self.scene.scale / 100)\n # create ordered args for a vertex list\n args = rendering.mesh_to_vertexlist(axis)\n # store the axis as a reference\n self._axis = self.batch.add_indexed(*args)\n\n # case where we DON'T want an axis but a vertexlist\n # IS stored internally\n elif not self.view['axis'] and self._axis is not None:\n # remove the axis from the rendering batch\n self._axis.delete()\n # set the reference to None\n self._axis = None", "def _checkForSixaxis():\n return sixaxis.init(\"/dev/input/js1\")", "def update_focal_axes(self):\n self.update_sigma()\n self.updateGL()", "def gff_init():\n pass", "def main():\n\n robot = S2Fluke2( \"/dev/rfcomm2\" )\n\n print( \"getVersion :\", robot.getVersion() )\n print( \"identifyRobot :\", robot.identifyRobot() )\n print( \"getBattery :\", robot.getBattery() )\n print( \"setForwardness: SCRIBBLER_FORWARD\" )\n robot.setForwardness( robot.SCRIBBLER_FORWARD )\n print( \"setForwardness: FLUKE_FORWARD\" )\n robot.setForwardness( robot.FLUKE_FORWARD )\n print( \"setForwardness: SCRIBBLER_FORWARD\" )\n robot.setForwardness( robot.SCRIBBLER_FORWARD )\n print( \"getErrors : \" )\n print( robot.getErrors() )\n robot.resetScribbler()\n\n robot.close()", "def paint_focal_axes(self):\n GL.glTranslatef(*self.focus) # translate to focus\n self.paint_axes(self.sigma)\n GL.glTranslatef(*-self.focus) # translate back", "def graphics(env, fovea, objects, unit):\n plt.clf()\n\n env = environment.redraw(env, unit, objects)\n fovea_im = fovea.get_focus_image(env)\n\n plt.subplot(121)\n plt.title('Training environment')\n plt.xlim(0, unit)\n plt.ylim(0, unit)\n plt.imshow(env)\n\n # PLOT DESK EDGES\n plt.plot([0.2*unit, 0.2*unit, 0.8*unit, 0.8*unit, 0.2*unit],\n [0.2*unit, 0.8*unit, 0.8*unit, 0.2*unit, 0.2*unit], 'w-'\n )\n\n # PLOT FOVEA EDGES\n fov_indices = fovea.get_index_values()\n plt.plot([fov_indices[0][0], fov_indices[0][0], fov_indices[0][1],\n fov_indices[0][1], fov_indices[0][0]],\n [fov_indices[1][0], fov_indices[1][1], fov_indices[1][1],\n fov_indices[1][0], fov_indices[1][0]], 'w-'\n )\n\n plt.subplot(122)\n plt.title('Focus image')\n plt.imshow(fovea_im)\n\n plt.draw()\n plt.pause(0.01)", "def f_fixed(self):\n self.fx_free = self.fy_free = self.fz_free = False\n return self", "def init_plot_force(nb_mus):\n # --- Curve graph --- #\n # app = pg.mkQApp(\"force\")\n # remote = []\n # layout = pg.LayoutWidget()\n # layout.resize(800, 800)\n # label = QtGui.QLabel()\n # box = []\n # rplt = []\n # row_count = 0\n # col_span = 4 if nb_mus > 8 else 8\n # for mus in range(nb_mus):\n # remote.append(rgv.RemoteGraphicsView())\n # remote[mus].pg.setConfigOptions(antialias=True)\n # 
app.aboutToQuit.connect(remote[mus].close)\n # box.append(QtGui.QCheckBox(f\"muscle_{mus}\"))\n # if mus >= 8:\n # layout.addWidget(box[mus], row=1, col=mus-8)\n # layout.addWidget(remote[mus], row=mus - 8 + 2, col=4, colspan=col_span)\n # else:\n # layout.addWidget(box[mus], row=0, col=mus)\n # layout.addWidget(remote[mus], row=mus + 2, col=0, colspan=col_span)\n # rplt.append(remote[mus].pg.PlotItem())\n # rplt[mus]._setProxyOptions(deferGetattr=True) ## speeds up access to rplt.plot\n # remote[mus].setCentralItem(rplt[mus])\n # layout.addWidget(label)\n # layout.show()\n # row_count += 1\n # return rplt, layout, app , box\n\n # --- Progress bar graph --- #\n # app = pg.mkQApp(\"force\")\n # layout = pg.LayoutWidget()\n # layout.resize(400, 800)\n # layout.move(0, 0)\n # box = []\n # rplt = []\n # row_count = 0\n # for mus in range(nb_mus):\n # rplt.append(QProgressBar())\n # rplt[mus].setMaximum(1000)\n # layout.addWidget(rplt[mus], row=mus, col=0)\n # layout.show()\n # row_count += 1\n # return rplt, layout, app\n\n # --- Bar graph --- #\n app = pg.mkQApp()\n layout = pg.plot()\n layout.resize(800, 800)\n rplt = pg.BarGraphItem(x=range(nb_mus), height=np.zeros((nb_mus)), width=0.3, brush=\"r\")\n layout.addItem(rplt)\n return rplt, layout, app", "def addFluxcal():\n # Overall\n i = s.getScriptInt(odi.INDX_INT_NOBS_FLUX) - 1\n if i < 0: i = 0\n s.setScriptInt(odi.INDX_INT_NOBS_FLUX, i)\n\n # Primary\n i = s.getScriptInt(odi.INDX_INT_NOBS_FLUXPRI) - 1\n if i < 0: i = 0\n s.setScriptInt(odi.INDX_INT_NOBS_FLUXPRI, i)", "def plotAll(fx,tfarray,tlst,flst,fignum=1,starttime=0,timeinc='hrs',\r\n dt=1.0,title=None,vmm=None,cmap=None,aspect=None,interpolation=None,\r\n cbori=None,cbshrink=None,cbaspect=None,cbpad=None,normalize='n',\r\n scale='log'):\r\n \r\n #time increment\r\n if timeinc=='hrs':\r\n tinc=3600/dt\r\n elif timeinc=='min':\r\n tinc=60/dt\r\n elif timeinc=='sec':\r\n tinc=1/dt\r\n else:\r\n raise ValueError(timeinc+'is not defined')\r\n #colormap\r\n if cmap==None:\r\n cmap='jet'\r\n else:\r\n cmap=cmap\r\n #aspect ratio\r\n if aspect==None:\r\n aspect='auto'\r\n else:\r\n aspect=aspect\r\n #interpolation\r\n if interpolation==None:\r\n interpolation='gaussian'\r\n else:\r\n interpolation=interpolation\r\n #colorbar orientation\r\n if cbori==None:\r\n cbori='vertical'\r\n else:\r\n cbori=cbori\r\n #colorbar shinkage\r\n if cbshrink==None:\r\n cbshrink=.99\r\n else:\r\n cbshrink=cbshrink\r\n #colorbar aspect\r\n if cbaspect==None:\r\n cbaspect=20\r\n else:\r\n cbaspect=cbaspect\r\n #colorbar pad\r\n if cbpad==None:\r\n cbpad=.1\r\n else:\r\n cbpad=cbpad\r\n \r\n #scale\r\n if scale=='log':\r\n zerofind=np.where(abs(tfarray)==0)\r\n tfarray[zerofind]=1.0\r\n if normalize=='y':\r\n plottfarray=20*np.log10(abs(tfarray/np.max(abs(tfarray))))\r\n else:\r\n plottfarray=20*np.log10(abs(tfarray))\r\n elif scale=='linear':\r\n if normalize=='y':\r\n plottfarray=abs(plottfarray/np.max(abs(plottfarray)))**2\r\n else:\r\n plottfarray=abs(tfarray)**2\r\n \r\n t=np.arange(len(fx))*dt+starttime*dt\r\n FX=np.fft.fft(padzeros(fx))\r\n FXfreq=np.fft.fftfreq(len(FX),dt)\r\n \r\n #set some plot parameters\r\n plt.rcParams['font.size']=10\r\n plt.rcParams['figure.subplot.left']=.13\r\n plt.rcParams['figure.subplot.right']=.98\r\n plt.rcParams['figure.subplot.bottom']=.07\r\n plt.rcParams['figure.subplot.top']=.96\r\n plt.rcParams['figure.subplot.wspace']=.25\r\n plt.rcParams['figure.subplot.hspace']=.20\r\n #plt.rcParams['font.family']='helvetica'\r\n \r\n fig=plt.figure(fignum)\r\n \r\n #plot FFT of 
fx\r\n fax=fig.add_axes([.05,.25,.1,.7])\r\n plt.plot(abs(FX[0:len(FX)/2]/max(abs(FX)))**2,FXfreq[0:len(FX)/2],'-k')\r\n plt.xlim(0,1)\r\n plt.ylim(0,FXfreq[len(FX)/2-1])\r\n fax.xaxis.set_major_locator(MultipleLocator(.5))\r\n \r\n #plot TFD\r\n pax=fig.add_axes([.25,.25,.75,.7])\r\n if vmm!=None:\r\n vmin=vmm[0]\r\n vmax=vmm[1]\r\n plt.imshow(plottfarray,extent=(tlst[0]/tinc,tlst[-1]/tinc,\r\n flst[0],flst[-1]),aspect=aspect,vmin=vmin,vmax=vmax,cmap=cmap,\r\n interpolation=interpolation)\r\n else:\r\n plt.imshow(plottfarray,extent=(tlst[0]/tinc,tlst[-1]/tinc,\r\n flst[0],flst[-1]),aspect=aspect,cmap=cmap,\r\n interpolation=interpolation)\r\n plt.xlabel('Time('+timeinc+')',fontsize=12,fontweight='bold')\r\n plt.ylabel('Frequency (Hz)',fontsize=12,fontweight='bold')\r\n if title!=None:\r\n plt.title(title,fontsize=14,fontweight='bold')\r\n plt.colorbar(orientation=cbori,shrink=cbshrink,pad=cbpad,aspect=cbaspect)\r\n \r\n #plot timeseries\r\n tax=fig.add_axes([.25,.05,.60,.1])\r\n plt.plot(t,fx,'-k')\r\n plt.axis('tight')\r\n plt.show()", "def _fp_setup2(self):\n # TODO: right now it's hard to implement this required stage", "def _show_feature_flags(graph: nx.DiGraph, mode='sorted'):\n #plt.figure(figsize=(15, 15))\n if mode == 'sorted':\n pos = nx.multipartite_layout(graph)\n nx.draw(graph, pos, with_labels=True, arrows=True, node_color=\"#BA9DFB\")\n else:\n pos = graphviz_layout(graph)\n nx.draw_networkx(graph, pos, arrows=True, node_color=\"#BA9DFB\")\n plt.show()\n plt.clf()", "def drawAxes(t):\r\n t.speed(0)\r\n t.pd()\r\n t.forward(500)\r\n t.back(500)", "def go_infFD(self):\n\n response = self.send_lens_cmd(['05', '00', '00', '00'], fast_mode=True)\n self.wait_focus_move()", "def main_multimodal_fusion(im_vis, im_ir, kernel, levels, window_size):\n\n im_vis = convert_image_to_floats(im_vis)\n im_ir = convert_image_to_floats(im_ir)\n\n im_vis_hsv = rgb2hsv(im_vis)\n value_channel = im_vis_hsv[:, :, 2]\n\n plt.subplot(1, 2, 1)\n plt.imshow(value_channel, cmap='gray')\n plt.subplot(1, 2, 2)\n plt.imshow(im_ir, cmap='gray')\n plt.show()\n\n # kernels to compute visibility\n kernel1 = classical_gaussian_kernel(5, 2)\n kernel2 = classical_gaussian_kernel(5, 2)\n\n # Computation of local entropy, local contrast and visibility for value channel\n local_entropy_value = normalized_local_entropy(value_channel, window_size)\n local_contrast_value = local_contrast(value_channel, window_size)\n visibility_value = visibility(value_channel, kernel1, kernel2)\n # Combination of local entropy, local contrast and visibility for value channel\n weight_value = weight_combination(local_entropy_value, local_contrast_value, visibility_value, 1, 1, 1)\n\n # Computation of local entropy, local contrast and visibility for IR image\n local_entropy_ir = normalized_local_entropy(im_ir, window_size)\n local_contrast_ir = local_contrast(im_ir, window_size)\n visibility_ir = visibility(im_ir, kernel1, kernel2)\n # Combination of local entropy, local contrast and visibility for IR image\n weight_ir = weight_combination(local_entropy_ir, local_contrast_ir, visibility_ir, 1, 1, 1)\n\n plt.subplot(2, 3, 1)\n plt.imshow(local_entropy_value, cmap='gray')\n plt.subplot(2, 3, 2)\n plt.imshow(local_contrast_value, cmap='gray')\n plt.subplot(2, 3, 3)\n plt.imshow(visibility_value, cmap='gray')\n plt.subplot(2, 3, 4)\n plt.imshow(local_entropy_ir, cmap='gray')\n plt.subplot(2, 3, 5)\n plt.imshow(local_contrast_ir, cmap='gray')\n plt.subplot(2, 3, 6)\n plt.imshow(visibility_ir, cmap='gray')\n plt.show()\n\n # Normalising 
weights of value channel and IR image\n weightN_value, weightN_ir = weight_normalization(weight_value, weight_ir)\n\n plt.subplot(1, 2, 1)\n plt.imshow(weightN_value, cmap='gray')\n plt.subplot(1, 2, 2)\n plt.imshow(weightN_ir, cmap='gray')\n plt.show()\n\n # Creating Gaussian pyramids of the weights maps of respectively the value channel and IR image\n gauss_pyr_value_weights = gaussian_pyramid(weightN_value, kernel, levels)\n gauss_pyr_ir_weights = gaussian_pyramid(weightN_ir, kernel, levels)\n\n # Creating Laplacian pyramids of respectively the value channel and IR image\n lap_pyr_value = laplacian_pyramid(value_channel, kernel, levels)\n lap_pyr_ir = laplacian_pyramid(im_ir, kernel, levels)\n\n # Creating the fused Laplacian of the two modalities\n lap_pyr_fusion = fused_laplacian_pyramid(gauss_pyr_value_weights, gauss_pyr_ir_weights, lap_pyr_value, lap_pyr_ir)\n\n i = 1\n for l in lap_pyr_fusion:\n plt.subplot(1, len(lap_pyr_fusion), i)\n plt.imshow(l, cmap='gray')\n i += 1\n plt.show()\n\n # Creating the Gaussian pyramid of value channel in order to collapse the fused Laplacian pyramid\n gauss_pyr_value = gaussian_pyramid(value_channel, kernel, levels)\n collapsed_image = collapse_pyramid(lap_pyr_fusion, gauss_pyr_value)\n\n # Replacing the value channel in HSV visible image by the collapsed image\n im_vis_hsv_fusion = im_vis_hsv.copy()\n im_vis_hsv_fusion[:, :, 2] = collapsed_image\n im_vis_rgb_fusion = hsv2rgb(im_vis_hsv_fusion)\n\n plt.subplot(1, 2, 1)\n plt.imshow(im_vis)\n plt.subplot(1, 2, 2)\n plt.imshow(im_vis_rgb_fusion)\n plt.show()", "def toggle_axis(self):\n # cycle through three axis states\n states = [False, 'world', 'all']\n # the state after toggling\n index = (states.index(self.view['axis']) + 1) % len(states)\n # update state to next index\n self.view['axis'] = states[index]\n # perform gl actions\n self.update_flags()", "def enableaxes(self):\n debug('ControllerStartup.enableaxes()')\n if not self.pidevice.HasEAX() or self.prop['skipeax']:\n return\n for axis in self.pidevice.axes:\n try:\n self.pidevice.EAX(axis, True)\n except GCSError as exc:\n if exc != gcserror.E2_PI_CNTR_UNKNOWN_COMMAND:\n raise\n waitonready(self.pidevice, **self._kwargs)", "def _fctl(self):\n if self._fctl_written:\n return\n data = struct.pack(\n \">4L2H2B\",\n self.width, self.height, 0, 0,\n self.delay_numerator, self.delay_denominator,\n 1, 0)\n self._chunk(b\"fcTL\", self._seqno() + data)\n self._fctl_written = True", "def front_wheel_from_axis():", "def plot_fppy(self,LAXIS,xbl,xbr,ybu,ybd,ilg): \n\t\t\n # load x GRID\n grd1 = self.xzn0\n\t\n # load DATA to plot\n plt1 = self.fppy\n\t\t\t\t\n # create FIGURE\n plt.figure(figsize=(7,6))\n\t\t\n # format AXIS, make sure it is exponential\n plt.gca().yaxis.get_major_formatter().set_powerlimits((0,0))\t\t\n\n # set plot boundaries \n to_plot = [plt1]\t\t\n self.set_plt_axis(LAXIS,xbl,xbr,ybu,ybd,to_plot)\n\t\t\t\t\n # plot DATA \n plt.title(r'pressure flux y')\n plt.plot(grd1,plt1,color='brown',label = r'f$_{py}$')\n\n # define and show x/y LABELS\n setxlabel = r\"r (cm)\"\n setylabel = r\"$f_{py}$ (erg cm$^{-2}$ s$^{-1}$)\"\n plt.xlabel(setxlabel)\n plt.ylabel(setylabel)\n\t\t\n # show LEGEND\n plt.legend(loc=ilg,prop={'size':18})\n\n # display PLOT\n plt.show(block=False)\n\n # save PLOT\n plt.savefig('RESULTS/'+self.data_prefix+'mean_fppy.png')", "def plot_single_hfo(hfo, envelope = False, xlim =[-1,1], cutoff = None, v = True,\n axes = None, figure_size = (15,10),dpi=600,saveplot = None):\n if axes == None:\n # Creating the figure \n 
fig = plt.figure(figsize=figure_size,dpi=dpi)\n ax1 = fig.add_subplot(311)\n ax2 = fig.add_subplot(312)\n ax3 = fig.add_subplot(313)\n else:\n ax1 = axes[0]\n ax2 = axes[1]\n ax3 = axes[2]\n\n # number of points\n npoints = hfo.waveform.shape[0]\n time_v = np.linspace(-1,1,npoints,endpoint=True)\n # creating the axes\n \n ax1.plot(time_v,hfo.waveform[:,0],'b')\n ax1.plot(time_v[hfo.start_idx:hfo.end_idx],hfo.waveform[hfo.start_idx:hfo.end_idx,0],'k')\n \n adjust_spines(ax1, ['left'])\n ax1.set_xlim(xlim)\n \n \n \n filt = hfo.waveform[:,1]\n ax2.plot(time_v,filt) \n ax2.plot(time_v[hfo.start_idx:hfo.end_idx],filt[hfo.start_idx:hfo.end_idx],'k')\n if envelope:\n env = hfo.waveform[:,2]\n ax4 = ax2.twinx()\n ax4.plot(time_v,env,'g')\n \n\n \n adjust_spines(ax2, ['left', 'bottom'])\n ax2.set_xlim(xlim)\n \n \n hfo.spectrum.plot(cutoff = cutoff, v = v, ax = ax3)\n ax3.set_title('peak freq = ' + str(hfo.spectrum.peak_freq))\n adjust_spines(ax3, ['left', 'bottom'])\n \n if saveplot != None:\n if type(saveplot) == str: \n plt.savefig(saveplot, bbox_inches='tight')\n else:\n raise Exception('saveplot should be a string')\n plt.draw()", "def show():\n setup()\n plt.show()", "def __call__(self, info, *fargs):\n frame = info[0] # Frame number\n update = info[1] # Update value\n grid_data = info[2] # Data to draw our grids\n mask = info[3] # Mask of data\n self._setup['update'].set_text(f'Update {update}')\n for ndx,data in enumerate(grid_data):\n self._setup['plots'][ndx].set_array(check_mask(data,mask[ndx]))\n for pp in self._setup['post_plot']:\n pp.blit_update(frame, update, ax_ndx=ndx)\n if self._setup._pbar:\n self._setup._pbar.update(frame)\n if frame == self._setup._num_frames - 1:\n self._setup._pbar.finish()\n return self._setup.get_drawables()", "def displayFiducial(self):\n #obsolete?\n profbox()\n modelNodes = slicer.util.getNodes('vtkMRMLModelNode*')\n for modelNode in modelNodes.values():\n displayNode = modelNode.GetDisplayNode()\n if modelNode.GetAttribute(\"segmented\") == \"1\" and modelNode.GetAttribute(\"nth\")!=None:\n if 1:\n i = int(modelNode.GetAttribute(\"nth\"))\n if self.fiducialnode[i] == 0: \n polyData = modelNode.GetPolyData()\n nb = int(polyData.GetNumberOfPoints()-1)\n coord = [0,0,0]\n if nb>10:\n self.fiducialnode[i] = slicer.vtkMRMLAnnotationFiducialNode()\n polyData.GetPoint(nb,coord) \n self.fiducialnode[i].SetName(self.option[i])\n self.fiducialnode[i].SetFiducialCoordinates(coord) \n self.fiducialnode[i].Initialize(slicer.mrmlScene)\n self.fiducialnode[i].SetLocked(1)\n self.fiducialnode[i].SetSelectable(0)\n fidDN = self.fiducialnode[i].GetDisplayNode()\n fidDN.SetColor(modelNode.GetDisplayNode().GetColor())\n fidDN.SetGlyphScale(0)\n fidTN = self.fiducialnode[i].GetAnnotationTextDisplayNode()\n fidTN.SetTextScale(3)\n fidTN.SetColor(modelNode.GetDisplayNode().GetColor())\n \n self.fiducialnode[i].SetDisplayVisibility(modelNode.GetDisplayNode().GetVisibility())\n else: \n if modelNode.GetDisplayNode().GetVisibility():\n self.fiducialnode[i].SetDisplayVisibility(abs(self.fiducialnode[i].GetDisplayVisibility()-1))\n if self.fiducialnode[i].GetDisplayVisibility()==1:\n self.displayFiducialButton.text = \"Hide Labels on Needles\"\n else:\n self.displayFiducialButton.text = \"Display Labels on Needles\"", "def wfsInit():\n wfs.init()\n wfs.setExposure()", "def fc_visual_save(fc, lowweight, savenamefile_prefix):\r\n\r\n\r\n ### text setup for brain areas ###\r\n pos_text_lefttop1 = [-80, 50, 30]\r\n pos_text_middletop1 = [120, 50, 30]\r\n pos_text_lefttop2 = [-80, 
70, 10]\r\n pos_text_leftDown1 = [-80, 550, 30]\r\n pos_text_leftDown2 = [-80, 570, 10]\r\n pos_text_leftDown3 = [-80, 580, 10]\r\n \r\n texts_org = dict()\r\n\r\n lowweight = np.round(lowweight, decimals = 2) \r\n\r\n # plot\r\n df_chninf = assign_coord2chnArea(area_coord_file, fc['chnAreas'])\r\n for ci, cond in enumerate(fc['ciCOH'].keys()):\r\n ciCOH = fc['ciCOH'][cond]\r\n ntrials, ntemp = fc['setup']['ntrials_' + cond], fc['setup']['ntemp_' + cond]\r\n\r\n\r\n texts = texts_org.copy()\r\n \r\n text_thred = 'thred = ' + str(np.round(lowweight, decimals = 2))\r\n text_ntrials = 'ntrials = ' + str(ntrials)\r\n\r\n texts[cond] = pos_text_middletop1\r\n texts[text_task] = pos_text_leftDown1\r\n texts[text_ntrials] = pos_text_leftDown2\r\n texts[text_thred] = pos_text_leftDown3\r\n \r\n\r\n saveFCGraph = os.path.join(savefolder, savenamefile_prefix + '_lw' + str(np.round(lowweight, decimals = 2)) + '_' + cond + '.png')\r\n\r\n igplot = ciCOH_visual_save(ciCOH = ciCOH, chnInf = df_chninf, lowweight = lowweight, \r\n savefile = saveFCGraph, texts = texts, threds_edge = None)\r\n\r\n del texts[cond], texts[text_ntrials]\r\n\r\n img = cv2.imread(saveFCGraph)\r\n if ci == 0:\r\n imgs = img\r\n else:\r\n imgs = np.concatenate((imgs, np.zeros((img.shape[0], 5, 3)),img), axis = 1)\r\n\r\n os.remove(saveFCGraph)\r\n\r\n # combine all conditions\r\n print(imgs.shape)\r\n saveFCGraph_comb = os.path.join(savefolder, 'comb_' + savenamefile_prefix + '_lw' + str(np.round(lowweight, decimals = 2)) + '.png')\r\n cv2.imwrite(saveFCGraph_comb, imgs)", "def SetStandbyFPMode(self):\n handler = self.get_command_object(\"SetStandbyFPMode\")\n handler()", "def updateFreqAxis(self, ax, n_ticks=5, delay=False):\n rf = self.uv.h_common['REF_FREQ'] /1e6\n chw = self.uv.d_frequency['CH_WIDTH'] / 1e6\n bw = self.uv.d_frequency['TOTAL_BANDWIDTH'] / 1e6\n rp = self.uv.h_common['REF_PIXL']\n nchan = self.uv.h_common['NO_CHAN']\n\n\n #print rf, chw, bw\n\n ticks = ax.get_xticks()\n #print ticks\n tmin, tmax = np.min(ticks), np.max(ticks)\n if tmin < 0: tmin = 0\n #print tmin, tmax\n tlocs = map(int, np.linspace(tmin, tmax, n_ticks))\n\n if rp == 1:\n tlabs = np.linspace(rf, rf+bw, n_ticks)\n else:\n rf_low = rf - chw * rp\n tlabs = np.linspace(rf_low, rf_low+bw, n_ticks)\n #print tlocs\n #print tlabs\n ax.set_xticks(tlocs)\n if not delay:\n ax.set_xticklabels([\"%2.2f\"%tt for tt in tlabs])\n else:\n tlabs = np.linspace(-1.0/chw/1e3 * nchan/2, 1.0/chw/1e3 * nchan/2, n_ticks)\n ax.set_xticklabels([\"%2.2f\"%tt for tt in tlabs])", "def pylab_setup(figure, stream_data, original_width, runlimits, runflags):\n\n def on_key(event):\n \"\"\"on_key\"\"\"\n print('you pressed', event.key, event.xdata, event.ydata)\n\n #def diag_event(event):\n # \"\"\"diag_event\"\"\"\n # print event.name\n # if hasattr(event, 'height'):\n # print event.height, event.width\n # print event.name, event.canvas, event.guiEvent\n\n def pause_axis(unused_event):\n \"\"\"pause_axis\"\"\"\n # stops update of axis when updating lines\n # allows smooth scrolling by user\n print \"PAUSE pause axis\"\n runflags.update_axis = False\n\n def unpause_axis(event):\n \"\"\"unpause_axis\"\"\"\n # continues updating scrolling\n print \"RESUME axis\"\n runflags.update_axis = True\n if hasattr(event, 'height'):\n print event.height, event.width\n new_ratio = float(event.width)/float(event.height)\n default_ratio = 1.3\n print \"BEFORE: \", FLAGS.width\n FLAGS.width = original_width * new_ratio / default_ratio\n print \"AFTER: \", FLAGS.width\n\n 
figure.canvas.mpl_connect('key_press_event', on_key)\n figure.canvas.mpl_connect('resize_event', unpause_axis)\n figure.canvas.mpl_connect('scroll_event', pause_axis)\n\n timer = figure.canvas.new_timer(interval=500)\n timer.add_callback(plot_refresh_handler, (stream_data, runlimits, runflags))\n timer.start()\n print \"SHOW\"\n pylab.show()\n print \"AFTER\"", "def _f1_draw_ ( self , *opts ) :\n if not hasattr ( self , '_tf1' ) :\n \n if hasattr ( self , 'xmin' ) and hasattr ( self , 'xmax' ) :\n \n if hasattr ( self , 'npars' ) :\n \n self._tf1 = _tf1_ ( self ,\n self.xmin () ,\n self.xmax () ,\n self.npars () )\n for i in range ( 0, self.npars() ) :\n self._tf1.SetParameter ( i , self.par ( i ) )\n \n else : self._tf1 = _tf1_ ( self , self.xmin() , self.xmax() )\n \n if type(self) in ( Gaudi.Math.Positive ,\n Gaudi.Math.Monothonic , \n Gaudi.Math.Convex , \n Gaudi.Math.ConvexOnly , \n Gaudi.Math.PositiveSpline , \n Gaudi.Math.MonothonicSpline , \n Gaudi.Math.ConvexSpline ,\n Gaudi.Math.ConvexOnlySpline ,\n Gaudi.Math.ExpoPositive ,\n Gaudi.Math.TwoExpoPositive ) : \n self._tf1.SetMinimum(0)\n \n return self._tf1.Draw ( *opts )", "def plot_a(self, LAXIS, bconv, tconv, xbl, xbr, ybu, ybd, ilg):\n\n if self.ig != 1 and self.ig != 2:\n print(\"ERROR(TurbulentMassFluxEquation.py):\" + self.errorGeometry(self.ig))\n sys.exit()\n\n # load x GRID\n grd1 = self.xzn0\n\n # load DATA to plot\n plt1 = -self.dd*self.eht_a\n plt2 = self.eht_a_model1\n plt3 = self.eht_a_model2\n plt4 = self.ux\n plt5 = self.fht_ux\n plt6 = self.eht_a_model3\n plt7 = self.eht_a_model4\n plt8 = self.fht_ux_model\n plt9 = self.eht_a_grad_model\n plt10 = self.eht_a_tempflx\n\n # create FIGURE\n plt.figure(figsize=(7, 6))\n\n # format AXIS, make sure it is exponential\n plt.gca().yaxis.get_major_formatter().set_powerlimits((0, 0))\n\n # set plot boundaries \n to_plot = [plt1, plt2, plt3, plt4, plt5, plt6, plt7, plt8, plt9, plt10]\n self.set_plt_axis(LAXIS, xbl, xbr, ybu, ybd, to_plot)\n\n # plot DATA \n # plt.title(r'turbulent mass flux'+ ' c = ' + str(self.coeff))\n plt.title(r'turbulent mass flux')\n if self.ig == 1:\n plt.plot(grd1, plt1, color='brown', label=r\"$+\\overline{\\rho' u'_x}$\")\n # plt.plot(grd1,plt2,color='r',label='model1')\n # plt.plot(grd1,plt3,color='g',label='model2')\n # plt.plot(grd1, plt4, color='pink', label=r'$\\overline{u}_x$')\n # plt.plot(grd1, plt5, color='m', label=r'$\\widetilde{u}_x$')\n # plt.plot(grd1,plt6,color='b',label=r'model3')\n # plt.plot(grd1, plt7, color='b', label=r'model4')\n # plt.plot(grd1, plt8, color='r', linestyle='--', label=r'model for fht ux')\n # plt.plot(grd1, plt9, color='r', linestyle='--', label=r\"$+c*u_{rms}*l_c * \\partial_r \\overline{\\rho}$\")\n plt.plot(grd1, plt10, color='g', linestyle='--', label=r\"$- \\overline{\\rho} \\ \\alpha_T \\ \\overline{T'u'_r}$\")\n elif self.ig == 2:\n plt.plot(grd1, plt1, color='brown', label=r\"$a$\")\n # plt.plot(grd1,plt2,color='r',label='model1')\n # plt.plot(grd1,plt3,color='g',label='model2')\n plt.plot(grd1, plt4, color='pink', label=r'$\\overline{u}_r$')\n plt.plot(grd1, plt5, color='m', label=r'$\\widetilde{u}_r$')\n # plt.plot(grd1,plt6,color='b',label=r'model3')\n # plt.plot(grd1, plt7, color='b', label=r'model4')\n # plt.plot(grd1, plt8, color='r', linestyle='--', label=r'model for fht ux')\n\n # horizontal line at y = 0\n plt.axhline(0.0, linestyle='dotted', linewidth=0.7, color='k')\n\n # convective boundary markers\n plt.axvline(bconv, linestyle='--', linewidth=0.7, color='k')\n plt.axvline(tconv, 
linestyle='--', linewidth=0.7, color='k')\n\n # define and show x/y LABELS\n if self.ig == 1:\n setxlabel = r\"x (cm)\"\n #setylabel = r\"$\\overline{\\rho}$ $\\overline{u''_x}$ (g cm$^{-2}$ s$^{-1}$)\"\n setylabel = r\"+$\\overline{\\rho' u'_x}$ (g cm$^{-2}$ s$^{-1}$)\"\n plt.xlabel(setxlabel)\n plt.ylabel(setylabel)\n elif self.ig == 2:\n setxlabel = r\"r (cm)\"\n setylabel = r\"$\\overline{\\rho}$ $\\overline{u''_r}$ (g cm$^{-2}$ s$^{-1}$)\"\n plt.xlabel(setxlabel)\n plt.ylabel(setylabel)\n\n # show LEGEND\n plt.legend(loc=ilg, prop={'size': 18})\n\n # create FIGURE\n # plt.figure(figsize=(7, 6))\n\n # format AXIS, make sure it is exponential\n # plt.gca().yaxis.get_major_formatter().set_powerlimits((0, 0))\n\n # set plot boundaries \n # to_plot = [plt1,plt2,plt3,plt4,plt5,plt6]\n # self.set_plt_axis(LAXIS,xbl,xbr,ybu,ybd,to_plot)\n\n # self.minus_dt_mm = -self.dt(t_mm,xzn0,t_timec,intc)\n # self.plus_dt_mm = -4.*np.pi*(self.xzn0**2.)*dd*fht_ux\n # self.plus_grad_mm = +self.Grad(mm,xzn0)\n\n # plot DATA \n # plt.plot(grd1, 1. / self.ux, color='brown', label=r\"$1/\\overline{u}_r$\")\n # plt.plot(grd1, 1. / self.fht_ux, color='r', label=r\"$1/\\widetilde{u}_r$\")\n # plt.plot(grd1,+self.eht_a_model1,color='g',label='model1')\n # plt.plot(grd1,(1./(self.ux)+(1./(self.fht_ux))),linestyle='--',color='b',label='xx')\n # plt.plot(grd1,1./(self.eht_a+self.fht_ux),color='pink',label='1/a')\n # plt.plot(grd1, -self.plus_grad_mm / self.plus_dt_mm, color='k', linestyle='--', label='drMM/dtMM')\n\n # show LEGEND\n # plt.legend(loc=ilg, prop={'size': 18})\n\n # display PLOT\n plt.show(block=False)\n\n\n plt.savefig('RESULTS/' + self.data_prefix + 'mean_a.png')\n plt.savefig('RESULTS/' + self.data_prefix + 'mean_a.eps')", "def __init__(self, fpga, plots, chann=6069, freqs=[0, 67.5], bw=67.5):\n\n self.fpga = fpga\n self.fpga.write_int('cnt_rst',0) #just in case\n self.plots = plots\n self.nplots = len(self.plots)\n self.chann = chann\n self.freq = freqs\n self.bw = bw\n self.fft_freq = np.linspace(0, bw, 2**13,endpoint=False)\n self.plot_map = {1:'11', 2:'12', 3:'22', 4:'22', 5:'23',\n 6:'23', 7: '33', 8:'33', 9:'33'}\n self.fig = plt.figure()\n self.axes = []\n self.data = [] \n \n #generate a dict for the specification of each plot\n #the info is encoded in [title, y_label, x_label,(y_init, y_end), (x_init, x_end), [brams], data_type]\n self.plot_info = {'spect0':['Spectrum ZDOK0', '[dB]', '[MHz]',\n (30, 180), (self.freq), ['1_A2'], '>8192Q'],\n 'spect1':['Spectrum ZDOK1', '[dB]', '[MHz]',\n (30, 180), (self.freq), ['1_B2'], '>8192Q'],\n 're_full':['Real correlation', '', '[MHz]',\n (30,180), (self.freq), ['AB_re'], '>8192q'],\n 'im_full':['Imag correlation', '', '[MHz]',\n (30,180), (self.freq), ['AB_im'], '>8192q'],\n 'phase':['Relative Phase', ('['+u'\\xb0'+']'), '[MHz]',\n (-180,180), (self.freq), ['AB_im', 'AB_re'], '>8192q'],\n 'chann_pow':['Relative Power at'+str(self.fft_freq[self.chann]),\n '[dB]','[MHz]',(-180,180), (0, 8191),\n ['PowA', 'PowB'], '>8192Q'],\n 'chann_phase':['Relative phase at'+str(self.fft_freq[self.chann]),\n ('['+u'\\xb0'+']'), '[MHz]',(-180,180), (0,8191),\n ['phase'], '>16384q']}\n\n\tself.fpga.write_int('mux_sel',0)\n\tself.fpga.write_int('n_points', 16384)\n\tself.fpga.write_int('reading_data',1)\n\tself.fpga.write_int('reading_data',0)\n\tself.create_plots()\n\tanim = animation.FuncAnimation(self.fig, self.animate, blit=True)\n\tplt.show()", "def plot5(self):\n\n cond = ((self.ds.freq<32.6) & (self.ds.freq>17))\n freq = self.ds.freq[cond]\n power = 
self.ds.power[cond]\n\n # the modes for KIC 9205705\n m = pd.read_csv('/home/mxs191/Desktop/MathewSchofield/TRG/GetData/Modes/modes_9205705.csv')\n m1 = [17.65, 20.6, 23.7, 26.9, 30.1] # l=1\n\n plt.rc('font', size=18)\n fig, ax = plt.subplots()\n plt.plot(freq, power, zorder=1, alpha=0.4)\n\n # NOTE: annotate mode angular degrees\n plt.scatter(m['f0'].as_matrix(), np.full(len(m), 150000), c='k', zorder=2, s=80)\n plt.scatter(m['f2'].as_matrix(), np.full(len(m), 130000), c='mediumseagreen', zorder=2, s=80, marker='^')\n plt.scatter(m1, np.full(len(m1), 140000), c='grey', zorder=2, s=80, marker='v')\n\n # NOTE: plot envelope\n numax = info['numax'].as_matrix() # mu Hz\n env_width = 0.66 * numax**0.88\n plt.plot(freq, 40004*np.exp( -( (freq-24.7)**2 / (2*7.**2) ) ), c='k', linestyle='--')\n\n # NOTE: annotate envelope\n style = dict(size=16, color='k')\n ax.text(24.1, 49167, r\"$\\nu_{\\rm max}$\", color='k', size=18)\n ax.text(24.1, 20994, r\"$\\Gamma_{\\rm env}$\", color='k', size=18)\n ax.text(23, 162944, r\"$\\Delta \\nu$\", **style)\n plt.annotate(s='', xy=(25.3, 158610), xytext=(21.91, 158610),\n arrowprops=dict(arrowstyle='<->')) # dnu\n plt.annotate(s='', xy=((24.7-env_width/2.), 15861), xytext=((24.7+env_width/2.), 15861),\n arrowprops=dict(arrowstyle='<->')) # env width\n\n ax.set_xlabel(r'Frequency ($\\rm \\mu Hz$)')\n ax.set_ylabel(r'PSD ($\\rm ppm^{2} \\, \\mu Hz^{-1}$)')\n plt.xlim(17, 32.6)\n plt.ylim(17, 195181)\n plt.tight_layout()\n plt.show()\n fig.savefig(os.getcwd() + '/DetTest1_plots/Plot5_ps_' + str(self.ds.epic) + '.pdf')", "def OnFloated(self, event):\n self._floating = True\n wx.PostEvent(self, wxDockPaneFloatedEvent())", "def setup_fermi(self):\n eventclass=5 # 2 (Source) or 5 (UltracleanVeto)\n eventtype=0 # 0 (all), 3 (bestpsf) or 5 (top3 quartiles)\n mask_type='top300'\n force_mask_at_bin_number=10\n\n self.f1 = fp.fermi_plugin(maps_dir,fermi_data_dir=fermi_data_dir,work_dir=work_dir,CTB_en_min=0,CTB_en_max=40,nside=self.nside,eventclass=eventclass,eventtype=eventtype,newstyle=1,data_July16=True)\n\n if mask_type != 'False':\n self.f1.make_ps_mask(mask_type = mask_type,force_energy = True,energy_bin = force_mask_at_bin_number)\n self.f1.add_diffuse_newstyle(comp = 'p7', eventclass = eventclass, eventtype = eventtype)\n self.f1.add_bubbles(comp='bubs') #bubbles\n self.f1.add_iso(comp='iso') #iso\n self.f1.add_ps_model(comp='ps_model')\n\n # Exposure correct J_map_arr\n self.J_map_arr *= self.f1.CTB_exposure_maps\n\n # Add J-factor map with mean 1 in each energy bin\n self.f1.add_template_by_hand('J_map',np.array([self.J_map_arr[i]/np.mean(self.J_map_arr[i]) for i in range(40)]))", "def plotArt(self):\n self.isArt=True\n warr=self.ws.value(self.xarr)\n asfarr=st.interpolate(warr, self.swarr, self.sfarr, left=0.0, right=0.0)\n asfarr=asfarr*self.farr.max()/asfarr.max()\n self.fpcurve,=self.axes.plot(self.xarr,asfarr,linewidth=0.5,linestyle='-',\n marker='None',color='r')", "def check_fit_gui(wls,fxc,trans):\n\n\n import sys\n import matplotlib.pyplot as plt\n from matplotlib.widgets import Slider, Button, RadioButtons, CheckButtons\n import lib.functions as fun\n import numpy as np\n\n M = molecfit_gui(wls,fxc,trans)\n\n #The slider to cycle through orders:\n rax_slider = plt.axes([0.8, 0.2, 0.1, 0.02])\n rax_slider.set_title('Order')\n M.spectrum_slider = Slider(rax_slider,'', 0,M.N-1,valinit=0,valstep=1)#Store the slider in the model class\n M.spectrum_slider.on_changed(M.slide_spectrum)\n\n #The Previous order button:\n rax_prev = plt.axes([0.8, 0.1, 0.04, 
0.05])\n bprev = Button(rax_prev, ' <<< ')\n bprev.on_clicked(M.previous)\n\n #The Next order button:\n rax_next = plt.axes([0.86, 0.1, 0.04, 0.05])\n bnext = Button(rax_next, ' >>> ')\n bnext.on_clicked(M.next)\n\n #The save button:\n rax_save = plt.axes([0.92, 0.1, 0.07, 0.05])\n bsave = Button(rax_save, 'Continue')\n bsave.on_clicked(M.save)\n\n #The cancel button:\n rax_cancel = plt.axes([0.92, 0.025, 0.07, 0.05])\n bcancel = Button(rax_cancel, 'Cancel')\n bcancel.on_clicked(M.cancel)\n\n #This is to rescale the x-size of the checkboxes so that they are squares.\n bbox = M.fig.get_window_extent().transformed(M.fig.dpi_scale_trans.inverted())\n width, height = bbox.width*M.fig.dpi, bbox.height*M.fig.dpi\n\n\n M.selec=plt.axes([0.05,0.03,0.7,0.05*M.nrows])\n M.selec.spines['bottom'].set_color('white')\n M.selec.spines['top'].set_color('white')\n M.selec.spines['left'].set_color('white')\n M.selec.spines['right'].set_color('white')\n vlines = fun.findgen(M.N-1)+0.5\n\n row = M.nrows\n offset = 0\n for i in range(M.N):\n #print(i,float(i)-offset)\n\n if float(i)-offset > M.maxboxes-1.0:\n row -= 1\n offset += M.maxboxes\n M.selec.plot(float(i)-offset+np.array([-0.5,-0.5,0.5,0.5,-0.5]),[row,row-1,row-1,row,row],color='black')\n M.selec.text(float(i)-offset,row-0.5,'%s' % i,color='black',horizontalalignment='center',verticalalignment='center')\n\n\n\n M.selec.set_xlim(-0.55,M.maxboxes-1.0+0.55)#A little margin to make sure that the line thickness is included.\n M.selec.set_ylim(-0.05,1.0*M.nrows+0.05)\n #M.selec.set_yticklabels([])\n M.selec.xaxis.set_tick_params(labelsize=8)\n M.selec.yaxis.set_tick_params(labelsize=8)\n\n\n\n def select_spectrum_box(event):\n\n #This handles with a mouseclick in either of the three plots while in add mode.\n if event.inaxes in [M.selec]:#Check that it occurs in one of the subplots.\n cc = event.xdata*1.0#xdata is the column that is selected.\n cr = event.ydata*1.0\n spectrum = np.round(cc)+np.round((M.nrows-cr-0.5))*M.maxboxes\n if spectrum < M.N:\n if spectrum in M.selected:\n M.selected.remove(spectrum)\n print('---Removed spectrum %s from manual' % spectrum)\n else:\n M.selected.append(spectrum)\n print('---Added spectrum %s to manual' % spectrum)\n M.draw_crosses()\n M.click_connector = M.fig.canvas.mpl_connect('button_press_event',select_spectrum_box)#This is the connector that registers clicks\n\n plt.show()\n print('Closed GUI, returning.')\n return(M.selected)", "def cb_reset(event):\n axDirichlet.cla()\n # Reset Sliders\n sAlpha0.reset() # resetが駄目!一番最初に戻ってしまう\n sAlpha1.reset()\n sAlpha2.reset()\n alpha_update = [sAlpha0.val, sAlpha1.val, sAlpha2.val]\n print('alpha_update=', alpha_update)\n\n # ML\n lambda_ML = CatML.MLinfer(x_cat)\n\n axML.cla()\n drawBarGraph( axML, \"ML\", lambda_ML, bar_y_max, col_ML ) # Draw Bar graph\n\n\n # MAP\n dirichlet.set_param(alpha_update)\n lambda_MAP = CatMAP.MAPinfer(x_cat, dirichlet)\n\n axMAP.cla()\n drawBarGraph( axMAP, \"MAP\", lambda_MAP, bar_y_max, col_MAP ) # Draw Bar Graph\n\n # Bayes\n posteriorDirichlet.set_param(alpha_update)\n posteriorDirichlet.calcPosterior(x_cat)\n lambda_Bayes = np.zeros(3)\n for k in range(3):\n lambda_Bayes[k] = posteriorDirichlet.BayesInfer(k)\n\n axBayes.cla()\n drawBarGraph( axBayes, \"Bayes\", lambda_Bayes, bar_y_max, col_Bayes ) # Draw Bar Graph\n\n draw_pdf_contours(axDirichlet, dirichlet, True) # Draw Dirichlet\n\n print('Reset')\n print('lambda_ML =', lambda_ML)\n print('lambda_MAP =', lambda_MAP)\n print('lambda_Bayes=', lambda_Bayes)\n draw_point(axDirichlet, 
lambda_ML, col_ML)\n draw_point(axDirichlet, lambda_MAP, col_MAP)\n draw_point(axDirichlet, lambda_Bayes, col_Bayes)\n draw_point(axLikelihood, lambda_ML, col_ML)\n draw_point(axPosteriorDirichlet, lambda_MAP, col_MAP)\n draw_point(axPosteriorDirichlet, lambda_Bayes, col_Bayes)\n\n fig.canvas.draw_idle()", "def __init__(self, options, imgs, frq_sim_guess, otf=None,\n wiener_parameter=1, fbounds=(0.01, 1), fbounds_shift=(0.01, 1),\n use_wicker=True, normalize_histograms=True, background_counts=100,\n do_global_phase_correction=True, determine_amplitudes=False, find_frq_first=True,\n default_to_guess_on_bad_phase_fit=True, max_phase_err=20*np.pi/180,\n default_to_guess_on_low_mcnr=True, min_mcnr=1,\n size_near_fo_to_remove=0,\n phases_guess=None, mod_depths_guess=None, pspec_params_guess=None,\n use_fixed_phase=False, use_fixed_frq=False, use_fixed_mod_depths=False,\n plot_diagnostics=True, interactive_plotting=False, save_dir=None, figsize=(20, 10)):\n # #############################################\n # saving information\n # #############################################\n self.save_dir = save_dir\n self.hold_figs_open = False\n self.figsize = figsize\n\n if self.save_dir is not None:\n self.log_file = open(os.path.join(self.save_dir, \"sim_log.txt\"), 'w')\n else:\n self.log_file = None\n\n # #############################################\n # setup plotting\n # #############################################\n if not interactive_plotting:\n plt.ioff()\n plt.switch_backend(\"agg\")\n\n # #############################################\n # analysis settings\n # #############################################\n self.wiener_parameter = wiener_parameter\n self.use_wicker = use_wicker\n self.global_phase_correction = do_global_phase_correction\n self.normalize_histograms = normalize_histograms\n self.size_near_fo_to_remove = size_near_fo_to_remove\n self.default_to_guess_on_bad_phase_fit = default_to_guess_on_bad_phase_fit\n self.max_phase_error = max_phase_err\n self.default_to_guess_on_low_mcnr = default_to_guess_on_low_mcnr\n self.min_mcnr = min_mcnr\n self.determine_amplitudes = determine_amplitudes\n self.use_fixed_phase = use_fixed_phase\n self.use_fixed_frq = use_fixed_frq\n self.use_fixed_mod_depths = use_fixed_mod_depths\n self.find_frq_first = find_frq_first\n self.plot_diagnostics = plot_diagnostics\n\n # #############################################\n # images\n # #############################################\n self.background_counts = background_counts\n self.imgs = imgs.astype(np.float64)\n self.nangles, self.nphases, self.ny, self.nx = imgs.shape\n \n # #############################################\n # get basic parameters\n # #############################################\n self.dx = options['pixel_size']\n self.dy = options['pixel_size']\n self.na = options['na']\n self.wavelength = options['wavelength']\n\n self.fmax = 1 / (0.5 * self.wavelength / self.na)\n self.fbounds = fbounds\n self.fbounds_shift = fbounds_shift\n\n self.frqs_guess = frq_sim_guess\n self.phases_guess = phases_guess\n self.mod_depths_guess = mod_depths_guess\n self.power_spectrum_params_guess = pspec_params_guess\n\n # #############################################\n # get frequency data and OTF\n # #############################################\n self.fx = tools.get_fft_frqs(self.nx, self.dx)\n self.fy = tools.get_fft_frqs(self.ny, self.dy)\n\n if otf is None:\n otf = psf.circ_aperture_otf(self.fx[None, :], self.fy[:, None], self.na, self.wavelength)\n self.otf = otf\n\n # 
#############################################\n # print current time\n # #############################################\n now = datetime.datetime.now()\n\n self.print_tee(\"####################################################################################\", self.log_file)\n self.print_tee(\"%d/%02d/%02d %02d:%02d:%02d\" % (now.year, now.month, now.day, now.hour, now.minute, now.second), self.log_file)\n self.print_tee(\"####################################################################################\", self.log_file)\n\n # #############################################\n # normalize histograms for input images\n # #############################################\n if self.normalize_histograms:\n tstart = time.process_time()\n\n for ii in range(self.nangles):\n for jj in range(1, self.nphases):\n self.imgs[ii, jj] = match_histograms(self.imgs[ii, jj], self.imgs[ii, 0])\n\n tend = time.process_time()\n self.print_tee(\"Normalizing histograms took %0.2fs\" % (tend - tstart), self.log_file)\n\n # #############################################\n # remove background\n # #############################################\n self.imgs = self.imgs - self.background_counts\n self.imgs[self.imgs <= 0] = 1e-12\n\n # #############################################\n # Fourier transform SIM images\n # #############################################\n tstart = time.process_time()\n\n self.imgs_ft = np.zeros((self.nangles, self.nphases, self.ny, self.nx), dtype=np.complex)\n for jj in range(self.nangles):\n for kk in range(self.nphases):\n # use periodic/smooth decomposition instead of traditional apodization\n img_to_xform, _ = psd.periodic_smooth_decomp(self.imgs[jj, kk])\n self.imgs_ft[jj, kk] = fft.fftshift(fft.fft2(fft.ifftshift(img_to_xform)))\n\n tend = time.process_time()\n\n self.print_tee(\"FT images took %0.2fs\" % (tend - tstart), self.log_file)\n\n # #############################################\n # get widefield image\n # #############################################\n tstart = time.process_time()\n\n self.widefield = get_widefield(self.imgs)\n wf_to_xform, _ = psd.periodic_smooth_decomp(self.widefield)\n self.widefield_ft = fft.fftshift(fft.fft2(fft.ifftshift(wf_to_xform)))\n\n tend = time.process_time()\n self.print_tee(\"Computing widefield image took %0.2fs\" % (tend - tstart), self.log_file)\n\n # #############################################\n # get optically sectioned image\n # #############################################\n tstart = time.process_time()\n\n sim_os = np.zeros((self.nangles, self.imgs.shape[-2], self.imgs.shape[-1]))\n for ii in range(self.nangles):\n sim_os[ii] = sim_optical_section(self.imgs[ii])\n # todo: maybe want to weight by power/mod depth?\n self.imgs_os = np.mean(sim_os, axis=0)\n\n tend = time.process_time()\n self.print_tee(\"Computing OS image took %0.2fs\" % (tend - tstart), self.log_file)", "def trigger_set_fetc(self):\n self.write(\"*DDT #15FETC?\")", "def show_axes(self):\n if hasattr(self, 'axes_widget'):\n self.axes_widget.EnabledOn()\n self.axes_widget.SetCurrentRenderer(self)\n else:\n self.add_axes()\n self.Modified()", "def plot(self):\n\t\t\n\t\ttf=tfData(self.shotno,tStart=None,tStop=None)\n\t\t\n\t\t_plt.figure()\n\t\tax1 = _plt.subplot2grid((3,2), (0,1), rowspan=3) #tf\n\t\tax2 = _plt.subplot2grid((3,2), (0,0)) #vf\n\t\tax3 = _plt.subplot2grid((3,2), (1,0),sharex=ax2) #oh\n\t\tax4 = _plt.subplot2grid((3,2), (2, 0),sharex=ax2) 
#sh\n\t\tfig=_plt.gcf()\n\t\tfig.set_size_inches(10,5)\n\t\t\t\t\n\t\ttStart=-2\n\t\ttStop=20\n\t\t\n\t\tax1.plot(tf.time*1e3,tf.tfBankField)\n\t\tax1.axvspan(tStart,tStop,color='r',alpha=0.3)\n\t\t_plot.finalizeSubplot(ax1,xlabel='Time (s)',xlim=[-150,450],ylabel='TF Field (T)')#,title=self.title\n\t\t\n\t\tax2.plot(self.vfTime*1e3,self.vfBankCurrent*1e-3)\n\t\t_plot.finalizeSubplot(ax2,ylabel='VF Current\\n(kA)')\n\t\t\n\t\tax3.plot(self.ohTime*1e3,self.ohBankCurrent*1e-3)\n\t\t_plot.finalizeSubplot(ax3,ylim=[-20,30],ylabel='OH Current\\n(kA)')\n\t\t\n\t\tax4.plot(self.shTime*1e3,self.shBankCurrent*1e-3)\n\t\t_plot.finalizeSubplot(ax4,ylim=[tStart,tStop],xlabel='Time (s)',ylabel='SH Current\\n(kA)')\n\t\t\n\t\t_plot.finalizeFigure(fig,title=self.title)\n#\t\tfig.set_tight_layout(True)\n\t\t\n\t\treturn fig", "def main(self):\n if self.mode == 0: # drawing\n pass\n elif self.mode == 1: # construction\n if self.step > self.max_step:\n self.mode = 2\n else:\n self.construction = Fourier.build(self.coefficients, self.time)\n self.display.append(self.construction[-1])\n if not self.pause:\n self.step += 1\n elif self.mode == 2: # display\n pass", "def fdplot(self, imx):\n fig = plt.figure()\n maxval = np.max(imx)\n ims = list(map(lambda im: [plt.imshow(np.fabs(im),norm=colors.Normalize(0.0,maxval))], imx))\n animation = anim.ArtistAnimation(fig,ims,interval=50)\n plt.show()", "def main():\n save = False\n show = True\n\n #hd_parameter_plots = HDparameterPlots(save=save)\n #hd_parameter_plots.flow_parameter_distribution_for_non_lake_cells_for_current_HD_model()\n #hd_parameter_plots.flow_parameter_distribution_current_HD_model_for_current_HD_model_reprocessed_without_lakes_and_wetlands()\n #hd_parameter_plots.flow_parameter_distribution_ten_minute_data_from_virna_0k_ALG4_sinkless_no_true_sinks_oceans_lsmask_plus_upscale_rdirs()\n #hd_parameter_plots.flow_parameter_distribution_ten_minute_data_from_virna_0k_ALG4_sinkless_no_true_sinks_oceans_lsmask_plus_upscale_rdirs_no_tuning()\n #ice5g_comparison_plots = Ice5GComparisonPlots(save=save)\n #ice5g_comparison_plots.plotLine()\n #ice5g_comparison_plots.plotFilled()\n #ice5g_comparison_plots.plotCombined()\n #ice5g_comparison_plots.plotCombinedIncludingOceanFloors()\n #flowmapplot = FlowMapPlots(save)\n #flowmapplot.FourFlowMapSectionsFromDeglaciation()\n #flowmapplot.Etopo1FlowMap()\n #flowmapplot.ICE5G_data_all_points_0k()\n #flowmapplot.ICE5G_data_all_points_0k_no_sink_filling()\n #flowmapplot.ICE5G_data_all_points_0k_alg4_two_color()\n #flowmapplot.ICE5G_data_all_points_21k_alg4_two_color()\n #flowmapplot.Etopo1FlowMap_two_color()\n #flowmapplot.Etopo1FlowMap_two_color_directly_upscaled_fields()\n #flowmapplot.Corrected_HD_Rdirs_FlowMap_two_color()\n #flowmapplot.ICE5G_data_ALG4_true_sinks_21k_And_ICE5G_data_ALG4_true_sinks_0k_FlowMap_comparison()\n #flowmapplot.Corrected_HD_Rdirs_And_Etopo1_ALG4_sinkless_directly_upscaled_fields_FlowMap_comparison()\n #flowmapplot.Corrected_HD_Rdirs_And_Etopo1_ALG4_true_sinks_directly_upscaled_fields_FlowMap_comparison()\n #flowmapplot.Corrected_HD_Rdirs_And_ICE5G_data_ALG4_sinkless_0k_directly_upscaled_fields_FlowMap_comparison()\n #flowmapplot.Corrected_HD_Rdirs_And_ICE5G_data_ALG4_true_sinks_0k_directly_upscaled_fields_FlowMap_comparison()\n #flowmapplot.Corrected_HD_Rdirs_And_ICE5G_data_ALG4_corr_orog_0k_directly_upscaled_fields_FlowMap_comparison()\n #flowmapplot.Corrected_HD_Rdirs_And_ICE5G_data_ALG4_corr_orog_downscaled_ls_mask_0k_directly_upscaled_fields_FlowMap_comparison()\n 
#flowmapplot.Corrected_HD_Rdirs_And_ICE5G_data_ALG4_no_true_sinks_corr_orog_0k_directly_upscaled_fields_FlowMap_comparison()\n #flowmapplot.Corrected_HD_Rdirs_And_ICE5G_HD_as_data_ALG4_true_sinks_0k_directly_upscaled_fields_FlowMap_comparison()\n #flowmapplot.Upscaled_Rdirs_vs_Directly_Upscaled_fields_ICE5G_data_ALG4_corr_orog_downscaled_ls_mask_0k_FlowMap_comparison()\n #flowmapplot.Ten_Minute_Data_from_Virna_data_ALG4_corr_orog_downscaled_lsmask_no_sinks_21k_vs_0k_FlowMap_comparison()\n #flowmapplot.Upscaled_Rdirs_vs_Corrected_HD_Rdirs_ICE5G_data_ALG4_corr_orog_downscaled_ls_mask_0k_FlowMap_comparison()\n flowmapplotwithcatchment = FlowMapPlotsWithCatchments(save)\n #flowmapplotwithcatchment.Upscaled_Rdirs_vs_Corrected_HD_Rdirs_ICE5G_data_ALG4_corr_orog_downscaled_ls_mask_0k_FlowMap_comparison()\n #flowmapplotwithcatchment.compare_present_day_and_lgm_river_directions_with_catchments_virna_data_plus_tarasov_style_orog_corrs_for_both()\n #flowmapplotwithcatchment.compare_present_day_river_directions_with_catchments_virna_data_with_vs_without_tarasov_style_orog_corrs()\n #flowmapplotwithcatchment.compare_lgm_river_directions_with_catchments_virna_data_with_vs_without_tarasov_style_orog_corrs()\n #flowmapplotwithcatchment.Upscaled_Rdirs_vs_Corrected_HD_Rdirs_tarasov_upscaled_data_ALG4_corr_orog_downscaled_ls_mask_0k_FlowMap_comparison()\n #flowmapplotwithcatchment.upscaled_rdirs_with_and_without_tarasov_upscaled_data_ALG4_corr_orog_downscaled_ls_mask_0k_FlowMap_comparison()\n #flowmapplotwithcatchment.\\\n #upscaled_rdirs_with_and_without_tarasov_upscaled_north_america_only_data_ALG4_corr_orog_downscaled_ls_mask_0k_FlowMap_comparison()\n #flowmapplotwithcatchment.\\\n #Upscaled_Rdirs_vs_Corrected_HD_Rdirs_tarasov_upscaled_north_america_only_data_ALG4_corr_orog_downscaled_ls_mask_0k_FlowMap_comparison()\n #flowmapplotwithcatchment.\\\n #Upscaled_Rdirs_vs_Corrected_HD_Rdirs_tarasov_upscaled_north_america_only_data_ALG4_corr_orog_glcc_olson_lsmask_0k_FlowMap_comparison()\n #flowmapplotwithcatchment.compare_present_day_and_lgm_river_directions_with_catchments_ICE5G_plus_tarasov_style_orog_corrs_for_both()\n #flowmapplotwithcatchment.compare_present_day_and_lgm_river_directions_with_catchments_ICE6G_plus_tarasov_style_orog_corrs_for_both()\n #flowmapplotwithcatchment.compare_ICE5G_and_ICE6G_with_catchments_tarasov_style_orog_corrs_for_both()\n #flowmapplotwithcatchment.compare_river_directions_with_dynriver_corrs_and_MERIThydro_derived_corrs()\n #flowmapplotwithcatchment.compare_river_directions_with_dynriver_corrs_and_MERIThydro_derived_corrs_original_ts()\n flowmapplotwithcatchment.compare_river_directions_with_dynriver_corrs_and_MERIThydro_derived_corrs_new_ts_10min()\n outflowplots = OutflowPlots(save)\n #outflowplots.Compare_Upscaled_Rdirs_vs_Directly_Upscaled_fields_ICE5G_data_ALG4_corr_orog_downscaled_ls_mask_0k()\n #outflowplots.Compare_Corrected_HD_Rdirs_And_ICE5G_as_HD_data_ALG4_sinkless_all_points_0k()\n #outflowplots.Compare_Corrected_HD_Rdirs_And_ICE5G_as_HD_data_ALG4_true_sinks_all_points_0k()\n #outflowplots.Compare_Corrected_HD_Rdirs_And_ICE5G_ALG4_sinkless_all_points_0k_directly_upscaled_fields()\n #outflowplots.Compare_Corrected_HD_Rdirs_And_ICE5G_ALG4_true_sinks_all_points_0k_directly_upscaled_fields()\n #outflowplots.Compare_Corrected_HD_Rdirs_And_ICE5G_ALG4_corr_orog_all_points_0k_directly_upscaled_fields()\n #outflowplots.Compare_Corrected_HD_Rdirs_And_ICE5G_ALG4_corr_orog_downscaled_ls_mask_all_points_0k_directly_upscaled_fields()\n 
#outflowplots.Compare_Corrected_HD_Rdirs_And_Etopo1_ALG4_sinkless_directly_upscaled_fields()\n #outflowplots.Compare_Corrected_HD_Rdirs_And_Etopo1_ALG4_true_sinks_directly_upscaled_fields()\n #outflowplots.Compare_Corrected_HD_Rdirs_And_ICE5G_plus_tarasov_upscaled_srtm30_ALG4_corr_orog_0k_directly_upscaled_fields()\n #outflowplots.Compare_Original_Corrections_vs_Upscaled_MERIT_DEM_0k()\n outflowplots.Compare_Original_Corrections_vs_Upscaled_MERIT_DEM_0k_new_truesinks()\n #outflowplots.Compare_Original_Corrections_vs_Upscaled_MERIT_DEM_0k_new_truesinks_individual_rivers()\n #outflowplots.Compare_ICE5G_with_and_without_tarasov_upscaled_srtm30_ALG4_corr_orog_0k_directly_upscaled_fields()\n #hd_output_plots = HDOutputPlots()\n #hd_output_plots.check_water_balance_of_1978_for_constant_forcing_of_0_01()\n #hd_output_plots.plot_comparison_using_1990_rainfall_data()\n #hd_output_plots.plot_comparison_using_1990_rainfall_data_adding_back_to_discharge()\n #coupledrunoutputplots = CoupledRunOutputPlots(save=save)\n #coupledrunoutputplots.ice6g_rdirs_lgm_run_discharge_plot()\n #coupledrunoutputplots.extended_present_day_rdirs_lgm_run_discharge_plot()\n #coupledrunoutputplots.ocean_grid_extended_present_day_rdirs_vs_ice6g_rdirs_lgm_run_discharge_plot()\n #coupledrunoutputplots.extended_present_day_rdirs_vs_ice6g_rdirs_lgm_echam()\n #coupledrunoutputplots.extended_present_day_rdirs_vs_ice6g_rdirs_lgm_mpiom_pem()\n #lake_plots = LakePlots()\n #lake_plots.plotLakeDepths()\n #lake_plots.LakeAndRiverMap()\n #lake_plots.LakeAndRiverMaps()\n if show:\n plt.show()", "def main(argv=None):\n\n if argv is None:\n argv = sys.argv\n\n parser = E.OptionParser(\n version=\"%prog version: $Id: gff2plot.py 2781 2009-09-10 11:33:14Z andreas $\", usage=globals()[\"__doc__\"])\n\n parser.add_option(\"-f\", \"--file\", dest=\"filenames\", type=\"string\",\n help=\"files[s] to take data from,stdin = -.\")\n parser.add_option(\"\", \"--symbols\", dest=\"symbols\", type=\"string\",\n help=\"symbols to use for each histogram [steps|...].\")\n parser.add_option(\"--slide-show\", dest=\"slide_show\", type=\"choice\",\n choices=(\"first\", \"all\", \"sequence\"),\n help=\"do a slide show - otherwise, write image to file.\")\n parser.add_option(\"--config\", dest=\"filename_config\", type=\"string\",\n help=\"filename of track configuration file.\")\n parser.add_option(\"--dpi\", dest=\"dpi\", type=\"int\",\n help=\"dpi for hardcopy output.\")\n parser.add_option(\"--window-size\", dest=\"window_size\", type=\"int\",\n help=\"window-size.\")\n parser.add_option(\"--output-filename-pattern\", dest=\"output_pattern_image\", type=\"string\",\n help=\"output pattern for images. 
Should contain a '%(contig)s' pattern .\")\n parser.add_option(\"--global-colours\", dest=\"global_colours\", action=\"store_true\",\n help=\"cycle through colours for all tracks.\")\n\n parser.set_defaults(\n filenames=None,\n symbols=\"k-,b-,r-,c-,m-,y-,g-\",\n output_pattern_image=\"%(contig)s.png\",\n slide_show=None,\n window_size=None,\n filename_config=None,\n dpi=None,\n global_colours=False,\n )\n\n (options, args) = E.Start(parser)\n options.symbols = options.symbols.split(\",\")\n\n #--------------------------------------------------------\n # collect all the data\n # list of data per source and contig\n tracks = {}\n extra_features = {}\n\n if options.filenames:\n options.filenames = options.filenames.split(\",\")\n\n if len(args) > 0:\n options.filenames = args\n\n if options.filenames:\n\n for filename in options.filenames:\n\n if filename == \"-\":\n infile = sys.stdin\n else:\n infile = IOTools.openFile(filename)\n\n data = readData(infile)\n\n if filename != \"-\":\n infile.close()\n\n track[filename] = Track(title=filename, data=data)\n\n elif options.filename_config:\n # get track information from config file\n config = ConfigParser.ConfigParser()\n config.read(os.path.expanduser(options.filename_config))\n\n # first extract special sections\n for section in config.sections():\n if section == \"vlines\":\n infile = IOTools.openFile(config.get(section, \"filename\"), \"r\")\n data = readData(infile)\n infile.close()\n extra_features[section] = Track(title=section,\n data=data,\n config=config)\n config.remove_section(section)\n elif section in (\"figure\", \"legend\"):\n extra_features[section] = Track(title=section,\n data=None,\n config=config)\n config.remove_section(section)\n n = 0\n for section in config.sections():\n\n if config.has_option(section, \"filename\"):\n infile = IOTools.openFile(config.get(section, \"filename\"), \"r\")\n data = readData(infile)\n infile.close()\n\n tracks[section] = Track(title=section,\n data=data,\n priority=n,\n config=config)\n\n elif config.has_option(section, \"tracks\"):\n subtracks = config.get(section, \"tracks\")\n subtracks = map(lambda x: x.strip(), subtracks.split(\",\"))\n\n tracks[section] = Track(title=section,\n data=None,\n config=config,\n priority=n,\n subtracks=subtracks)\n n += 1\n\n # compile set of all contigs\n contigs = set()\n for track in tracks.values():\n if track.mData:\n contigs = contigs.union(track.mData.keys())\n\n # re-arrange tracks and subtracks\n tracks = layoutTracks(tracks)\n\n nplots = 0\n figures = []\n legend = None\n for contig in contigs:\n figure, l = plotContig(contig, tracks, options,\n plot_legend=legend is None,\n extra_features=extra_features)\n figures.append(figure)\n if l:\n legend = l\n\n if options.slide_show:\n if options.slide_show == \"first\":\n pylab.show()\n elif options.slide_show == \"all\":\n pylab.show()\n elif options.slide_show == \"sequence\":\n pylab.show()\n else:\n\n extra_args = {}\n if options.dpi:\n extra_args['dpi'] = options.dpi\n\n for contig, figure in zip(contigs, figures):\n params = {'contig': contig}\n filename = options.output_pattern_image % params\n E.info(\"# creating image: %s\" % filename)\n figure.savefig(os.path.expanduser(filename), **extra_args)\n if legend:\n params = {'contig': \"legend\"}\n filename = options.output_pattern_image % params\n E.info(\"creating image: %s\" % filename)\n legend.savefig(os.path.expanduser(filename), **extra_args)\n\n E.info(\"ninput=%i, ncontigs=%i, nplots=%i\" %\n (len(tracks), nplots, len(contigs)))\n\n 
E.Stop()", "def toggle_draw_axes(self):\n if self.draw_axes:\n self.draw_axes = False\n else:\n self.draw_axes = True\n self.redraw()", "def toggle_draw_axes(self):\n if self.draw_axes:\n self.draw_axes = False\n else:\n self.draw_axes = True\n self.redraw()", "def _setFig(self):\n self.p.background_fill_color = grey['light']\n self.p.xgrid.grid_line_color = None\n self.p.ygrid.grid_line_color = None\n self.p.ygrid.grid_line_dash = 'dotted'\n self.p.ygrid.grid_line_dash = 'dotted'\n\n self.p.xgrid.minor_grid_line_color = grey['median']\n self.p.ygrid.minor_grid_line_color = grey['median']\n self.p.xgrid.minor_grid_line_dash = 'dotted'\n self.p.ygrid.minor_grid_line_dash = 'dotted'\n\n self.p.xaxis.axis_label = \"tsne_feature_0\"\n self.p.yaxis.axis_label = \"tsne_feature_1\"", "def set_axes(self,pdf=None,sfr=(-6,2,100),vout=(0,4,500),cs=(0,4,500),verbose=False):\n if pdf is not None:\n if verbose:\n print('Setting up from simulation PDF...')\n attrs = pdf.attrs\n u = pdf.logvout.data\n x1,x2,dbin = u.min(), u.max(), attrs['dbin']\n print(' u in ({:.1f},{:.1f}) with du = {:.2f}'.format(x1,x2,dbin))\n w = pdf.logcs.data\n x1,x2,dbin = w.min(), w.max(), attrs['dbin']\n print(' w in ({:.1f},{:.1f}) with dw = {:.2f}'.format(x1,x2,dbin))\n print(' Sigma_SFR = {:.3g},'.format(attrs['sfr']), end=' ')\n print('ZISM = {:.3g}'.format(attrs['ZISM']))\n for fl in ['Mpdf','ppdf','Epdf','Zpdf']:\n # log value of 2.e4 and 5.5e5\n T1,T2=(1.1854266752455402,1.9121660193614398)\n c=pdf[fl].sel(logcs=slice(0,T1)).sum().data*dbin**2\n i=pdf[fl].sel(logcs=slice(T1,T2)).sum().data*dbin**2\n h=pdf[fl].sel(logcs=slice(T2,4)).sum().data*dbin**2\n t=pdf[fl].sel().sum().data*dbin**2\n msg = ' {:5s}:'.format(fl)\n for ph, fph in zip(['cool','int','hot','total'],[c,i,h,t]):\n msg += ' {}={:.3f}'.format(ph,fph)\n print(msg)\n self.logvout = pdf.logvout\n self.logcs = pdf.logcs\n self.dlogvout = pdf.attrs['dbin']\n self.dlogcs = pdf.attrs['dbin']\n self.sfr = pdf.attrs['sfr']\n self.logsfr = np.log10(self.sfr)\n self.vout = 10.**self.logvout\n self.cs = 10.**self.logcs\n self.params['ZISM0']=pdf.attrs['ZISM']\n else:\n ranges=dict(cs=cs,vout=vout)\n if hasattr(sfr, '__len__'):\n if len(sfr) == 3:\n ranges['sfr']=sfr\n else:\n raise ValueError('sfr should either be an array/list/tuple of'+\n 'three elements (log min, log max, N), '+\n 'but len(sfr)={}'.format(len(sfr)))\n else: # scalar\n self.sfr=sfr\n if sfr>0:\n self.logsfr=np.log10(sfr)\n else:\n raise ValueError('sfr must be positive, but sfr={}'.format(sfr))\n if verbose: print('sfr={}'.format(sfr))\n\n for f in ranges:\n if len(ranges[f]) != 3:\n raise ValueError('{} should either be array-like with '.format(f)+\n 'three elements (log min, log max, N), '+\n 'but len({})={}'.format(f,len(ranges[f])))\n\n x1,x2,N = ranges[f]\n if verbose: print('{}: min={}, max={}, N={}'.format(f,x1,x2,N))\n x = np.linspace(x1,x2,N)\n x_da = xr.DataArray(x,coords=[x],dims=['log'+f])\n setattr(self,'dlog'+f,x[1]-x[0])\n setattr(self,'log'+f,getattr(x_da,'log'+f))\n setattr(self,f,10.**getattr(self,'log'+f))\n\n self.u = self.logvout\n self.w = self.logcs\n self.vBz = np.sqrt(5.0*self.cs**2+self.vout**2)\n self.Mach = 1/self.cs*self.vout", "def set_flammable(self, f):\n self.flammable = f", "def update(self):\n self.plot.draw()\n \n func=str(self.edit1b.currentText())\n if self.win.test()==0:\n x=np.linspace(0,10,200)\n elif self.win.test()==1:\n x=np.linspace(0,0.40,200)\n \n pattern1=r'Steel'\n pattern2=r'Aluminium'\n pattern3=r'[\\d]+'\n \n if (func!='Comparison Chart'):\n 
self.edit2b.setDisabled(False)\n self.edit3b.setDisabled(False)\n self.edit4b.setDisabled(False)\n if (func=='Quenched/Tempered Steel'):\n alpha = 0.0025\n elif (func=='Annealed Steel'):\n alpha = 0.01\n elif (func=='Steel (input Su)'):\n S = str(self.edit2b.text())\n if (self.win.test()==0):\n S = str(float(S)/6.895)\n alpha = notch.alpha(eval(S))\n elif (func=='Aluminium Alloy 356.0 as cast'):\n rho = 0.08\n elif (func=='Aluminium Alloy 6061'):\n rho = 0.025\n elif (func=='Aluminium Alloy 7075'):\n rho = 0.015\n elif (func=='Material dropdown'):\n pass\n \n y1=[]\n if re.search(pattern1,func):\n Su=notch.su_s(alpha)\n if (self.win.test()==0):\n Su = Su*6.895\n for i in range(len(x)):\n y1.append(notch.nsp(alpha,x[i],self.win.test()))\n y=np.asarray(y1)\n if (re.search(pattern3,str(self.edit3b.text()))):\n r=eval(str(self.edit3b.text()))\n self.edit4b.setText(str(notch.nsp(alpha,r,self.win.test())))\n elif re.search(pattern2,func):\n Su=notch.su_a(rho)\n if (self.win.test()==0):\n Su = Su*6.895\n for i in range(len(x)):\n y1.append(notch.nsn(rho,x[i],self.win.test()))\n y=np.asarray(y1)\n if (re.search(pattern3,str(self.edit3b.text()))):\n r=eval(str(self.edit3b.text()))\n self.edit4b.setText(str(notch.nsn(rho,r,self.win.test())))\n \n self.edit2b.setText(str(Su))\n func1 = 'Steel (Su='+str(self.edit2b.text())+')'\n if (func!='Steel (input Su)'):\n self.plot.redraw(x,y,func, self.xlabel)\n elif (func=='Steel (input Su)'):\n self.plot.redraw(x,y,func1, self.xlabel)\n \n elif (func=='Comparison Chart'):\n self.edit2b.setText(\"\")\n self.edit2b.setDisabled(True)\n self.edit3b.setText(\"\")\n self.edit3b.setDisabled(True)\n self.edit4b.setText(\"\")\n self.edit4b.setDisabled(True)\n self.plot.draw_comp(self.xlabel, self.win.test())", "def test_draw():\n circ_m = test_QFTn(3)\n print(launch(1024, circ_m))\n fig = circ_m.draw(output='mpl', filename='C:/Users/RaphaelLambert/Pictures/test.png')\n return fig", "def fullcore_detectors():\n\n cwd = os.getcwd()\n fname = get_sample_data('%s/oecd-fullcore_geom1.png' % (cwd))\n im = plt.imread(fname)\n\n # crop the image\n height, width, color = np.shape(im)\n y1 = int(height*0.15)\n y2 = int(height*0.6)\n x1 = int(width*0.45)\n x2 = int(width)\n plt.imshow(im[y1:y2,x1:x2,:])\n plt.axis('off')\n\n # Axial 1\n x = 158\n y = 291\n P = 55\n s = P/2/np.cos(np.pi/6)\n plt.plot([s+x, 2*s+x], [0+y, 0+y], 'r-', lw=1.5, label='1- Axial1')\n plt.plot([s+x, 2*s+x], [P+y, P+y], 'r-', lw=1.5)\n plt.plot([s+x, s/2+x], [0+y, P/2+y], 'r-', lw=1.5)\n plt.plot([s/2+x, s+x], [P/2+y, P+y], 'r-', lw=1.5)\n plt.plot([2*s+x, 2*s+s/2+x], [0+y, P/2+y], 'r-', lw=1.5)\n plt.plot([2*s+s/2+x, 2*s+x], [P/2+y, P+y], 'r-', lw=1.5)\n\n plt.text(x=x+37, y=y+40, s='1', fontsize=20, color='w')\n\n # Axial 2\n x = 210\n y = 321\n P = 55\n s = P/2/np.cos(np.pi/6)\n plt.plot([s+x, 2*s+x], [0+y, 0+y], 'r-', lw=1.5, label='2- Axial2')\n plt.plot([s+x, 2*s+x], [P+y, P+y], 'r-', lw=1.5)\n plt.plot([s+x, s/2+x], [0+y, P/2+y], 'r-', lw=1.5)\n plt.plot([s/2+x, s+x], [P/2+y, P+y], 'r-', lw=1.5)\n plt.plot([2*s+x, 2*s+s/2+x], [0+y, P/2+y], 'r-', lw=1.5)\n plt.plot([2*s+s/2+x, 2*s+x], [P/2+y, P+y], 'r-', lw=1.5)\n plt.text(x=x+37, y=y+40, s='2', fontsize=20, color='w')\n\n # Axial 3\n x = 262\n y = 291\n P = 55\n s = P/2/np.cos(np.pi/6)\n plt.plot([s+x, 2*s+x], [0+y, 0+y], 'r-', lw=1.5, label='3- Axial3')\n plt.plot([s+x, 2*s+x], [P+y, P+y], 'r-', lw=1.5)\n plt.plot([s+x, s/2+x], [0+y, P/2+y], 'r-', lw=1.5)\n plt.plot([s/2+x, s+x], [P/2+y, P+y], 'r-', lw=1.5)\n plt.plot([2*s+x, 2*s+s/2+x], [0+y, 
P/2+y], 'r-', lw=1.5)\n plt.plot([2*s+s/2+x, 2*s+x], [P/2+y, P+y], 'r-', lw=1.5)\n\n plt.text(x=x+37, y=y+40, s='3', fontsize=20, color='w')\n\n # Radial 1\n x = 52\n y = 349\n plt.plot([x, 495+x], [y, y], 'r-', lw=1.5, label='4- Radial1')\n plt.plot([x, 495+x], [y, y], 'r-', lw=1.5, label='5- Radial2')\n plt.text(x=x+380, y=y-10, s='4, 5', fontsize=20, color='black')\n\n # Radial 2\n x = 52\n y = 349\n L = 495\n plt.plot([x, L*np.cos(np.pi/6)+x], [y, -L/2+y], 'r-', lw=1.5, label='6- Radial3')\n plt.text(x=350, y=y-200, s='6', rotation=30, fontsize=20, color='black')\n plt.legend(loc='best')\n\n plt.savefig(\"oecd-fullcore-detectors\", dpi=300, bbox_inches=\"tight\")", "def plot(self, fig=None, ax=None,\n curve=True, control_points=True, frenet_serret=False, axis_off=False, ticks_off=False):\n\n if fig is None:\n\n # One dimension (law of evolution)\n if self.ndim == 1:\n fig = plt.figure(figsize=(6, 5))\n ax = fig.add_subplot(111)\n ax.set_xlabel('$u$ parameter', fontsize=12, color='k', labelpad=12)\n ax.set_ylabel('NURBS curve value', fontsize=12, color='k', labelpad=12)\n # ax_xy.xaxis.set_major_formatter(mpl.ticker.FormatStrFormatter('%.1f'))\n # ax_xy.yaxis.set_major_formatter(mpl.ticker.FormatStrFormatter('%.1f'))\n for t in ax.xaxis.get_major_ticks(): t.label.set_fontsize(12)\n for t in ax.yaxis.get_major_ticks(): t.label.set_fontsize(12)\n if ticks_off:\n ax.set_xticks([])\n ax.set_yticks([])\n if axis_off:\n ax.axis('off')\n\n # Two dimensions (plane curve)\n elif self.ndim == 2:\n fig = plt.figure(figsize=(6, 5))\n ax = fig.add_subplot(111)\n ax.set_xlabel('$x$ axis', fontsize=12, color='k', labelpad=12)\n ax.set_ylabel('$y$ axis', fontsize=12, color='k', labelpad=12)\n # ax_xy.xaxis.set_major_formatter(mpl.ticker.FormatStrFormatter('%.1f'))\n # ax_xy.yaxis.set_major_formatter(mpl.ticker.FormatStrFormatter('%.1f'))\n for t in ax.xaxis.get_major_ticks(): t.label.set_fontsize(12)\n for t in ax.yaxis.get_major_ticks(): t.label.set_fontsize(12)\n if ticks_off:\n ax.set_xticks([])\n ax.set_yticks([])\n if axis_off:\n ax.axis('off')\n\n # Three dimensions (space curve)\n elif self.ndim == 3:\n fig = mpl.pyplot.figure(figsize=(6, 5))\n ax = fig.add_subplot(111, projection='3d')\n ax.view_init(azim=-120, elev=30)\n ax.grid(False)\n ax.xaxis.pane.fill = False\n ax.yaxis.pane.fill = False\n ax.zaxis.pane.fill = False\n ax.xaxis.pane.set_edgecolor('k')\n ax.yaxis.pane.set_edgecolor('k')\n ax.zaxis.pane.set_edgecolor('k')\n ax.xaxis.pane._alpha = 0.9\n ax.yaxis.pane._alpha = 0.9\n ax.zaxis.pane._alpha = 0.9\n ax.set_xlabel('$x$ axis', fontsize=12, color='k', labelpad=12)\n ax.set_ylabel('$y$ axis', fontsize=12, color='k', labelpad=12)\n ax.set_zlabel('$z$ axis', fontsize=12, color='k', labelpad=12)\n # ax_xy.xaxis.set_major_formatter(mpl.ticker.FormatStrFormatter('%.1f'))\n # ax_xy.yaxis.set_major_formatter(mpl.ticker.FormatStrFormatter('%.1f'))\n # ax_xy.zaxis.set_major_formatter(mpl.ticker.FormatStrFormatter('%.1f'))\n for t in ax.xaxis.get_major_ticks(): t.label.set_fontsize(8)\n for t in ax.yaxis.get_major_ticks(): t.label.set_fontsize(8)\n for t in ax.zaxis.get_major_ticks(): t.label.set_fontsize(8)\n ax.xaxis.set_rotate_label(False)\n ax.yaxis.set_rotate_label(False)\n ax.zaxis.set_rotate_label(False)\n if ticks_off:\n ax.set_xticks([])\n ax.set_yticks([])\n ax.set_zticks([])\n if axis_off:\n ax.axis('off')\n\n else: raise Exception('The number of dimensions must be 1, 2 or 3')\n\n\n # Add objects to the plot\n if curve: self.plot_curve(fig, ax)\n if control_points: 
self.plot_control_points(fig, ax)\n if frenet_serret: self.plot_frenet_serret(fig, ax)\n\n # Set the scaling of the axes\n self.rescale_plot(fig, ax)\n\n return fig, ax", "def setup_fader(self):\n ScreenFader(fade=\"in\")\n self.should_change_scene = False\n self.should_fade_out = False\n self.change_scene_timer = 0.0", "def plot_fenics_mesh(mesh, new_fig=True):\n if(new_fig):\n plt.figure()\n\n plot(mesh)\n #plt.title(\"FEniCS mesh\")\n plt.show(block=False)\n\n pass", "def plot_f(self, *args, **kwargs):\r\n kwargs['plot_raw'] = True\r\n self.plot(*args, **kwargs)", "def plot_true(self, ax):\n t = self.t\n x_true = self.x_true\n b = self.b\n\n ax.plot(t, x_true, 'k-', label='true image', lw=1.5)\n ax.plot(t, b, 'ro', label='blurred')\n ax.set_title(r'True')\n ax.set_xlabel(r'$t$')\n ax.set_ylabel(r'$x$')\n leg = ax.legend(loc='upper left')\n leg.get_frame().set_alpha(0.5)\n ax.grid()", "def friewallOn():\n pass", "def show_filters(self):\n w_mat = np.transpose(self.sess.run(self.W_fc1))\n\n plt.figure(figsize=(10,10), facecolor='w', edgecolor='w')\n plot_positions = [(0,0),(0,1),(1,0),(1,1)]\n for ch in range(self.n_input_channels):\n grid,_ = ia.image_grid_RGB( w_mat,\n n_channels=self.n_input_channels,\n image_size=(self.y_res,self.x_res), n_x=6, n_y=6,\n channel_order=(ch,ch,ch), amplitude_scaling=(1,1,1),\n line_color=1, auto_scale=True, return_borders=False )\n colormax = np.abs(grid).max()\n with sns.axes_style(\"white\"):\n ax = plt.subplot2grid( (2,2), plot_positions[ch] )\n ax.imshow( grid[:,:,0], interpolation='nearest',\n cmap=plt.get_cmap('seismic'),\n clim=(-1*colormax,colormax) )\n ax.set_title(\"Hidden units, channel {}\".format(ch))\n plt.axis('tight')\n plt.axis('off')\n plt.tight_layout()", "def event_m20_11_x115(z7=_):\n \"\"\"State 0,1: Flying animation playback\"\"\"\n ChangeObjState(z7, 70)\n assert CompareObjStateId(z7, 20, 0)\n \"\"\"State 2: End state\"\"\"\n return 0", "def force_show(sub_Idx):\n force_path = './dataset/subj_' + f'{sub_Idx:02d}'+ '/forces/force_' + f'{sub_Idx:02d}' + '.txt'\n image_path = './dataset/subj_' + f'{sub_Idx:02d}'+ '/images/'\n force_num = len(glob.glob(image_path + '*.jpg'))\n force_list = load_force_txt(force_path,force_num)\n print('showing '+f'{force_num:03d}'+ ' raw forces for subject ' + f'{sub_Idx:02d}')\n\n fig = plt.figure(figsize = (10, 7)) \n ax = plt.axes(projection =\"3d\") \n\n for x, y, z in force_list:\n ax.scatter3D(x, y, z, color = \"green\")\n ax.set_xlabel('X-axis', fontweight ='bold') \n ax.set_ylabel('Y-axis', fontweight ='bold') \n ax.set_zlabel('Z-axis', fontweight ='bold')\n plt.title(\"3D force data\") \n plt.show()", "def do_fit(self):\n\n if (self._flag == 1):\n self._gf = [0.2]\n self._gf = self.par*(self._num_fu*len(self._sites)*2)\n x, F = self.read_from_file(\n self._sn, self._qn, self._path) # read data from the file\n # ,ftol=1.0e-7,xtol=1.0e-8)\n popt, pcov = curve_fit(\n self.modelfun, x, F, p0=self._gf, maxfev=5000)\n self._gf = popt\n\n elif (self._flag == 2):\n\n# par=[0.0]*(self._num_fu*5)\n# for j in range(self._num_fu):\n# par[j*5]=0.0*math.copysign(1,(pow(-1,j)))\n# self._gf[j*5]=0.1\n# par[j*5+1]=6.45\n# par[j*5+2]=0.0\n# par[j*5+3]=0.05\n# par[j*5+4]=1.0\n\n X, F = self.read_from_file(self._sn, self._qn, self._path) # read data from the file\n\n# height, xx, width=self.moments(F)\n# Tracer()()\n# par=[0.0]*(self._num_fu*5)\n# for j in range(self._num_fu):\n# par[j*5]=x[0,xx]\n# par[j*5]=X[0,xx]*math.copysign(1,(pow(-1,j)))\n# par[j*5+1]=X[1,xx]\n# par[j*5+2]=X[2,xx]\n# par[j*5+3]=0.007\n# 
par[j*5+4]=height*math.copysign(1,(pow(-1,j)))\n\n xi, yi, zi = np.mgrid[-6.5:6.5:160j, 4.0:8.9:160j, -7.5:7.5:160j]\n x, y, z = xi.flatten(), yi.flatten(), zi.flatten()\n XX = np.vstack((x, y, z))\n\n invdisttree = Invdisttree(X.T, F, leafsize=10, stat=1)\n AA = invdisttree(XX.T, nnear=130, eps=0, p=1)\n\n# aaa1,bbb1=self.detect_local_minima(-AA.reshape(xi.shape))\n# aaa2,bbb2=self.detect_local_maxima(-AA.reshape(xi.shape))\n if self.peaks==[]:\n print('\\n---------------------------------------------------------------------')\n print('Detecting maxima and minima of target function...',)\n\n peaks_min, min_coord, peaks_max, max_coord = self.detect_min_max(AA.reshape(xi.shape))\n print('done')\n print('Number of the min peaks: {}'.format(len(peaks_min)))\n print('Number of the max peaks: {}'.format(len(peaks_max)))\n print('---------------------------------------------------------------------\\n')\n # fig=plt.figure()\n # ax = fig.add_subplot(111, projection='3d')\n # ax.plot_surface(xi[:,:,60],yi[:,:,60],bbb2[:,:,60], cmap=cm.jet, linewidth=0.2)\n # plt.hold(True)\n # plt.show()\n\n if peaks_max==[]:\n peaks=np.insert(peaks_min, np.arange(len(peaks_max)), peaks_max)\n coords=np.insert(min_coord, np.arange(max_coord.shape[1]), max_coord, axis=1)\n else:\n peaks = np.insert(peaks_max, np.arange(len(peaks_min)), peaks_min)\n coords = np.insert(max_coord, np.arange(min_coord.shape[1]), min_coord, axis=1)\n\n self.peaks=peaks\n self.coords=coords\n\n par = [0.0]*(self._num_fu*5)\n j1 = 0\n aaaa = 1\n for j in range(self._num_fu):\n if (j > aaaa*self.coords.shape[1]-1):\n j1 = 0\n aaaa += 1\n par[j*5] = xi[self.coords[0, j1], self.coords[0, j1], self.coords[0, j1]]\n par[j*5+1] = yi[self.coords[1, j1], self.coords[1, j1], self.coords[1, j1]]\n par[j*5+2] = zi[self.coords[2, j1], self.coords[2, j1], self.coords[2, j1]]\n # par[j*5+3] = 0.1003+0.1000*math.copysign(1, (pow(-1, j)))\n par[j*5+3] = 0.0001\n# if j < 15:\n# par[j*5+3] = 0.00001\n# else:\n# par[j*5+3] = 0.0005\n par[j*5+4] = self.peaks[j1]\n# print(coords[0, j1], coords[1, j1], coords[2, j1])\n j1 += 1\n # popt, pcov = curve_fit(self.modelfun1, x[:,1:20000], F[1:20000],p0=par,maxfev=150000,xtol=1e-8,ftol=1e-8)\n popt, pcov = curve_fit(\n self.modelfun1, X, F, p0=par, maxfev=150000, xtol=1e-6,\n ftol=1e-8)\n # popt, pcov = curve_fit(self.modelfun1, XX, AA, p0=par)\n self._gf = popt\n# self.error=np.diagonal(pcov, offset=0)\n# print(pcov)\n else:\n # pass\n sys.exit(\"Wrong flag in do_fit\")", "def add_omniflux_plot(self, nameflux, target=None, zlim=[1E4,1E9],\n add_cbar=True, do_orbticks=False, title=False,\n timelim=False, loc=111, no_xlabels=False):\n\n import matplotlib.pyplot as plt\n from matplotlib.colors import LogNorm\n from matplotlib.ticker import (FuncFormatter, LogLocator, \n LogFormatterMathtext)\n from matplotlib.dates import date2num\n\n fig, ax = set_target(target, loc=loc, figsize=(10,4))\n \n # Check for omni fluxes, calculate as necessary.\n if not nameflux in self:\n self.create_omniflux()\n if not nameflux in self:\n raise KeyError('%s is not a valid omnidirectional flux.' 
\n % nameflux)\n # Create a time vector that binds each pixel correctly.\n time=np.zeros(self.time.size+1)\n time[0]=date2num(self.time[0]-dt.timedelta(seconds=self.dt/2.0))\n time[1:]=date2num(self.time+dt.timedelta(seconds=self.dt/2.0))\n #egrid=self['energy_grid']\n ecenter, eboundary, ewidth=gen_egrid(nE=self['energy_grid'].size)\n# print(\"Need better energy grid setup for pcolormesh.\")\n flx=ax.pcolormesh(time,eboundary,self[nameflux].transpose(),\n norm=LogNorm(),vmin=zlim[0],vmax=zlim[1])\n ax.set_yscale('log')\n ax.set_ylim( [eboundary[0],eboundary[-1]] )\n if not timelim:\n timelim=[self.time[0],self.time[-1]]\n applySmartTimeTicks(ax,timelim,dolabel=True)\n if no_xlabels:\n ax.set_xlabel('')\n ax.set_xticklabels([''])\n do_orbticks=False\n ax.set_ylabel('E ($keV$)')\n if title: #If title not set, use a default:\n ax.set_title(title)\n else:\n labels={'omniH':'H$^{+}$','omniHe':'He$^{+}$',\n 'omniO':'O$^{+}$','omnie':'e$^{-}$'}\n ax.set_title('Omnidirectional %s Flux' % (labels[nameflux]))\n if do_orbticks:\n ax.xaxis.set_major_formatter(FuncFormatter(self._orbit_formatter))\n if add_cbar:\n cbar = plt.colorbar(flx, pad=0.01, shrink=.85, ticks=LogLocator(), \n format=LogFormatterMathtext(), ax=ax)\n cbar.set_label('$cm^{-2}s^{-1}keV^{-1}$')\n else:\n cbar=False\n\n return fig, ax, flx, cbar", "def fplot(xlab, ylab, ax=None, axargs=None, scales=None):\n if ax is None:\n ax = plt.gca()\n if scales is not None: # Could allow to accept only one arg\n ax.set_xscale(scales[0])\n ax.set_yscale(scales[1])\n ax.set_xlabel(xlab)\n ax.set_ylabel(ylab)\n if axargs is not None:\n ax.axis(axargs)\n # plt.ticklabel_format(useOffset=False, axis='x')\n # plt.ticklabel_format(useOffset=False, axis='y')", "def progMode(state):\n\t# Envoie la commande setTorque a tous les servos\n\taxDriver.setTorque(axDriver.BROADCASTID, state)", "def calc_fffb_inhibition(self) -> None:\n # Feedforward inhibition\n ffi = self.spec.ff * max(self.avg_net - self.spec.ff0, 0)\n # Feedback inhibition\n self.fbi = self.spec.fb_dt * (self.spec.fb * self.avg_act - self.fbi)\n # Global inhibition\n self.gc_i = self.spec.gi * (ffi * self.fbi)", "def initial_setup_baxter():\n \n #print 'Initializing node...'\n #rospy.init_node('baxter_or')\n baxter.enable()\n baxter.calibrateLeftGripper()", "def _set_draw_mode(draw_mode):\n###############################################################################\n global _draw_mode\n _draw_mode = draw_mode", "def plot_finalize():\n global figure\n global axes\n\n plot_refresh()\n plt.ioff()\n plt.show()\n\n figure, axes = None, None", "def ff_callback(self):\n self.rokucontrol.ff_callback()", "def setup():\n\trfm.setup()\n\t\"\"\"Read the interrupt status1 register\"\"\"\n\tItStatus1 = rfm.read_register(0x03)\n\tItStatus2 = rfm.read_register(0x04)\n\n\t\"\"\"Set RF Parameters\"\"\"\n\t# Set the center frequency to 915MHz\n\trfm.write_register(0x75, 0x75) # Write 0x75 to the Frequency Band Select register\n\trfm.write_register(0x76, 0xBB) # Write 0xBB to the Nominal Carrier Frequency1 register\n\trfm.write_register(0x77, 0x80) # Write 0x80 to the Nominal Carrier Frequency0 register\n\t# Set the desired TX data rate (9.6kbps)\n\trfm.write_register(0x6E, 0x4E) # Write 0x4E to the TXDataRate 1 register\n\trfm.write_register(0x6F, 0xA5) # Write 0xA5 to the TXDataRate 0 register\n\trfm.write_register(0x70, 0x2C) # Write 0x2C to the Modulation Mode Control 1 register\n\t# Set the desired TX deviation (+=45kHz)\n\trfm.write_register(0x72, 0x48) # Write 0x48 to the Frequency Deviation 
Register\n\n\t\"\"\"Set Packet Configuration\"\"\"\n\t# Set packet structure and modulation type\n\trfm.write_register(0x34, 0x09) # Write 0x09 to the Preamble length register\n\t# Disable header bytes; set variable packet length (the length of the packet is defined by the\n\t# received packet length field of the packet); set the synch word to two bytes long\n\trfm.write_register(0x33, 0x02) # Write 0x02 to the Header Control 2 register\n\t# Set the sync word pattern to 0x2DD4\n\trfm.write_register(0x36, 0x2D) # Write 0x2D to the Sync Word 3 register\n\trfm.write_register(0x37, 0xD4) # Write 0xD4 to the Sync Word 2 register\n\t# Enable the TX packet handler and CRC-16 (IBM) check\n\trfm.write_register(0x30, 0x0D) # Write 0x0D to the Data Access Control register\n\t# Enable FIFO mode and GFSK modulation\n\trfm.write_register(0x71, 0x63) # Write 0x63 to the Modulation Mode Control 2 Register \n\n\t\"\"\"Select modulation\"\"\"\n\t# Set VCO and PLL\n\trfm.write_register(0x54, 0x7F) # Write 0x7F to the VCO Current Trimming register\n\trfm.write_register(0x59, 0x40) # Write 0x40 to the Divider Current Trimming register", "def force_draw(self):\n import matplotlib.pyplot as plt\n\n plt.show()", "def sysPLQF(mirror, blkFlag=True):\n import matplotlib.pyplot as plt\n import numpy as np # to ndarray.flatten ax\n\n mir = mirror\n xend = max(mir.r_t)\n\n fig, ax = plt.subplots(nrows=2, ncols=2,)\n ax = np.ndarray.flatten(ax)\n ax[0].set_title('Real Power Generated')\n for mach in mir.Machines:\n ax[0].plot(mir.r_t, mach.r_Pe, \n marker = 10,\n fillstyle='none',\n #linestyle = ':',\n label = 'Pe Gen '+ mach.Busnam)\n ax[0].set_xlabel('Time [sec]')\n ax[0].set_ylabel('MW')\n\n ax[2].set_title('Reactive Power Generated')\n for mach in mir.Machines:\n ax[2].plot(mir.r_t, mach.r_Q, \n marker = 10,\n fillstyle='none',\n #linestyle = ':',\n label = 'Q Gen '+ mach.Busnam)\n ax[2].set_xlabel('Time [sec]')\n ax[2].set_ylabel('MVAR')\n\n ax[1].set_title('Total System P Loading')\n ax[1].plot(mir.r_t, mir.r_ss_Pload, \n marker = 11,\n #fillstyle='none',\n #linestyle = ':',\n label = 'Pload')\n ax[1].set_xlabel('Time [sec]')\n ax[1].set_ylabel('MW')\n\n ax[3].set_title('System Mean Frequency')\n ax[3].plot(mir.r_t, mir.r_f,\n marker = '.',\n #linestyle = ':',\n label = r'System Frequency')\n ax[3].set_xlabel('Time [sec]')\n ax[3].set_ylabel('Frequency [PU]')\n\n # Global Plot settings\n for x in np.ndarray.flatten(ax):\n x.set_xlim(0,xend)\n x.legend()\n x.grid(True)\n\n fig.tight_layout()\n\n plt.show(block = blkFlag)", "def main():\n\n varList = {'beta': 6., 'convSpeed': 1.2, 'Mark': 0., 'axi': 1, 'acModes': 4, 'Nr': 801, 'Tf': 600., 'xf': 0.51}\n\n # Solve steady flame.\n # BC1: I have the attachment BC at r = 1, always\n # BC2: I need to set dF/dr = 0 at r = 0 iff Mark != 0\n [qMean, r, FMean] = steady_flame_area_FD3(varList['Mark'], varList['beta'], varList['axi'], varList['Nr'])\n r = r * varList['beta']\n\n # Calculate mean flame derivatives\n dFMeanDr = derivsnew.FD1_CT2_D(FMean, r[1] - r[0])\n d2FMeanDr2 = derivsnew.FD2_CT2_D(FMean, r[1] - r[0])\n\n #Apply BC smooth tip:\n if(varList['Mark']!=0.0):\n dFMeanDr[-1] = 0.0\n\n # Use correct number of points. 
Remember that the extrems need to be set depending on the BC!\n # The attach BC (first point) is always assumed to be true and removed from the vector list\n if(varList['Mark']==0):\n Nr = varList['Nr'] / 2\n dFMeanDr = dFMeanDr[1:]\n d2FMeanDr2 = d2FMeanDr2[1:]\n r = r[1:]\n # The smooth BC holds only if Mark!=0 (second derivatives appear): remove also the last point\n else:\n Nr = varList['Nr'] / 2 - 1\n dFMeanDr = dFMeanDr[1:-1]\n d2FMeanDr2 = d2FMeanDr2[1:-1]\n r = r[1:-1]\n\n # Calculate geometric values\n den = 1 + varList['beta'] * varList['beta'] * dFMeanDr * dFMeanDr\n dR = r[1] - r[0]\n # Set Nx equal to Nr for now.\n # The implementation is more complicated if they differ, and need to interpolate between values.\n Nx = Nr\n\n # Nonuniform grid spacing along x!\n # Nx = length(dx) has to hold.\n dx = np.empty(len(FMean) - 1)\n for ii in range(1, len(FMean)):\n dx[ii - 1] = FMean[ii] - FMean[ii - 1]\n\n [A, B, C, tau] = loadAcoustics(varList['xf'], varList['Tf'], varList['acModes'], varList['beta'])\n\n Matrix = buildMatrix(Nr, dR, varList['beta'], den, r, FMean, dFMeanDr, d2FMeanDr2, varList['Mark'], varList['acModes'], A,\n B, C, Nx, dx, tau, qMean, varList['convSpeed'])\n\n [d, W, V] = eigProblem.solveEigProb(Matrix)\n [dnew, Wnew, Vnew] = eigProblem.selectUnstable(d, W, V)\n\n print dnew / (2. * np.pi)", "def displayFiducial(self):\r\n # obsolete?\r\n profbox()\r\n modelNodes = slicer.util.getNodes('vtkMRMLModelNode*')\r\n for modelNode in modelNodes.values():\r\n displayNode = modelNode.GetDisplayNode()\r\n if modelNode.GetAttribute(\"segmented\") == \"1\" and modelNode.GetAttribute(\"nth\") != None:\r\n if 1:\r\n i = int(modelNode.GetAttribute(\"nth\"))\r\n if self.fiducialnode[i] == 0:\r\n polyData = modelNode.GetPolyData()\r\n nb = int(polyData.GetNumberOfPoints() - 1)\r\n coord = [0, 0, 0]\r\n if nb > 10:\r\n self.fiducialnode[i] = slicer.vtkMRMLAnnotationFiducialNode()\r\n polyData.GetPoint(nb, coord)\r\n self.fiducialnode[i].SetName(self.option[i])\r\n self.fiducialnode[i].SetFiducialCoordinates(coord)\r\n self.fiducialnode[i].Initialize(slicer.mrmlScene)\r\n self.fiducialnode[i].SetLocked(1)\r\n self.fiducialnode[i].SetSelectable(0)\r\n fidDN = self.fiducialnode[i].GetDisplayNode()\r\n fidDN.SetColor(modelNode.GetDisplayNode().GetColor())\r\n fidDN.SetGlyphScale(0)\r\n fidTN = self.fiducialnode[i].GetAnnotationTextDisplayNode()\r\n fidTN.SetTextScale(3)\r\n fidTN.SetColor(modelNode.GetDisplayNode().GetColor())\r\n\r\n self.fiducialnode[i].SetDisplayVisibility(modelNode.GetDisplayNode().GetVisibility())\r\n else:\r\n if modelNode.GetDisplayNode().GetVisibility():\r\n self.fiducialnode[i].SetDisplayVisibility(abs(self.fiducialnode[i].GetDisplayVisibility() - 1))\r\n if self.fiducialnode[i].GetDisplayVisibility() == 1:\r\n self.displayFiducialButton.text = \"Hide Labels on Needles\"\r\n else:\r\n self.displayFiducialButton.text = \"Display Labels on Needles\"", "def fiducial_evolution():\n \n # fiducial model\n wangle = 180*u.deg\n pk = pickle.load(open('../data/gd1_fiducial.pkl', 'rb'))\n x = pk['x'].to(u.kpc)\n xorig = x[:2]\n \n plt.close()\n fig, ax = plt.subplots(1,1,figsize=(6,6))\n \n plt.sca(ax)\n \n Nsnap = 8\n times = np.linspace(0,0.5,Nsnap)[::-1]\n angles = np.linspace(0,322,Nsnap)[::-1]*u.deg\n\n for e, t in enumerate(times):\n c = mpl.cm.Blues(0.05+0.85*(Nsnap-e)/Nsnap)\n #a = 0.5 + 0.5*(Nsnap-e)/Nsnap\n \n pk = pickle.load(open('../data/gd1_fiducial_t{:.4f}.pkl'.format(t), 'rb'))\n x = pk['x'].to(u.kpc)\n x_, y_ = x[0], x[1]\n \n plt.plot(x_[120:-120], 
y_[120:-120], '.', color=c, ms=10, zorder=Nsnap-e, rasterized=False)\n \n xt = 24*np.cos(angles[e]+90*u.deg)\n yt = 24*np.sin(angles[e]+90*u.deg)\n if e<Nsnap-1:\n txt = plt.text(xt, yt, '+ {:.2f} Gyr'.format(t), va='center', ha='center', fontsize='small', color='0.2', rotation=(angles[e]).value, zorder=10)\n txt.set_bbox(dict(facecolor='w', alpha=0.7, ec='none'))\n \n plt.text(0, 24, 'Flyby', va='center', ha='center', fontsize='small', color='0.2')\n\n lim = 27\n plt.xlim(-lim,lim)\n plt.ylim(-lim,lim)\n plt.gca().set_aspect('equal')\n \n plt.xlabel('x [kpc]')\n plt.ylabel('y [kpc]')\n \n plt.tight_layout()\n plt.savefig('../plots/loop_evolution.pdf')", "def HasFOV(self):\n return _gmat_py.Hardware_HasFOV(self)", "def complete_paper_plot(plot_dir,\n index,\n image1,\n image2,\n flow_uv,\n ground_truth_flow_uv,\n flow_valid_occ,\n predicted_occlusion,\n ground_truth_occlusion,\n frame_skip=None):\n\n def save_fig(name, plot_dir):\n plt.xticks([])\n plt.yticks([])\n if frame_skip is not None:\n filename = str(index) + '_' + str(frame_skip) + '_' + name\n plt.savefig(os.path.join(plot_dir, filename), bbox_inches='tight')\n else:\n filepath = str(index) + '_' + name\n plt.savefig(os.path.join(plot_dir, filepath), bbox_inches='tight')\n plt.clf()\n\n#############here#######################\n # def robust_l1(x):\n # \"\"\"Robust L1 metric.\"\"\"\n # return (x ** 2 + 0.001 ** 2) ** 0.5\n #\n #\n # error = robust_l1(ground_truth_flow_uv - flow_uv)\n #\n # mask_non_zero = ground_truth_flow_uv != 0\n # mask_zero = ground_truth_flow_uv == 0\n #\n # loss_gt = (tf.reduce_sum(error[mask_non_zero]) / (tf.reduce_sum(tf.cast(mask_non_zero, tf.float32)) + 1e-16))\n # loss_zero = (tf.reduce_sum(error[mask_zero]) / (tf.reduce_sum(tf.cast(mask_zero, tf.float32)) + 1e-16))\n #\n # # flowpy.flow_write(plot_dir + '/flow_gt'+ str(index)+\".flo\",ground_truth_flow_uv)\n # flowpy.flow_write(plot_dir + '/flow_pred_bkwd' + str(index) + \".flo\", flow_uv)\n #\n # # print(flow_uv.shape)\n # fig, axis = plt.subplots(3,2)\n # fig.set_figheight(14)\n # fig.set_figwidth(14)\n # axis[0,0].imshow(image1)\n # axis[0,0].set_title(\"Image1\")\n # axis[0, 1].imshow(image2)\n # axis[0, 1].set_title(\"Image2\")\n # max_radius_f = flowpy.get_flow_max_radius(ground_truth_flow_uv)\n # axis[1, 0].imshow(flowpy.flow_to_rgb(ground_truth_flow_uv, flow_max_radius= max_radius_f))\n # axis[1, 0].set_title(\"Ground-truth Flow\")\n # flowpy.attach_calibration_pattern(axis[1,1], flow_max_radius=max_radius_f)\n # max_radius_p = flowpy.get_flow_max_radius(flow_uv)\n # axis[2, 0].imshow(flowpy.flow_to_rgb(flow_uv, flow_max_radius=max_radius_p))\n # axis[2, 0].set_title(\"Predicted Flow\")\n # axis[2,0].set_xlabel('l1 loss for gt pixels: {} \\n l1 loss for zero pixels: {}'.format(loss_gt,loss_zero))\n # flowpy.attach_calibration_pattern(axis[2,1], flow_max_radius=max_radius_p)\n # # print(np.mean(ground_truth_flow_uv), np.mean(flow_uv))\n #\n # axis[2,1].imshow((1-predicted_occlusion[:, :, 0]) * 255, cmap='Greys')\n # axis[2,1].set_title(\"Predicted Occlusion\")\n #\n # # plt.imshow(flowpy.flow_to_rgb(flow_uv))\n # # plt.savefig( plot_dir+'/pred_flow'+str(index))\n # # plt.imshow(flowpy.flow_to_rgb(ground_truth_flow_uv))\n # # plt.savefig( plot_dir+'/gt_flow'+ str(index))\n # # print(ground_truth_flow_uv.shape)\n # plt.imshow(image1)\n # plt.savefig(plot_dir + '/plots'+ str(index ))\n\n\n#############till_here##########################\n flow_uv = -flow_uv[::-1,:,:]\n ground_truth_flow_uv = -ground_truth_flow_uv[::-1,:, :]\n plt.figure()\n 
plt.clf()\n\n plt.imshow(np.moveaxis(((image1 + image2) / 2.),0,-1))\n\n save_fig('image_rgb', plot_dir)\n # np.save(\"flow_pred\"+plot_dir,flow_uv)\n plt.imshow(flow_to_rgb(flow_uv))\n save_fig('predicted_flow', plot_dir)\n # np.save(\"flow_gt\" + plot_dir, ground_truth_flow_uv * flow_valid_occ)\n plt.imshow(flow_to_rgb(ground_truth_flow_uv * flow_valid_occ))\n save_fig('ground_truth_flow', plot_dir)\n\n endpoint_error = np.sum(\n (ground_truth_flow_uv - flow_uv)**2, axis= 0 , keepdims=True)**0.5\n\n plt.imshow(\n (endpoint_error * flow_valid_occ)[0],\n cmap='viridis',\n vmin=0,\n vmax=40)\n save_fig('flow_error', plot_dir)\n\n plt.imshow((predicted_occlusion[0]) * 255, cmap='Greys')\n save_fig('predicted_occlusion', plot_dir)\n\n plt.imshow((ground_truth_occlusion[0]) * 255, cmap='Greys')\n save_fig('ground_truth_occlusion', plot_dir)\n\n plt.close('all')", "def clean_graph(self):\n #self.time = 0#\n \n # values of microcontroller\n #if self.graf_t.buffer_info()[1] != 0:\n for a in range(self.graf_t.buffer_info()[1]):\n self.graf_t.pop()\n \n for a in range(self.graf_r.buffer_info()[1]):\n self.graf_r.pop()\n\n for a in range(self.graf_x0.buffer_info()[1]):\n self.graf_x0.pop()\n\n for a in range(self.graf_x1.buffer_info()[1]):\n self.graf_x1.pop()\n\n for a in range(self.graf_u.buffer_info()[1]):\n self.graf_u.pop()\n \n self.referenceLine.set_data(self.graf_t, self.graf_r)\n self.x0Line.set_data(self.graf_t, self.graf_x0)\n self.x1Line.set_data(self.graf_t, self.graf_x1)\n self.uLine.set_data(self.graf_t, self.graf_u)\n \n try:\n #Draw the lines\n if self.checkBox_R.isChecked():\n self.mplWidget.canvas.ax.draw_artist(self.referenceLine)\n if self.checkBox_x0.isChecked():\n self.mplWidget.canvas.ax.draw_artist(self.x0Line)\n if self.checkBox_U.isChecked():\n self.mplWidget.canvas.ax.draw_artist(self.uLine)\n if self.checkBox_x1.isChecked():\n self.mplWidget.canvas.ax.draw_artist(self.x1Line)\n except AssertionError:\n pass\n try:\n self.mplWidget.canvas.blit(self.mplWidget.canvas.ax.bbox)\n except AttributeError:\n pass\n \n # force an image redraw\n self.mplWidget.canvas.draw()", "def plot(self):\n\t\tself.plotOfTF().plot()", "def plot_reconstruction_diagnostics(self, figsize=(20, 10)):\n figs = []\n fig_names = []\n\n # upsampled frequency\n fx_us = tools.get_fft_frqs(2 * self.nx, 0.5 * self.dx)\n fy_us = tools.get_fft_frqs(2 * self.ny, 0.5 * self.dx)\n\n # plot different stages of inversion\n extent = tools.get_extent(self.fy, self.fx)\n extent_upsampled = tools.get_extent(fy_us, fx_us)\n\n for ii in range(self.nangles):\n fig = plt.figure(figsize=figsize)\n grid = plt.GridSpec(3, 4)\n\n for jj in range(self.nphases):\n\n # ####################\n # separated components\n # ####################\n ax = plt.subplot(grid[jj, 0])\n\n to_plot = np.abs(self.separated_components_ft[ii, jj])\n to_plot[to_plot <= 0] = np.nan\n plt.imshow(to_plot, norm=LogNorm(), extent=extent)\n\n circ = matplotlib.patches.Circle((0, 0), radius=self.fmax, color='k', fill=0, ls='--')\n ax.add_artist(circ)\n\n if jj == 0:\n plt.title('O(f)otf(f)')\n plt.scatter(self.frqs[ii, 0], self.frqs[ii, 1], edgecolor='r', facecolor='none')\n plt.scatter(-self.frqs[ii, 0], -self.frqs[ii, 1], edgecolor='r', facecolor='none')\n elif jj == 1:\n plt.title('m*O(f-fo)otf(f)')\n plt.scatter(self.frqs[ii, 0], self.frqs[ii, 1], edgecolor='r', facecolor='none')\n elif jj == 2:\n plt.title('m*O(f+fo)otf(f)')\n plt.scatter(-self.frqs[ii, 0], -self.frqs[ii, 1], edgecolor='r', facecolor='none')\n\n plt.setp(ax.get_xticklabels(), 
visible=False)\n plt.setp(ax.get_yticklabels(), visible=False)\n\n plt.xlim([-2 * self.fmax, 2 * self.fmax])\n plt.ylim([2 * self.fmax, -2 * self.fmax])\n\n # ####################\n # deconvolved component\n # ####################\n ax = plt.subplot(grid[jj, 1])\n\n plt.imshow(np.abs(self.components_deconvolved_ft[ii, jj]), norm=LogNorm(), extent=extent)\n\n if jj == 0:\n plt.scatter(self.frqs[ii, 0], self.frqs[ii, 1], edgecolor='r', facecolor='none')\n plt.scatter(-self.frqs[ii, 0], -self.frqs[ii, 1], edgecolor='r', facecolor='none')\n elif jj == 1:\n plt.scatter(self.frqs[ii, 0], self.frqs[ii, 1], edgecolor='r', facecolor='none')\n elif jj == 2:\n plt.scatter(-self.frqs[ii, 0], -self.frqs[ii, 1], edgecolor='r', facecolor='none')\n\n circ = matplotlib.patches.Circle((0, 0), radius=self.fmax, color='k', fill=0, ls='--')\n ax.add_artist(circ)\n\n if jj == 0:\n plt.title('deconvolved component')\n plt.setp(ax.get_xticklabels(), visible=False)\n plt.setp(ax.get_yticklabels(), visible=False)\n\n plt.xlim([-2 * self.fmax, 2 * self.fmax])\n plt.ylim([2 * self.fmax, -2 * self.fmax])\n\n # ####################\n # shifted component\n # ####################\n ax = plt.subplot(grid[jj, 2])\n\n # avoid any zeros for LogNorm()\n cs_ft_toplot = np.abs(self.components_shifted_ft[ii, jj])\n cs_ft_toplot[cs_ft_toplot <= 0] = np.nan\n plt.imshow(cs_ft_toplot, norm=LogNorm(), extent=extent_upsampled)\n plt.scatter(0, 0, edgecolor='r', facecolor='none')\n\n circ = matplotlib.patches.Circle((0, 0), radius=self.fmax, color='k', fill=0, ls='--')\n ax.add_artist(circ)\n\n if jj == 1:\n circ2 = matplotlib.patches.Circle(-self.frqs[ii], radius=self.fmax, color='k', fill=0, ls='--')\n ax.add_artist(circ2)\n elif jj == 2:\n circ2 = matplotlib.patches.Circle(self.frqs[ii], radius=self.fmax, color='k', fill=0, ls='--')\n ax.add_artist(circ2)\n\n if jj == 0:\n plt.title('shifted component')\n plt.setp(ax.get_xticklabels(), visible=False)\n plt.setp(ax.get_yticklabels(), visible=False)\n\n plt.xlim([-2 * self.fmax, 2 * self.fmax])\n plt.ylim([2 * self.fmax, -2 * self.fmax])\n\n # ####################\n # normalized weights\n # ####################\n ax = plt.subplot(grid[jj, 3])\n\n to_plot = self.weights[ii, jj] / self.weight_norm\n to_plot[to_plot <= 0] = np.nan\n im2 = plt.imshow(to_plot, norm=LogNorm(), extent=extent_upsampled)\n im2.set_clim([1e-5, 1])\n fig.colorbar(im2)\n\n circ = matplotlib.patches.Circle((0, 0), radius=self.fmax, color='k', fill=0, ls='--')\n ax.add_artist(circ)\n\n if jj == 1:\n circ2 = matplotlib.patches.Circle(-self.frqs[ii], radius=self.fmax, color='k', fill=0, ls='--')\n ax.add_artist(circ2)\n elif jj == 2:\n circ2 = matplotlib.patches.Circle(self.frqs[ii], radius=self.fmax, color='k', fill=0, ls='--')\n ax.add_artist(circ2)\n\n if jj == 0:\n plt.title('normalized weight')\n plt.setp(ax.get_xticklabels(), visible=False)\n plt.setp(ax.get_yticklabels(), visible=False)\n\n plt.xlim([-2 * self.fmax, 2 * self.fmax])\n plt.ylim([2 * self.fmax, -2 * self.fmax])\n\n plt.suptitle('period=%0.3fnm at %0.3fdeg=%0.3frad, f=(%0.3f,%0.3f) 1/um\\n'\n 'mod=%0.3f, min mcnr=%0.3f, wiener param=%0.2f\\n'\n 'phases (deg) =%0.2f, %0.2f, %0.2f, phase diffs (deg) =%0.2f, %0.2f, %0.2f' %\n (self.periods[ii] * 1e3, self.angles[ii] * 180 / np.pi, self.angles[ii],\n self.frqs[ii, 0], self.frqs[ii, 1], self.mod_depths[ii, 1], np.min(self.mcnr[ii]), self.wiener_parameter,\n self.phases[ii, 0] * 180/np.pi, self.phases[ii, 1] * 180/np.pi, self.phases[ii, 2] * 180/np.pi,\n 0, np.mod(self.phases[ii, 1] - self.phases[ii, 
0], 2*np.pi) * 180/np.pi,\n np.mod(self.phases[ii, 2] - self.phases[ii, 0], 2*np.pi) * 180/np.pi))\n\n figs.append(fig)\n fig_names.append('sim_combining_angle=%d' % (ii + 1))\n\n # #######################\n # net weight\n # #######################\n figh = plt.figure(figsize=figsize)\n grid = plt.GridSpec(1, 2)\n plt.suptitle('Net weight, Wiener param = %0.2f' % self.wiener_parameter)\n\n ax = plt.subplot(grid[0, 0])\n net_weight = np.sum(self.weights, axis=(0, 1)) / self.weight_norm\n im = ax.imshow(net_weight, extent=extent_upsampled, norm=PowerNorm(gamma=0.1))\n\n figh.colorbar(im, ticks=[1, 0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1, 1e-2, 1e-3, 1e-4, 1e-5])\n\n ax.set_title(\"non-linear scale\")\n circ = matplotlib.patches.Circle((0, 0), radius=self.fmax, color='k', fill=0, ls='--')\n ax.add_artist(circ)\n\n circ2 = matplotlib.patches.Circle((0, 0), radius=2*self.fmax, color='k', fill=0, ls='--')\n ax.add_artist(circ2)\n\n circ3 = matplotlib.patches.Circle(self.frqs[0], radius=self.fmax, color='k', fill=0, ls='--')\n ax.add_artist(circ3)\n\n circ4 = matplotlib.patches.Circle(-self.frqs[0], radius=self.fmax, color='k', fill=0, ls='--')\n ax.add_artist(circ4)\n\n circ5 = matplotlib.patches.Circle(self.frqs[1], radius=self.fmax, color='k', fill=0, ls='--')\n ax.add_artist(circ5)\n\n circ6 = matplotlib.patches.Circle(-self.frqs[1], radius=self.fmax, color='k', fill=0, ls='--')\n ax.add_artist(circ6)\n\n circ7 = matplotlib.patches.Circle(self.frqs[2], radius=self.fmax, color='k', fill=0, ls='--')\n ax.add_artist(circ7)\n\n circ8 = matplotlib.patches.Circle(-self.frqs[2], radius=self.fmax, color='k', fill=0, ls='--')\n ax.add_artist(circ8)\n\n ax.set_xlim([-2 * self.fmax, 2 * self.fmax])\n ax.set_ylim([2 * self.fmax, -2 * self.fmax])\n\n ax = plt.subplot(grid[0, 1])\n ax.set_title(\"linear scale\")\n im = ax.imshow(net_weight, extent=extent_upsampled)\n\n figh.colorbar(im)\n circ = matplotlib.patches.Circle((0, 0), radius=self.fmax, color='k', fill=0, ls='--')\n ax.add_artist(circ)\n\n circ2 = matplotlib.patches.Circle((0, 0), radius=2 * self.fmax, color='k', fill=0, ls='--')\n ax.add_artist(circ2)\n\n circ3 = matplotlib.patches.Circle(self.frqs[0], radius=self.fmax, color='k', fill=0, ls='--')\n ax.add_artist(circ3)\n\n circ4 = matplotlib.patches.Circle(-self.frqs[0], radius=self.fmax, color='k', fill=0, ls='--')\n ax.add_artist(circ4)\n\n circ5 = matplotlib.patches.Circle(self.frqs[1], radius=self.fmax, color='k', fill=0, ls='--')\n ax.add_artist(circ5)\n\n circ6 = matplotlib.patches.Circle(-self.frqs[1], radius=self.fmax, color='k', fill=0, ls='--')\n ax.add_artist(circ6)\n\n circ7 = matplotlib.patches.Circle(self.frqs[2], radius=self.fmax, color='k', fill=0, ls='--')\n ax.add_artist(circ7)\n\n circ8 = matplotlib.patches.Circle(-self.frqs[2], radius=self.fmax, color='k', fill=0, ls='--')\n ax.add_artist(circ8)\n\n ax.set_xlim([-2 * self.fmax, 2 * self.fmax])\n ax.set_ylim([2 * self.fmax, -2 * self.fmax])\n\n figs.append(figh)\n fig_names.append('net_weight')\n\n return figs, fig_names", "def create_init_fig(wrapped_signal, freq_arr, xcm_arr):\n \n fig, ax = pyplot.subplots(figsize=(10.0, 5.0))\n pyplot.tight_layout()\n fig.suptitle('Frequency = {:.2f}'.format(freq_arr[0]))\n\n ax1 = pyplot.subplot2grid((1, 3), (0, 0))\n ax2 = pyplot.subplot2grid((1, 3), (0, 1), colspan=2)\n\n circle1 = pyplot.Circle((0, 0), 1, fill=None, lw=2, ls='--', alpha=0.3)\n\n ax1.add_patch(circle1)\n ax1.grid()\n\n ticks= numpy.linspace(-1,1, 5, endpoint=True)\n\n ylabels = [-1, -0.5, None, 0.5, 1]\n\n 
ax1.set_xticks(ticks)\n ax1.set_yticks(ticks)\n ax1.set_yticklabels(ylabels)\n\n\n wrapped_signal_plot = ax1.plot(wrapped_signal.real, \n wrapped_signal.imag, alpha=0.5,\n label=r'$g(t)e^{2\\pi ift}$')[0]\n\n # Move left y-axis and bottim x-axis to centre, passing through (0,0)\n ax1.spines['left'].set_position('center')\n ax1.spines['bottom'].set_position('center')\n\n # Eliminate upper and right axes\n ax1.spines['right'].set_color('none')\n ax1.spines['top'].set_color('none')\n\n\n ax1.set_adjustable('box')\n ax1.set_aspect('equal')\n ax1.set_xlim(-1.1,1.1)\n ax1.set_ylim(-1.1,1.1)\n ax1.legend(loc='upper left', bbox_to_anchor=(0.48, 1.12))\n\n #f_list = numpy.full_like(freqs, None)\n almost_fourier_plot = ax2.plot(freq_arr[0], xcm_arr[0], '-')[0]\n ax2.spines['right'].set_color('none')\n ax2.spines['top'].set_color('none')\n ax2.set_adjustable('box')\n ax2.set_aspect('equal')\n ax2.set_xlabel('Frequency')\n ax2.set_ylabel('xcm')\n\n ax2.set_xlim(0.9,5.1)\n ax2.set_ylim(-0.3,1.1)\n ax2.grid()\n pyplot.tight_layout()\n pyplot.close()\n \n return {'fig': fig, 'WSP': wrapped_signal_plot, 'AF': almost_fourier_plot}", "def plot_omni_quicklook(self, flux_opts=None, eflux_opts=None,\n hflux_opts=None, oflux_opts=None):\n import matplotlib.pyplot as plt\n import matplotlib.gridspec as gridspec\n \n fig=plt.figure(figsize=(11,7))\n fig.subplots_adjust(left=0.07, right=0.99, bottom=0.19, \n top=0.94, wspace=0.4, hspace=0.25)\n gs=gridspec.GridSpec(3,3)\n\n # Do orbits first.\n a1=fig.add_subplot(gs[0,0])\n a2=fig.add_subplot(gs[1,0])\n a3=fig.add_subplot(gs[2,0])\n self.add_orbit_plot('XY', target=a1)\n self.add_orbit_plot('XZ', target=a2)\n self.add_orbit_plot('YZ', target=a3)\n\n # Add fluxes.\n a1=fig.add_subplot(gs[0,1:])\n a2=fig.add_subplot(gs[1,1:])\n a3=fig.add_subplot(gs[2,1:])\n if eflux_opts is None:\n eflux_opts = {}\n if hflux_opts is None:\n hflux_opts = {}\n if oflux_opts is None:\n oflux_opts = {}\n if flux_opts is None:\n flux_opts = {}\n for k in flux_opts:\n for d in (eflux_opts, hflux_opts, oflux_opts):\n if not k in d:\n d[k] = flux_opts[k]\n self.add_omniflux_plot('omnie', target=a1, no_xlabels=True,\n **eflux_opts)\n self.add_omniflux_plot('omniH', target=a2, no_xlabels=True,\n **hflux_opts)\n self.add_omniflux_plot('omniO', target=a3, do_orbticks=True,\n **oflux_opts)\n \n return fig", "def setupForFTK(self):\n t1 = self.getKeyword('ISS CONF T1NAME').strip()\n t2 = self.getKeyword('ISS CONF T2NAME').strip()\n #swapped = self.getKeyword('ISS PRI STS'+t1[2]+' GUIDE_MODE').strip()\n\n fsub_pos_fri = self.maxSnrInScan(fsu='FSUB', opdc='OPDC', plot=1)\n fsua_pos_fri = self.maxSnrInScan(fsu='FSUA', opdc='OPDC', plot=2)\n print '---{'+self.insmode+'}---'\n if swapped == 'NORMAL':\n print ' OPDC -> [ZPD offset] x [sign',self.DLtrack,\\\n '] =',-fsub_pos_fri\n print 'DOPDC -> [ZPD offset] x [sign',\\\n self.getKeyword('ISS DDL1 NAME').strip(),\\\n '] = ',(fsub_pos_fri-fsua_pos_fri)\n else:\n print ' OPDC -> [ZPD offset] x [sign',self.DLtrack,\\\n '] =', fsua_pos_fri\n print 'DOPDC -> [ZPD offset] x [sign',\\\n self.getKeyword('ISS DDL2 NAME').strip(),\\\n '] = ',(fsua_pos_fri-fsub_pos_fri)\n return", "def setupconfig():\n from Manager import Studio\n studio = Studio.Instance\n cfgeff = studio.configEffect_st\n cfgeff.bloomToggle.isOn = False\n cfgeff.vignetteToggle.isOn = False\n cfgeff.sunShaftsToggle.isOn = False\n cfgeff.fogToggle.isOn = False\n cfgeff.depthOfFieldToggle.isOn = False\n #cfgeff.ssaoToggle.isOn = True\n #cfgeff.selfShadowToggle.isOn = True\n \n # Turn off 
backgrounds\n studio.uiBGChanger.onOffToggle.isOn = False", "def switch_frequency_plot_channel_six(self):\n if self.plot_channel_key_booleans[5]:\n self.plot_channel_key_booleans[5] = False\n self.parent_widget.graph_channel_six_button.setStyleSheet(\n \"background-color:rgb(%d,%d,%d)\" % (255, 255, 255))\n else:\n self.plot_channel_key_booleans[5] = True\n self.parent_widget.graph_channel_six_button.setStyleSheet(\n \"background-color:rgb(%d,%d,%d)\" % (LINE_COLORS[5]))", "def __init__(self,options,pos):\n self.options = options\n numobjects = pos.shape[1]\n plt.ion() # turn on interactive plotting mode\n dpi=72.0 # set dpi (I think this is appropriate on mac)\n # fig accepts size in inches\n # so divide desired pixel width, height by dpi to get inches\n w,h=(self.options.width/dpi,self.options.height/dpi)\n fig = plt.figure(1,figsize=(w,h),dpi=dpi)\n fig.clear()\n\n #w = self.options.width/fig.get_dpi() # desired width in inches\n #h = self.options.height/fig.get_dpi() # desired height in inches\n #fig.set_size_inches(w,h,forward=True) # last arg resizes the canvas to match\n\n self.ax = plt.axes()\n self.ax.set_xlim(self.options.xmin,self.options.xmax)\n self.ax.set_ylim(self.options.ymin,self.options.ymax)\n #pyplot.axis('scaled')\n\n # I don't know why axis('scaled') doesn't work here\n # But I think the next two commands are equivalent\n self.ax.set_aspect('equal', adjustable='box', anchor='C')\n self.ax.set_autoscale_on(False)\n\n #self.redraw()\n\n\n #facecolors = [cm.jet(x) for x in np.random.rand(len(vicon_objects))]\n facecolors = [cm.jet(x) for x in np.linspace(0,1,numobjects)]\n if self.options.visualize_switch_xy:\n if self.options.axis==1:\n self.ax.axvline(linewidth=4, c='k')\n else:\n self.ax.axhline(linewidth=4, c='k')\n self.col = plt.scatter(pos[:,1],pos[:,0],c=facecolors,s=3000)\n else:\n if self.options.axis==1:\n self.ax.axhline(linewidth=4, c='k')\n else:\n self.ax.axvline(linewidth=4, c='k')\n self.col = plt.scatter(pos[:,0],pos[:,1],c=facecolors,s=3000)\n\n # scores\n self.tpos = self.ax.text(0.75*self.options.xmax,0.75*self.options.ymin,str(50),\n size=72,color='k',ha='center',va='center')\n self.tneg = self.ax.text(0.75*self.options.xmin,0.75*self.options.ymin,str(50),\n size=72,color='k',ha='center',va='center')\n\n self.canvas = agg.FigureCanvasAgg(fig)\n self.canvas.draw()\n self.renderer = self.canvas.get_renderer()\n raw_data = self.renderer.tostring_rgb()\n\n pygame.init()\n \n self.window = pygame.display.set_mode((options.width,options.height), DOUBLEBUF)\n self.screen = pygame.display.get_surface()\n\n self.set_caption(\"Possession: Waiting for Vicon\")\n \n size = self.canvas.get_width_height()\n \n surf = pygame.image.fromstring(raw_data, size, \"RGB\")\n self.screen.blit(surf, (0,0))\n pygame.display.flip()", "def get_toggle_axes_args(self,comArgs):\n params, flags = self.get_params(comArgs)\n args = [flags, False]\n return args", "def early_gradient_fusion():\n pass", "def __init__(\n self,\n label=\"PyFstat\",\n tstart=None,\n duration=None,\n dtglitch=None,\n delta_phi=0,\n delta_F0=0,\n delta_F1=0,\n delta_F2=0,\n tref=None,\n F0=None,\n F1=0,\n F2=0,\n Alpha=None,\n Delta=None,\n h0=None,\n cosi=None,\n psi=0.0,\n phi=0,\n Tsft=1800,\n outdir=\".\",\n sqrtSX=None,\n noiseSFTs=None,\n SFTWindowType=None,\n SFTWindowParam=None,\n SFTWindowBeta=None,\n Band=None,\n detectors=None,\n earth_ephem=None,\n sun_ephem=None,\n transientWindowType=\"rect\",\n randSeed=None,\n timestamps=None,\n ):\n\n self.set_ephemeris_files(earth_ephem, sun_ephem)\n 
self._basic_setup()\n self.calculate_fmin_Band()\n\n shapes = np.array(\n [\n np.shape(x)\n for x in [self.delta_phi, self.delta_F0, self.delta_F1, self.delta_F2]\n ]\n )\n if not np.all(shapes == shapes[0]):\n raise ValueError(\"all delta_* must be the same shape: {}\".format(shapes))\n\n for d in self.delta_phi, self.delta_F0, self.delta_F1, self.delta_F2:\n if np.size(d) == 1:\n d = np.atleast_1d(d)\n\n if self.dtglitch is None:\n self.tbounds = [self.tstart, self.tend]\n else:\n self.dtglitch = np.atleast_1d(self.dtglitch)\n self.tglitch = self.tstart + self.dtglitch\n self.tbounds = np.concatenate(([self.tstart], self.tglitch, [self.tend]))\n logger.info(\"Using segment boundaries {}\".format(self.tbounds))\n\n tbs = np.array(self.tbounds)\n self.durations = tbs[1:] - tbs[:-1]\n\n self.delta_thetas = np.atleast_2d(\n np.array([delta_phi, delta_F0, delta_F1, delta_F2]).T\n )", "def setup(args):\n # chaparral,denseForest,lake,canyon,burning,burnt = neighbours\n config_path = args[0]\n config = utils.load(config_path)\n # -- THE CA MUST BE RELOADED IN THE GUI IF ANY OF THE BELOW ARE CHANGED --\n config.title = \"Forest Fire\"\n config.dimensions = 2\n config.states = \\\n (\n CHAPARRAL,\n DENSE_FORREST,\n LAKE,\n CANYON,\n BURNING,\n BURNT,\n START_BURN,\n END_BURN\n )\n\n # ------------ -------------------------------------------------------------\n\n config.state_colors = \\\n [\n (0.6,0.6,0), #chaparral\n (0,0.4,0), #dense forrest\n (0,0.5,1), #lake\n (0.5,0.5,0.5), #canyon\n (1,0,0), #burning\n (0.25,0.25,0.25), #burnt\n (1,0.7,0), #starting to burn\n (0.8,0,0.2) #ending burn\n ]\n\n config.grid_dims = (grid_size, grid_size)\n config.num_generations = 1000\n config.set_initial_grid(initial_grid)\n config.wrap = False\n\n # --------------------------------------------------------------------\n\n # the GUI calls this to pass the user defined config\n # into the main system with an extra argument\n # do not change\n if len(args) == 2:\n config.save()\n sys.exit()\n return config", "def update(self):\n #update checkboxes\n self.average_check_box.SetValue(self.parent.fftsink.average)\n self.use_persistence_check_box.SetValue(self.parent.fftsink.use_persistence)\n self.peak_hold_check_box.SetValue(self.parent.fftsink.peak_hold)\n #update radio buttons\n try:\n index = list(DIV_LEVELS).index(self.parent.fftsink.y_per_div)\n self.radio_buttons[index].SetValue(True)\n except: pass", "def axInit():\n ax.init()", "def draw(self):\n # TODO: take graph axe value but careful with changing size when computing\n # need to change the equation because based on full scale and not arbitrary position\n left_side = self.zoom*self.FOV_img/self.FOV_img_Y\n right_side = self.axe_X - self.zoom*self.FOV_img/self.FOV_img_Y\n up_side = self.zoom\n down_side = self.axe_Y - self.zoom\n\n if right_side == self.axe_X:\n right_side -= 1\n if down_side == self.axe_Y:\n down_side -= 1\n\n self.ax.set_xlim((left_side, right_side))\n self.ax.set_ylim((down_side, up_side))\n# print((self.left_side, self.right_side), (self.down_side, self.up_side))\n self.fig.canvas.draw()", "def use_fscale(self,use_fscale):\n if type(use_fscale).__name__ == 'bool':\n self._use_fscale = use_fscale\n else:\n raise KINSOL_Exception(\"The variable sent to 'use_fscale' must be a boolean.\")", "def late_gradient_fusion():\n pass" ]
[ "0.61186206", "0.5865818", "0.5800843", "0.5676216", "0.55347174", "0.55038637", "0.5467715", "0.54164624", "0.5378973", "0.5370552", "0.5300264", "0.52715015", "0.52276176", "0.521771", "0.5209924", "0.5208267", "0.520253", "0.5198544", "0.51961386", "0.519605", "0.5194642", "0.5191218", "0.51910484", "0.51604474", "0.5136282", "0.5116591", "0.50955683", "0.5085472", "0.5079972", "0.50745535", "0.5072196", "0.5068737", "0.5044979", "0.50432795", "0.5036101", "0.50226", "0.5022186", "0.50213575", "0.5018721", "0.50183076", "0.50151277", "0.50048536", "0.49956027", "0.4993488", "0.4992815", "0.49879712", "0.4972734", "0.4957806", "0.49541584", "0.49522564", "0.49522564", "0.49480915", "0.49394074", "0.49382803", "0.4920621", "0.49201864", "0.49156526", "0.49128622", "0.49065468", "0.49022987", "0.49002013", "0.48990464", "0.48965713", "0.4895992", "0.48936585", "0.48849475", "0.4882539", "0.48823547", "0.48804194", "0.48758543", "0.48734322", "0.4871638", "0.4868825", "0.48686928", "0.48651886", "0.4864767", "0.48592114", "0.4857585", "0.48562086", "0.4855124", "0.48473978", "0.48443183", "0.48428056", "0.48346618", "0.48341838", "0.4831849", "0.48312646", "0.48258984", "0.48234046", "0.48187515", "0.48119202", "0.4811388", "0.48075208", "0.48070502", "0.4806954", "0.48061362", "0.48035517", "0.48030874", "0.48020887", "0.48014155", "0.4800979" ]
0.0
-1
axis status, axis init, slew
def test_goto_field_apogee_no_guider(self): cmdState = self.actorState.gotoField cmdState.reinitialize(self.cmd) cmdState.doGuider = False self._goto_feld_apogee(3, 11, 0, 0, cmdState)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self, axis=-1):\n self.axis = axis", "def __init__(self, axes=()):\n self._axes = []\n self._dimension = 0\n for axis in axes:\n self.add_axis(axis)", "def _set_axes(self):\n self += helper.line(stroke=\"black\", x1=self.__dict__['x'], x2=self.__dict__['x'], y1=0, y2=self.__dict__['y']*2)\n self += helper.line(stroke=\"black\", x1=0, x2=self.__dict__['x']*2, y1=self.__dict__['y'], y2=self.__dict__['y'])", "def _handle_setup_axis(self, axis_args):\n axis_name = axis_args['name']\n axes_dict = self.server.axes\n\n if axis_name not in [name for name, _ in axes_dict.items()]:\n print \"Adding a new axis:\", axis_name\n axis_count = len(axes_dict)\n newaxis = self.server.figure.add_subplot(axis_count+1, 1, axis_count+1)\n axes_dict[axis_name] = newaxis\n axes_dict[axis_name].grid(True)\n axes_dict[axis_name].set_xlabel(axis_args['x_label'])\n axes_dict[axis_name].set_ylabel(axis_args['y_label'])\n # TODO: support *.set_title(\"Title\")\n if FLAGS.logy:\n axes_dict[axis_name].set_yscale('log', nonposy='clip')\n\n if axis_count != 0:\n # Resize other axes if the above wasn't the first.\n axis_count = len(axes_dict)\n for row,(name, _) in enumerate(axes_dict.items(), 1):\n print name, axis_count, row\n axes_dict[name].change_geometry(axis_count, 1, row)", "def __init__(self):\n self.isMoving = 0#0 is stop, 1 is moving forward, -1 is moving backward\n self.isRoutating = False\n pygame.init()\n pygame.joystick.init()\n self.controller = pygame.joystick.Joystick(0)\n self.controller.init()\n if not self.axis_data:\n self.axis_data = {}\n\n if not self.button_data:\n self.button_data = {}\n for i in range(self.controller.get_numbuttons()):\n self.button_data[i] = False\n\n if not self.hat_data:\n self.hat_data = {}\n for i in range(self.controller.get_numhats()):\n self.hat_data[i] = (0, 0)", "def __init__(self, axes: int):\n self.axes = axes", "def _InitAxes( self ):\n self.ax = self.fig.add_subplot( 111 )", "def pylab_setup(figure, stream_data, original_width, runlimits, runflags):\n\n def on_key(event):\n \"\"\"on_key\"\"\"\n print('you pressed', event.key, event.xdata, event.ydata)\n\n #def diag_event(event):\n # \"\"\"diag_event\"\"\"\n # print event.name\n # if hasattr(event, 'height'):\n # print event.height, event.width\n # print event.name, event.canvas, event.guiEvent\n\n def pause_axis(unused_event):\n \"\"\"pause_axis\"\"\"\n # stops update of axis when updating lines\n # allows smooth scrolling by user\n print \"PAUSE pause axis\"\n runflags.update_axis = False\n\n def unpause_axis(event):\n \"\"\"unpause_axis\"\"\"\n # continues updating scrolling\n print \"RESUME axis\"\n runflags.update_axis = True\n if hasattr(event, 'height'):\n print event.height, event.width\n new_ratio = float(event.width)/float(event.height)\n default_ratio = 1.3\n print \"BEFORE: \", FLAGS.width\n FLAGS.width = original_width * new_ratio / default_ratio\n print \"AFTER: \", FLAGS.width\n\n figure.canvas.mpl_connect('key_press_event', on_key)\n figure.canvas.mpl_connect('resize_event', unpause_axis)\n figure.canvas.mpl_connect('scroll_event', pause_axis)\n\n timer = figure.canvas.new_timer(interval=500)\n timer.add_callback(plot_refresh_handler, (stream_data, runlimits, runflags))\n timer.start()\n print \"SHOW\"\n pylab.show()\n print \"AFTER\"", "def axInit():\n ax.init()", "def setup(self, flags):\n self.figure = pylab.figure(1)\n self.axes = {}\n self.stream_data = {}\n self.flags = flags", "def toggle_axis(self):\n # cycle through three axis states\n states = [False, 'world', 'all']\n # the 
state after toggling\n index = (states.index(self.view['axis']) + 1) % len(states)\n # update state to next index\n self.view['axis'] = states[index]\n # perform gl actions\n self.update_flags()", "def __init__( self, container, id = -1, **kwargs ):\n\n self.ax = None\n self.axline = None # axis line representing state\n self.canvas = None\n self.curSize = None\n self.cursor = None\n self.cursorLine = None # axis line following the cursor\n self.cursorLine2 = None # axis line following the cursor\n self.fig = None\n self.timer = None\n self.toolbar = None\n\n self.callbackIds = {}\n #self.isLoaded = False\n self.refAxis = kwargs.get( 'ref_axis', 'y' )\n self.refAxis2 = kwargs.get( 'ref_axis2' )\n self.showCursor = kwargs.get( 'show_cursor', True )\n #self.stateIndex = -1\n self.timeValue = -1.0\n self.titleFontSize = 16\n\n super( PlotWidget, self ).__init__( container, id )", "def initAxisValues(self, axis):\n \n if (axis != None):\n if self.isTime:\n self.axisValues = [repr(t.tocomponent())\n for t in axis.asRelativeTime()]\n else:\n self.axisValues = axis.getValue()\n else:\n raise TypeError(\"Error: axis is not defined\")\n\n self.axisIndices = range(len(self.axisValues))\n self.updateMin(0)\n self.updateMax(len(self.axisValues) - 1)", "def _update_axes(self):\n data_shape = self.data.shape\n if len(self.axes) < self.data.ndim + 1:\n self._axes.append(Axis())\n for index in range(self.data.ndim):\n if len(self.axes[index].values) != data_shape[index]:\n self.axes[index].values = np.arange(data_shape[index],\n dtype=np.float64)", "def _configure_axis(self, source, title):\n conf = source.conf[title]\n if source.data_type[title] == 'image':\n self.plot.getView().invertY(True)\n else:\n self.plot.getView().invertY(False)\n if(self.settingsWidget.ui.flipy.currentText() == 'Yes' or\n (self.settingsWidget.ui.flipy.currentText() == 'Auto' and\n \"flipy\" in conf and conf['flipy'] == True)):\n self.plot.getView().invertY(not self.plot.getView().getViewBox().yInverted())\n if(self.settingsWidget.ui.flipx.currentText() == 'Yes' or\n (self.settingsWidget.ui.flipx.currentText() == 'Auto' and\n \"flipx\" in conf and conf['flipx'] == True)):\n self.plot.getView().invertX(not self.plot.getView().getViewBox().xInverted())\n\n # Tranpose images to make x (last dimension) horizontal\n axis_labels = ['left', 'bottom']\n xlabel_index = 0\n ylabel_index = 1\n if (source.data_type[title] == 'image') or (source.data_type[title] == 'triple'):\n xlabel_index = (xlabel_index+1)%2\n ylabel_index = (ylabel_index+1)%2\n\n if(self.settingsWidget.ui.transpose.currentText() == 'Yes' or\n (self.settingsWidget.ui.transpose.currentText() == 'Auto' \n and \"transpose\" in conf)):\n xlabel_index = (xlabel_index+1)%2\n ylabel_index = (ylabel_index+1)%2\n\n self.x_axis_name = axis_labels[xlabel_index]\n self.y_axis_name = axis_labels[ylabel_index]\n if(self.actionX_axis.isChecked()):\n if(self.settingsWidget.ui.x_label_auto.isChecked() and \n \"xlabel\" in conf):\n self.plot.getView().setLabel(axis_labels[xlabel_index], conf['xlabel']) #pylint: disable=no-member\n else:\n self.plot.getView().setLabel(axis_labels[xlabel_index], self.settingsWidget.ui.x_label.text()) #pylint: disable=no-member\n\n if(self.actionY_axis.isChecked()):\n if(self.settingsWidget.ui.y_label_auto.isChecked() and \n \"ylabel\" in conf):\n self.plot.getView().setLabel(axis_labels[ylabel_index], conf['ylabel']) #pylint: disable=no-member\n else:\n self.plot.getView().setLabel(axis_labels[ylabel_index], self.settingsWidget.ui.y_label.text()) #pylint: 
disable=no-member", "def front_wheel_from_axis():", "def effect(self):\n AxisType = self.options.AxisType\n AxisDescription = self.options.AxisDescription\n AxisUnit = self.options.AxisUnit\n AxisLabel = self.options.AxisLabel\n AxisMaxValue = self.options.AxisMaxValue\n AxisMinValue = self.options.AxisMinValue\n AxisScale = self.options.AxisScale\n \n \n for id, node in self.selected.iteritems():\n axis = node #TODO: This selection should be further tested\n axis.set(inkex.addNS(\"Type\",\"TimeAnalysis\"), \"Axis\")\n axis.set(inkex.addNS(\"AxisType\",\"TimeAnalysis\"), AxisType)\n axis.set(inkex.addNS(\"AxisDescription\",\"TimeAnalysis\"), AxisDescription)\n #TODO: The label should be unique.\n axis.set(inkex.addNS(\"AxisLabel\",\"TimeAnalysis\"), AxisLabel) \n axis.set(inkex.addNS(\"AxisUnit\",\"TimeAnalysis\"), AxisUnit)\n axis.set(inkex.addNS(\"AxisMaxValue\",\"TimeAnalysis\"), AxisMaxValue)\n axis.set(inkex.addNS(\"AxisMinValue\",\"TimeAnalysis\"), AxisMinValue)\n axis.set(inkex.addNS(\"AxisScale\",\"TimeAnalysis\"), AxisScale)\n # sys.stderr.write(\"The max value of the axis is: \" + str(axis.get(inkex.addNS(\"AxisMaxValue\",\"TimeAnalysis\"))))", "def enableaxes(self):\n debug('ControllerStartup.enableaxes()')\n if not self.pidevice.HasEAX() or self.prop['skipeax']:\n return\n for axis in self.pidevice.axes:\n try:\n self.pidevice.EAX(axis, True)\n except GCSError as exc:\n if exc != gcserror.E2_PI_CNTR_UNKNOWN_COMMAND:\n raise\n waitonready(self.pidevice, **self._kwargs)", "def __init__(self):\n self.rot_axis = 1", "def _handle_axes(self, drawable, option):\n # If we already have an axes object, ignore this one\n if self._axes_object is not None:\n return\n\n # Grab the histogram used for axes style/range manipulation\n if is_stack(drawable) or is_graph(drawable):\n axes_histogram = drawable.GetHistogram()\n else:\n axes_histogram = drawable\n\n # Grab the histogram used for title manipulation\n if is_stack(drawable):\n title_histogram = drawable.GetHists()[0]\n else:\n title_histogram = drawable\n\n # Set the plot title\n title_histogram.SetTitle(self._title)\n\n # Grab axes\n x_axis, y_axis = axes_histogram.GetXaxis(), axes_histogram.GetYaxis()\n\n # Grab titles from first histogram if not set explicitly\n if self._x_title is None:\n self._x_title = title_histogram.GetXaxis().GetTitle()\n if self._y_title is None:\n self._y_title = title_histogram.GetYaxis().GetTitle()\n\n # Style x-axis, or hide it if this plot has a ratio plot\n if self._x_range is not None:\n x_axis.SetRangeUser(*self._x_range)\n if self._ratio_plot:\n x_axis.SetLabelOffset(999)\n x_axis.SetTitleOffset(999)\n else:\n x_axis.SetTitle(self._x_title)\n x_axis.SetTitleSize(self.PLOT_X_AXIS_TITLE_SIZE)\n x_axis.SetTitleOffset(self.PLOT_X_AXIS_TITLE_OFFSET)\n x_axis.SetLabelSize(self.PLOT_X_AXIS_LABEL_SIZE)\n if self._x_integer_ticks:\n x_axis.SetNdivisions(11) # hack for integer ticks \n\n # Style y-axis\n y_axis.SetTitle(self._y_title)\n y_axis.SetLabelFont(self.PLOT_ATLAS_STAMP_TEXT_FONT)\n y_axis.SetTitleSize(\n (self.PLOT_Y_AXIS_TITLE_SIZE_WITH_RATIO\n if self._ratio_plot\n else self.PLOT_Y_AXIS_TITLE_SIZE)\n )\n y_axis.SetTitleOffset(\n (self.PLOT_Y_AXIS_TITLE_OFSET_WITH_RATIO\n if self._ratio_plot\n else self.PLOT_Y_AXIS_TITLE_OFFSET)\n )\n y_axis.SetNdivisions(5,5,0)\n \n # set axis text sizes \n if self._ratio_plot:\n y_axis.SetLabelSize(self.PLOT_Y_AXIS_LABEL_SIZE_WITH_RATIO)\n else:\n y_axis.SetLabelSize(self.PLOT_Y_AXIS_LABEL_SIZE) \n y_axis.SetTitleSize(self.PLOT_Y_AXIS_TITLE_SIZE)\n 
y_axis.SetTitleOffset(self.PLOT_RATIO_Y_AXIS_TITLE_OFFSET)\n\n # Redraw the drawable with the new style\n drawable.Draw(option)", "def tick(self):", "def getAxisValuesEvent(self): \n varID = self.myParent.getVar().id\n axisVar = MV2.array(self.axis)\n axisVar.setAxis(0, self.axis)\n axisVar.id = varID +'_' + self.axis.id + '_axis'\n\n # Generate teaching command string\n fileID = 'fid2'\n teachingCommand = \"\\n## Getting axis %s\\n\" % self.axis.id\n teachingCommand += \"%s = MV2.array(%s[\\\"%s\\\"].getAxisList(axes = \\\"%s\\\")[0][:])\\n\" % (axisVar.id, fileID, varID, self.axis.id)\n teachingCommand += \"%s.setAxis(0, %s[\\\"%s\\\"].getAxisList(axes = \\\"%s\\\")[0])\\n\" % (axisVar.id, fileID, varID, self.axis.id)\n teachingCommand += \"%s.id = \\\"%s\\\"\\n\" % (axisVar.id, axisVar.id)\n\n # Record teaching commands associate 'get axis values' and\n # define a new variable/tab with only the axis' values \n self.myParent.defineVarAxis(axisVar, teachingCommand)", "def _finalize_axis(self, key, **kwargs):\n axis = self.handles['axis']\n self.handles['fig'].set_frameon(False)\n axis.grid(self.show_grid)\n axis.view_init(elev=self.elevation, azim=self.azimuth)\n try:\n axis._dist = self.distance\n except Exception:\n # axis.dist is deprecated see here:\n # https://github.com/matplotlib/matplotlib/pull/22084\n axis.dist = self.distance\n\n if self.xaxis is None:\n axis.w_xaxis.line.set_lw(0.)\n axis.w_xaxis.label.set_text('')\n if self.yaxis is None:\n axis.w_yaxis.line.set_lw(0.)\n axis.w_yaxis.label.set_text('')\n if self.zaxis is None:\n axis.w_zaxis.line.set_lw(0.)\n axis.w_zaxis.label.set_text('')\n if self.disable_axes:\n axis.set_axis_off()\n\n if mpl_version <= Version('1.5.9'):\n axis.set_axis_bgcolor(self.bgcolor)\n else:\n axis.set_facecolor(self.bgcolor)\n return super()._finalize_axis(key, **kwargs)", "def __init__(self, axis: int, validate: bool = True):\n super().__init__(validate)\n self._axis = axis", "def _checkForSixaxis():\n return sixaxis.init(\"/dev/input/js1\")", "def calc_axes(self):\n self.y_axis = np.linspace(0, self.image_shape[0] - 1, self.image_shape[0])\n self.x_axis = np.linspace(0, self.image_shape[1] - 1, self.image_shape[1])\n if hasattr(self, 'pixelsize'):\n self.y_axis *= self.pixelsize[0]\n self.x_axis *= self.pixelsize[1]\n\n # %%RETRIEVING FUNCTIONS", "def drawAxes(t):\r\n t.speed(0)\r\n t.pd()\r\n t.forward(500)\r\n t.back(500)", "def set_up(self):\n self.h, = self.ax.plot(self.x, lw=2)\n self.ax.set_ylim(0,100)\n self.ax.set_xlim(0,100)\n self.ax.title.set_text(self.config[\"title\"])\n self.ax.set_xlabel(self.config[\"x_label\"])\n self.ax.set_ylabel(self.config[\"y_label\"])", "def populate_plot_axis(self,plot,ax='x'):\n\n fig=plt.gcf()\n\n extra_ax=[]\n\n if ax=='x':\n\n ticks=plot.get_xticks()\n\n lim=plot.get_xlim()\n\n for i in range(len(self.names)):\n\n if i==0:\n\n axn=plot\n\n axn.spines['bottom'].set_position(('outward',10))\n\n axn.spines['bottom'].set_visible(True)\n\n else:\n\n dy_fig=0.08\n\n prev_ax_position=axn.get_position()\n\n extra_ax.append(fig.add_axes(\\\n (prev_ax_position.x0,\\\n prev_ax_position.y0-2*dy_fig,\\\n prev_ax_position.width,\\\n 0),'autoscalex_on',True))\n\n axn=extra_ax[i-1]\n\n axn.yaxis.set_visible(False)\n\n for side in axn.spines.keys():\n\n axn.spines[side].set_linewidth(1)\n\n axn.set_xticks(ticks)\n\n ticksnames=[float(str(x)) for x in self.values[i]]\n\n axn.set_xticklabels(\\\n [\"{:.2f}\".format(x).rstrip('0').rstrip('.') for x in ticksnames],\\\n rotation = 45)\n\n 
xlab=axn.set_xlabel(self.names[i])\n\n xlab.set_fontsize(10)\n\n axn.tick_params(axis='x',labelsize=10)\n\n axn.set_xlim(lim)\n\n\n\n elif ax=='y':\n\n ticks=plot.get_yticks()\n\n lim=plot.get_ylim()\n\n for i in range(len(self.names)):\n\n if i==0:\n\n axn=plot\n\n axn.spines['left'].set_position(('outward',10))\n\n axn.spines['left'].set_visible(True)\n\n else:\n\n dx_fig=0.08\n\n plot_position=plot.get_position()\n\n prev_ax_position=axn.get_position()\n\n extra_ax.append(fig.add_axes(\\\n (prev_ax_position.x0-2*dx_fig,\\\n prev_ax_position.y0,\\\n 0,\\\n prev_ax_position.height),'autoscalex_on',True))\n\n axn=extra_ax[i-1]\n\n axn.xaxis.set_visible(False) # hide the yaxis\n\n for side in axn.spines.keys(): # 'top', 'bottom', 'left', 'right'\n\n axn.spines[side].set_linewidth(1)\n\n axn.set_yticks(ticks)\n\n ticksnames=[float(str(x)) for x in self.values[i]]\n\n axn.set_yticklabels(\\\n [\"{:.2f}\".format(x).rstrip('0').rstrip('.') for x in ticksnames],\\\n rotation = 45)\n\n ylab=axn.set_ylabel(self.names[i])\n\n ylab.set_fontsize(10)\n\n axn.tick_params(axis='y',labelsize=10)\n\n axn.set_ylim(lim)\n\n else:\n\n raise ValueError(\"Axis can be 'x' or 'y'\")", "def __init__(self, skin_directory):\n self.ax = None\n self.generate_axis()\n self.skin_directory = skin_directory\n self.figure = plt.gcf()", "def set_axes(self, a):\r\n self.axes = a", "def side_wheel_from_axis():", "def setup_axes():\n\taxes = visuals.subplots(1, 2, figsize = (14, 7))\n\taxes[1].set_yscale(\"log\")\n\taxes[0].set_xlabel(\"[Fe/H]\")\n\taxes[0].set_ylabel(\"[Sr/Fe]\")\n\taxes[1].set_xlabel(\"[Sr/Fe]\")\n\taxes[1].set_ylabel(\"Stellar Probability Density\")\n\taxes[0].set_xlim([-2.2, 0.2])\n\taxes[0].set_ylim([-2.4, 0.4])\n\taxes[1].set_xlim([-1.4, 0.4])\n\taxes[1].set_ylim([0.05, 50])\n\treturn axes", "def __init__(self, vals, model, data_visualize, latent_axes=None, sense_axes=None, latent_index=[0,1]):\r\n if vals == None:\r\n vals = model.X[0]\r\n\r\n matplotlib_show.__init__(self, vals, axes=latent_axes)\r\n\r\n if isinstance(latent_axes,mpl.axes.Axes):\r\n self.cid = latent_axes.figure.canvas.mpl_connect('button_press_event', self.on_click)\r\n self.cid = latent_axes.figure.canvas.mpl_connect('motion_notify_event', self.on_move)\r\n self.cid = latent_axes.figure.canvas.mpl_connect('axes_leave_event', self.on_leave)\r\n self.cid = latent_axes.figure.canvas.mpl_connect('axes_enter_event', self.on_enter)\r\n else:\r\n self.cid = latent_axes[0].figure.canvas.mpl_connect('button_press_event', self.on_click)\r\n self.cid = latent_axes[0].figure.canvas.mpl_connect('motion_notify_event', self.on_move)\r\n self.cid = latent_axes[0].figure.canvas.mpl_connect('axes_leave_event', self.on_leave)\r\n self.cid = latent_axes[0].figure.canvas.mpl_connect('axes_enter_event', self.on_enter)\r\n\r\n self.data_visualize = data_visualize\r\n self.model = model\r\n self.latent_axes = latent_axes\r\n self.sense_axes = sense_axes\r\n self.called = False\r\n self.move_on = False\r\n self.latent_index = latent_index\r\n self.latent_dim = model.input_dim\r\n\r\n # The red cross which shows current latent point.\r\n self.latent_values = vals\r\n self.latent_handle = self.latent_axes.plot([0],[0],'rx',mew=2)[0]\r\n self.modify(vals)\r\n self.show_sensitivities()", "def setup_axes(self):\n fig = plt.figure(1)\n axs = fig.add_subplot(1, 1, 1)\n fig.clf()\n axs = plt.subplots(1, 2)\n ax1 : plt.axis = axs[0]\n ax2 : plt.axis = axs[1]\n fig.canvas.draw()\n \n line1_t, = ax1.plot([], label='train')\n line1_v, = ax1.plot([], label='val')\n\n 
ax1.set_title('Loss vs Iterations')\n ax1.set_xlabel('Iterations')\n ax1.set_ylabel('Loss')\n ax1.grid(True)\n ax1.autoscale()\n # ax1.legend()\n\n line2_t, = ax2.plot([], label='train')\n line2_v, = ax2.plot([], label='val')\n\n ax2.set_title('Accuracy vs Iterations')\n ax2.set_xlabel('Time')\n ax2.set_ylabel('Percent Accuracy')\n ax2.grid(True)\n ax2.autoscale()\n # ax2.legend()\n\n lines = [line1_t, line1_v, line2_t, line2_v]\n\n return fig, ax1, ax2, lines", "def __init__(self, time_axis: Union[np.ndarray, are_ax.Axis], state_vectors: np.ndarray):\n\n if isinstance(time_axis, np.ndarray):\n if time_axis.dtype == PreciseDateTime:\n time_axis_start = time_axis[0]\n relative_time_axis = (time_axis - time_axis_start).astype(float)\n self._time_axis = are_ax.Axis(relative_time_axis, time_axis_start)\n else:\n raise ValueError(\"Axis should be a vector of PreciseDateTime objects\")\n else:\n self._time_axis = time_axis\n\n _check_init_input(self._time_axis, state_vectors)\n\n # state_vector are stored as (3, N) numpy array\n self._state_vectors = np.vstack((state_vectors[::3], state_vectors[1::3], state_vectors[2::3]))\n self._interpolator = GeometryInterpolator(self._time_axis, self._state_vectors)", "def _plot_init(self):\n pass", "def _plot_init(self):\n pass", "def __init__(self):\n self.state_dim = 12\n self.measurement_dim = 6", "def _lazy_axis(self):\n raise NotImplementedError", "def draw_axes(self, cr):\n # en gris\n cr.set_line_width(0.02)\n cr.set_source_rgb(0.3, 0.3, 0.3)\n cr.move_to( -1,0 )\n cr.line_to( 1,0 )\n cr.move_to( 0, -1 )\n cr.line_to( 0, 1 )\n cr.stroke()\n #self.draw_value( cr, \"0\", 0, 0 )\n #self.draw_value( cr, \"1\", 5-0.3, 0 )\n #self.draw_value( cr, \"2\", 2+0.3, 4-0.5 )", "def on_axes_update(self):\n\n if self.connected:\n tab_open = self.tab_open()\n\n # Update axes\n for i, series in enumerate(self.measurements_list):\n if i == tab_open:\n self.chart_list[i].setXRange(self.worker.start_range,\n self.worker.samples_count + NUM_GUI_SAMPLES, padding=0.075)\n\n # for i, series in enumerate(self.measurements_list):\n #\n # # An optimization to prevent unnecessary rendering\n # if i == tab_open:\n #\n # # Remove old x-axis\n # series.detachAxis(self.xaxis_list[i])\n # self.chart_list[i].chart().removeAxis(self.xaxis_list[i])\n # self.xaxis_list[i] = QValueAxis()\n #\n # # Add new x-axis\n # self.chart_list[i].chart().addAxis(self.xaxis_list[i], Qt.AlignBottom)\n # self.xaxis_list[i].setRange(self.worker.samples_count, self.worker.samples_count +\n # NUM_GUI_SAMPLES)\n # series.attachAxis(self.xaxis_list[i])", "def _get_axis_state_ps90(self, control_unit: int, axis: int) -> Tuple[Union[int, bool, str]]:\n control_unit = ctypes.c_long(control_unit)\n axis = int(axis)\n axis = ctypes.c_long(axis)\n sleep(time_ps_delay)\n res = self.lib.PS90_GetAxisState(control_unit, axis)\n error = self.__get_read_error_ps90(control_unit)\n if error != 0:\n res = False\n return res, self._error_OWIS_ps90(error, 1)", "def __init__(self,options,pos):\n self.options = options\n numobjects = pos.shape[1]\n plt.ion() # turn on interactive plotting mode\n dpi=72.0 # set dpi (I think this is appropriate on mac)\n # fig accepts size in inches\n # so divide desired pixel width, height by dpi to get inches\n w,h=(self.options.width/dpi,self.options.height/dpi)\n fig = plt.figure(1,figsize=(w,h),dpi=dpi)\n fig.clear()\n\n #w = self.options.width/fig.get_dpi() # desired width in inches\n #h = self.options.height/fig.get_dpi() # desired height in inches\n #fig.set_size_inches(w,h,forward=True) 
# last arg resizes the canvas to match\n\n self.ax = plt.axes()\n self.ax.set_xlim(self.options.xmin,self.options.xmax)\n self.ax.set_ylim(self.options.ymin,self.options.ymax)\n #pyplot.axis('scaled')\n\n # I don't know why axis('scaled') doesn't work here\n # But I think the next two commands are equivalent\n self.ax.set_aspect('equal', adjustable='box', anchor='C')\n self.ax.set_autoscale_on(False)\n\n #self.redraw()\n\n\n #facecolors = [cm.jet(x) for x in np.random.rand(len(vicon_objects))]\n facecolors = [cm.jet(x) for x in np.linspace(0,1,numobjects)]\n if self.options.visualize_switch_xy:\n if self.options.axis==1:\n self.ax.axvline(linewidth=4, c='k')\n else:\n self.ax.axhline(linewidth=4, c='k')\n self.col = plt.scatter(pos[:,1],pos[:,0],c=facecolors,s=3000)\n else:\n if self.options.axis==1:\n self.ax.axhline(linewidth=4, c='k')\n else:\n self.ax.axvline(linewidth=4, c='k')\n self.col = plt.scatter(pos[:,0],pos[:,1],c=facecolors,s=3000)\n\n # scores\n self.tpos = self.ax.text(0.75*self.options.xmax,0.75*self.options.ymin,str(50),\n size=72,color='k',ha='center',va='center')\n self.tneg = self.ax.text(0.75*self.options.xmin,0.75*self.options.ymin,str(50),\n size=72,color='k',ha='center',va='center')\n\n self.canvas = agg.FigureCanvasAgg(fig)\n self.canvas.draw()\n self.renderer = self.canvas.get_renderer()\n raw_data = self.renderer.tostring_rgb()\n\n pygame.init()\n \n self.window = pygame.display.set_mode((options.width,options.height), DOUBLEBUF)\n self.screen = pygame.display.get_surface()\n\n self.set_caption(\"Possession: Waiting for Vicon\")\n \n size = self.canvas.get_width_height()\n \n surf = pygame.image.fromstring(raw_data, size, \"RGB\")\n self.screen.blit(surf, (0,0))\n pygame.display.flip()", "def _update_ax(self):\n raise NotImplementedError(\"Implement _update_ax(self) in subclass\")", "def reflect(self, axis):\n if axis == \"x\":\n self.y = - self.y\n elif axis == \"y\":\n self.x = - self.x\n else:\n print(\"The argument axis only accepts values 'x' and 'y'!\")", "def __init__(self, x_axis_p, y_axis_p, x_speed=0, y_speed=0, direction=0):\r\n self.__x_axis = x_axis_p\r\n self.__y_axis = y_axis_p\r\n self.__X_speed = x_speed\r\n self.__y_speed = y_speed\r\n self.__direction = direction\r\n self.__lives = 3", "def __init__(self):\n\n # The Microsoft XBox 360 Wired controller has 11 buttons and 8 axes.\n # Buttons can be 0 (not pressed) or 1 (pressed)\n # Axes are floats and range between -1 and 1. Note that for LT and RT, their \"not pressed\" value is 1 and for the others it is 0. Cross keys only have values -1, 0, and 1. The others have be any value in between -1 and 1.\n num_buttons = 11\n num_axes = 8\n self.inputs = [0 for i in range(num_buttons + num_axes)]\n self.inputs[JoyInput.LT] = self.inputs[JoyInput.RT] = 1\n\n # Dictionary of saved inputs. 
If an input is not currently saved, you must set it to None.\n # For example, the LS_Y (\"left stick Y\") axis may be saved in self.saved[JoyInput.LS_Y]\n self.saved = {\n JoyInput.LS_Y: None,\n Joystick.RS_ANGLE: None,\n }\n\n # Field variables\n self.depth_state = None # stores the depth state\n self.depth_last_received = 0 # how long since the last depth state callback\n self.depth_pwm_input = 0 # tracks pwm given to depth thrusters\n\n # ROS Subscribers\n rospy.Subscriber(\"/joy\", Joy, self.joy_callback)\n rospy.Subscriber(Topic.YAW_STATE, Float64, self.yaw_state_callback)\n rospy.Subscriber(Topic.DEPTH_STATE, Float64, self.depth_state_callback)\n rospy.Subscriber(Topic.YAW_SETPOINT, Float64, self.yaw_setpoint_callback)\n rospy.Subscriber(Topic.DEPTH_SETPOINT, Int16, self.depth_setpoint_callback)\n\n # ROS Publishers\n # self.topics is a dictionary of dictionaries.\n # 'publisher' contains the rospy.Publisher()\n # 'msg' contains the Int16(), Float64(), or Bool() related to the publisher\n # Use self.publish() rather than using self.topics directly.\n self.topics = {\n Topic.YAW_PWM: {'publisher':rospy.Publisher(Topic.YAW_PWM, Int16, queue_size=10), 'msg':Int16()},\n Topic.YAW_PWM_FEEDBACK: {'publisher':rospy.Publisher(Topic.YAW_PWM_FEEDBACK, Int16, queue_size=10), 'msg':Int16()},\n Topic.YAW_PID: {'publisher':rospy.Publisher(Topic.YAW_PID, Bool, queue_size=10), 'msg':Bool()},\n Topic.YAW_SETPOINT: {'publisher':rospy.Publisher(Topic.YAW_SETPOINT, Float64, queue_size=10), 'msg':Float64()},\n\n Topic.DEPTH_PWM: {'publisher':rospy.Publisher(Topic.DEPTH_PWM, Int16, queue_size=10), 'msg':Int16()},\n Topic.DEPTH_PID: {'publisher':rospy.Publisher(Topic.DEPTH_PID, Bool, queue_size=10), 'msg':Bool()},\n Topic.DEPTH_SETPOINT: {'publisher':rospy.Publisher(Topic.DEPTH_SETPOINT, Int16, queue_size=10), 'msg':Int16()},\n }", "def listen(self):\n\n if not self.axis_data:\n self.axis_data = {}\n\n if not self.button_data:\n self.button_data = {}\n for i in range(self.controller.get_numbuttons()):\n self.button_data[i] = False\n\n if not self.hat_data:\n self.hat_data = {}\n for i in range(self.controller.get_numhats()):\n self.hat_data[i] = (0, 0)\n\n for event in pygame.event.get():\n if event.type == pygame.JOYAXISMOTION:\n self.axis_data[event.axis] = round(event.value,2)\n elif event.type == pygame.JOYBUTTONDOWN:\n self.button_data[event.button] = True\n elif event.type == pygame.JOYBUTTONUP:\n self.button_data[event.button] = False\n elif event.type == pygame.JOYHATMOTION:\n self.hat_data[event.hat] = event.value\n\n axis=self.axis_data\n\n if 0 in axis:\n self.x=axis[0]\n self.y=-axis[1]\n\n # Turbo\n if self.button_data[7]:\n self.x*=2\n self.y*=2\n # Start Camera\n if self.button_data[3]:\n subprocess.Popen([\"firefox\",otraip+\"/html\"],\n stdout=subprocess.PIPE,\n stdin=subprocess.PIPE,\n stderr=subprocess.PIPE)\n return \"camera\"\n\n # Measure\n if self.button_data[1]:\n return \"measure\"\n\n # Exit\n if self.button_data[2]:\n return \"exit\"\n return \"move \"+str(self.x)+\" \"+str(self.y)+\"\\n\"", "def _appendAxisDefinition(self, axis):\n length = len(axis)\n\n self.na_dict[\"NX\"].append(length)\n self.na_dict[\"XNAME\"].append(xarray_utils.getBestName(axis))\n\n # If only one item in axis values\n if length < 2:\n self.na_dict[\"DX\"].append(0)\n self.na_dict[\"NXDEF\"].append(length)\n self.na_dict[\"X\"].append(axis.data.tolist()) \n return\n\n incr = xarray_utils.get_interval(axis, 0, 1)\n\n for i in range(1, length):\n if (axis[i] - axis[i - 1]) != incr:\n 
self.na_dict[\"DX\"].append(0)\n self.na_dict[\"NXDEF\"].append(length)\n self.na_dict[\"X\"].append(axis.data.tolist())\n break\n\n else: # If did not break out of the loop\n max_length = length\n if length > 3: \n max_length = 3\n\n self.na_dict[\"DX\"].append(incr)\n self.na_dict[\"NXDEF\"].append(max_length)\n self.na_dict[\"X\"].append(axis[:max_length])", "def getAxis(self,axis):\n\n\t\tif axis == \"u\":\n\t\t\tif len(self.usr) != 0:\n\t\t\t\treturn np.append([0], self.usr)\n\n\t\tif axis == \"s\":\n\t\t\tif len(self.seg) != 0:\n\t\t\t\tif self.radiograph:\n\t\t\t\t\treturn self.seg\n\t\t\t\telse:\n\t\t\t\t\tfirst = self.seg[0] - 1.\n\t\t\t\t\treturn np.append([first], self.seg)\n\n\t\tif axis == \"c\":\n\t\t\tif len(self.cos) != 0:\n\t\t\t\tif self.radiograph:\n\t\t\t\t\treturn self.cos\n\t\t\t\telse:\n\t\t\t\t\tfirst = -1.\n\t\t\t\t\treturn np.append([first], self.cos)\n\n\t\tif axis == \"e\":\n\t\t\tif len(self.erg) != 0:\n\t\t\t\tfirst = self.erg[0] - 1.\n\t\t\t\treturn np.append([first], self.erg)\n\n\t\tif axis == \"t\":\n\t\t\tif len(self.tim) != 0:\n\t\t\t\tfirst = self.tim[0] - 1.\n\t\t\t\treturn np.append([first], self.tim)\n\n\t\tif axis == \"i\":\n\t\t\treturn self.cora\n\n\t\tif axis == \"j\":\n\t\t\treturn self.corb\n\n\t\tif axis == \"k\":\n\t\t\treturn self.corc\n\n\t\treturn []", "def axes_coupled(robot):\n\n target_ctrl_path = get_target_ctrl_path(robot)\n attr_name = 'axisCoupling'\n\n attr_path = target_ctrl_path + '.' + attr_name\n\n if not pm.objExists(attr_path):\n return False\n else:\n return pm.getAttr(attr_path)", "def _use_data_bounds_changed_for_axes(self):\n self.update_pipeline()", "def rotate_axis(self):\n try:\n self.obj.rotate(angle=self.rotation_speed * self.time_scale / self.refresh_rate, axis=vector(0, 1, 0))\n except ZeroDivisionError:\n print(\"ERROR: REFRESH_RATE is 0\")\n except (AttributeError, TypeError):\n print(\"ERROR: wrong arguments type while initializing!!\")", "def axes(self,a_len,b_len,c_len,beta):\n self.a = np.array([0,0,a_len])\n self.b = np.array([0,b_len,0])\n self.c = Ry(-beta) @ np.array([0,0,c_len])", "def __init__(self):\n #Call the base class constructor\n inkex.Effect.__init__(self)\n \n #Define custom namespace\n inkex.NSS['TimeAnalysis'] = 'http://www.arcex.no/workpackage/basin-analysis/'\n \n #Define string options\n self.OptionParser.add_option('-t', '--AxisType', action = 'store',\n type = 'string', dest = 'AxisType', default = '',\n help = 'Which axis would you like to define?')\n self.OptionParser.add_option('-s', '--AxisScale', action = 'store',\n type = 'string', dest = 'AxisScale', default = '',\n help = 'Is the axis logaritmic or linear?')\n self.OptionParser.add_option('-d', '--AxisDescription', action = 'store',\n type = 'string', dest = 'AxisDescription', default = '',\n help = 'How would you the describe the axis?')\n self.OptionParser.add_option('-u', '--AxisUnit', action = 'store',\n type = 'string', dest = 'AxisUnit', default = '',\n help = 'What unit is represented by you axis?')\n self.OptionParser.add_option('-l', '--AxisLabel', action = 'store',\n type = 'string', dest = 'AxisLabel', default = '',\n help = 'What would you like to call your axis')\n self.OptionParser.add_option('-x', '--AxisMaxValue', action = 'store',\n type = 'string', dest = 'AxisMaxValue', default = '',\n help = 'What is the maximum value represented by you axis?')\n self.OptionParser.add_option('-i', '--AxisMinValue', action = 'store',\n type = 'string', dest = 'AxisMinValue', default = '',\n help = 'What is the minimum value 
represented by you axis?')\n self.OptionParser.add_option('-a', '--Notebook', action = 'store',\n type = 'string', dest = 'Notebook', default = '',\n help = 'What is the minimum value represented by you axis?')", "def axis(self):\r\n return self._arm.axis", "def setupVariableAxes(self):\n if self.var is None:\n return\n \n if (self.axisList is None):\n self.axisList = self.var.getAxisList()\n self.axisOrder = range(len(self.axisList))\n\n self.clear() \n self.setAxesNames()\n \n # Iterate through the variables axes & init each axis widget\n axisIndex = 0\n for axis, axisName in zip(self.axisList, self.axesNames):\n # Create the axis widget\n axisWidget = QAxis(axis, axisName, axisIndex, self)\n axisWidget.setAxisButtonText(axisName)\n self.axisWidgets.append(axisWidget)\n\n # Setup the layout for each axis\n row = self.gridLayout.rowCount()\n self.gridLayout.addWidget(axisWidget.getAxisButton(), row, 0)\n self.gridLayout.addWidget(axisWidget, row, 1) \n self.gridLayout.addWidget(axisWidget.getAxisOperationsButton(), row, 2)\n\n # Create separator line between each axis widget\n vline = QtGui.QFrame()\n vline.setFrameStyle(QtGui.QFrame.HLine | QtGui.QFrame.Sunken)\n self.gridLayout.addWidget(vline, row+1, 0, 1,\n self.gridLayout.columnCount())\n\n axisIndex += 1\n\n self.gridLayout.setRowStretch(self.gridLayout.rowCount(), 1)", "def get_axis_vals(self):\n return self._x_axis, self._y_axis", "def get_axis(self):\n self.current_axis = self.gui.comboBox_axis.currentText()\n self.logger.debug('current axis:' + str(self.current_axis))\n\n if 'Stepper' in self.current_axis:\n #self.gui.groupBox_configurate.setEnabled(True)\n self.gui.groupBox_configurate.setStyleSheet(\"QGroupBox#Colored_configure {border: 1px solid blue; border-radius: 9px;}\")\n\n self.gui.groupBox_actions.setStyleSheet(\"QGroupBox default\")\n\n self.gui.stackedWidget_actions.setCurrentWidget(self.gui.page_configure_stepper)\n self.gui.stackedWidget_stepper.setCurrentWidget(self.gui.stackedWidgetMoving)\n self.gui.stackedWidgetMoving.setEnabled(False)\n\n if 'Z' in self.current_axis:\n #Disable the xy groupboxes, enable the z groupboxes,\n # choose the page_amplZ of the stackedWidget_configure\n self.gui.groupBox_XY.setEnabled(False)\n self.gui.groupBox_Z.setEnabled(True)\n\n self.gui.stackedWidget_configure.setCurrentWidget(self.gui.page_amplZ)\n\n self.gui.pushButton_up.setEnabled(False)\n self.gui.pushButton_down.setEnabled(False)\n self.gui.pushButton_left.setText('closer')\n self.gui.pushButton_right.setText('away')\n else:\n #Enable the xy groupboxes, disable the z groupboxes,\n # choose the page_amplXY of the stackedWidget_configure.\n\n self.gui.groupBox_XY.setEnabled(True)\n self.gui.groupBox_Z.setEnabled(False)\n\n self.gui.stackedWidget_configure.setCurrentWidget(self.gui.page_amplXY)\n\n self.gui.pushButton_up.setEnabled(True)\n self.gui.pushButton_down.setEnabled(True)\n self.gui.pushButton_left.setText('left')\n self.gui.pushButton_right.setText('right')\n\n elif 'Scanner' in self.current_axis:\n #Choose the page_move_scanner of the stackedWidget_actions and the stackedWidgetEmpty of the stackedWidget_stepper\n self.gui.stackedWidget_actions.setCurrentWidget(self.gui.page_move_scanner)\n self.gui.stackedWidget_stepper.setCurrentWidget(self.gui.stackedWidgetempty)\n\n #Give the configurate box a border and the action box none\n self.gui.groupBox_configurate.setStyleSheet(\"QGroupBox#Colored_configure {border: 1px solid blue; border-radius: 9px;}\")\n self.gui.groupBox_actions.setStyleSheet(\"QGroupBox 
default\")\n\n #Choose either the page_scannerZ or page_scannerXY of the stackedWidget_voltScanner\n if 'Z' in self.current_axis:\n self.gui.stackedWidget_voltScanner.setCurrentWidget(self.gui.page_scannerZ)\n else:\n self.gui.stackedWidget_voltScanner.setCurrentWidget(self.gui.page_scannerXY)", "def test_config(self):\n\n p = SyncProto(packet_port, None)\n\n d = make_axes(500, .1, usteps=16, steps_per_rotation=200)\n p.config(4, 18, 32, False, False, axes=d['axes1']);\n p.info()\n\n d = make_axes(1000, .2, usteps=16, steps_per_rotation=200,\n output_mode=OutMode.OUTPUT_OPENDRAIN, highval=OutVal.LOW)\n p.config(4, 7, 9, False, False, axes=d['axes1']);\n p.info()", "def _UpdatePlotImpl( self ):\n if self.ax is not None:\n self.axline = None\n self.cursorLine = \\\n self.cursorLine2 = None\n\n# self.ax.clear()\n# if hasattr( self, 'ax2' ) and self.ax2 is not None:\n# self.ax2.clear()\n self.fig.clear()\n self._InitAxes()\n\n#\t\t-- Scale fonts\n#\t\t--\n wd, ht = self.GetClientSize()\n label_font_size = 14\n tick_font_size = 12\n self.titleFontSize = 16\n if 'wxMac' not in wx.PlatformInfo and wd < 800:\n\tdecr = (800 - wd) / 50.0\n\tlabel_font_size -= decr\n\ttick_font_size -= decr\n\tself.titleFontSize -= decr\n\n# self.ax.grid(\n# True, 'both', 'both',\n#\t color = '#c8c8c8', linestyle = ':', linewidth = 1\n#\t )\n self._DoUpdatePlot( wd, ht )\n self._DoUpdateRedraw()\n self.canvas.draw()\n #end if", "def noAxisSystem():\n dislin.nograf()", "def updatePlot(self,*args):\n # set x limits\n timeDisplayOptions = {'10 minutes':10,'1 hour':60,'6 hours':6*60,'24 hours':24*60,'All':0}\n try:\n lastDatetime = mpl.dates.num2date(self.stage60K.get_xdata()[-1])\n firstDatetime = mpl.dates.num2date(self.stage60K.get_xdata()[0])\n except IndexError: # no data yet\n now = datetime.datetime.utcnow().toordinal()\n firstDatetime = mpl.dates.num2date(now)\n lastDatetime = firstDatetime\n xMin = lastDatetime-datetime.timedelta(minutes=timeDisplayOptions[self.wScale.get()])\n xMin = max([ firstDatetime, xMin ])\n if self.wScale.get() == 'All':\n xMin = firstDatetime\n xMinIndex = numpy.searchsorted( self.stage60K.get_xdata(), mpl.dates.date2num(xMin) )\n # rescale axes, with the x being scaled by the slider\n if self.toolbar._active == 'HOME' or self.toolbar._active == None:\n ymin,ymax = 10000000, -10000000\n lineAndVar = { self.stage60K: self.t60K,\n self.stage03K: self.t3K,\n self.stageGGG: self.tGGG,\n self.stageFAA: self.tFAA }\n if len(self.stage60K.get_xdata()) > 1:\n for line in lineAndVar.keys():\n if lineAndVar[line].get() == 0:\n line.set_visible(False)\n else:\n line.set_visible(True)\n ydata = line.get_ydata()[xMinIndex:-1]\n try:\n ymin = min(ymin, numpy.nanmin(ydata))\n ymax = max(ymax, numpy.nanmax(ydata))\n except ValueError as e:\n pass\n self.ax.set_xlim(xMin,lastDatetime)\n self.ax.set_ylim(ymin - (ymax-ymin)/10, ymax + (ymax-ymin)/10)\n hfmt = mpl.dates.DateFormatter('%H:%M:%S', tz=tz.tzlocal())\n self.ax.xaxis.set_major_formatter(hfmt)\n self.fig.autofmt_xdate()\n self.fig.tight_layout()\n #draw\n self.canvas.draw()", "def on_click(event):\n ax = event.inaxes\n \n if ax is None:\n # Occurs when a region not in an axis is clicked...\n return\n \n if self.current_plot == 'single':\n if event.button is 1:\n if not self.ax_zoomed:\n # Change over to a single baseline plot\n try:\n self.ax_zoomed = True\n self.current_ax = ax\n ax.set_position([0.1, 0.05, 0.85, 0.80])\n ax.set_xlabel(\"Frequency\")\n #ax.set_ylabel(\"Time\")\n \n for axis in self.sp_fig.axes:\n if axis is not ax:\n 
axis.set_visible(False)\n \n except ValueError:\n raise\n self.sp_fig.canvas.mpl_disconnect(self.fig_connect)\n \n elif event.button is 3:\n if self.ax_zoomed:\n self.ax_zoomed = False\n #self.sp_fig.canvas.mpl_disconnect(self.fig_connect)\n self.updatePlot()\n \n else:\n # No need to re-draw the canvas if it's not a left or right click\n return\n \n elif self.current_plot == 'multi':\n if ax is None:\n # Occurs when a region not in an axis is clicked...\n return\n if event.button is 1:\n if not self.ax_zoomed:\n # Change over to a single baseline plot\n try:\n ant1, ant2 = ax.get_title().split(\" \")\n except:\n ant1 = int(ax.get_title().strip('Tile').strip('Antenna').strip('Stand'))\n ant2 = ant1 \n try:\n self.spin_ref_ant.setValue(int(ant1))\n self.spin_ref_ant2.setValue(int(ant2))\n self.plot_select.setCurrentIndex(0)\n self.current_plot = 'single'\n \n self.updatePlot()\n except:\n raise\n self.sp_fig.canvas.mpl_disconnect(self.fig_connect)\n \n elif event.button is 3:\n if not self.ax_zoomed:\n ax.set_position([0.1, 0.1, 0.85, 0.85])\n # TODO: fix labelling of zoom plots\n ax.set_xlabel(\"Frequency\")\n #ax.set_ylabel(\"Time\")\n self.orig_position = ax.get_position()\n for axis in event.canvas.figure.axes:\n # Hide all the other axes...\n if axis is not ax:\n axis.set_visible(False)\n self.ax_zoomed=True\n else:\n self.updatePlot()\n \n else:\n # No need to re-draw the canvas if it's not a left or right click\n return\n \n event.canvas.draw()", "def update_flags(self):\n # view mode, filled vs wirefrom\n if self.view['wireframe']:\n gl.glPolygonMode(gl.GL_FRONT_AND_BACK, gl.GL_LINE)\n else:\n gl.glPolygonMode(gl.GL_FRONT_AND_BACK, gl.GL_FILL)\n\n # set fullscreen or windowed\n self.set_fullscreen(fullscreen=self.view['fullscreen'])\n\n # backface culling on or off\n if self.view['cull']:\n gl.glEnable(gl.GL_CULL_FACE)\n else:\n gl.glDisable(gl.GL_CULL_FACE)\n\n # case where we WANT an axis and NO vertexlist\n # is stored internally\n if self.view['axis'] and self._axis is None:\n from .. 
import creation\n # create an axis marker sized relative to the scene\n axis = creation.axis(origin_size=self.scene.scale / 100)\n # create ordered args for a vertex list\n args = rendering.mesh_to_vertexlist(axis)\n # store the axis as a reference\n self._axis = self.batch.add_indexed(*args)\n\n # case where we DON'T want an axis but a vertexlist\n # IS stored internally\n elif not self.view['axis'] and self._axis is not None:\n # remove the axis from the rendering batch\n self._axis.delete()\n # set the reference to None\n self._axis = None", "def update(self):\n\t\tprint(\"Plotting \" + str(str(self.values[\"Trial1\"][1]) + \" at \" + str(self.values[\"Trial1\"][0]) + \"\\n\"))\n\t\tif self.clear:\n\t\t\tself.stream1.write(dict(x=[], y=[]))\n\t\t\tself.stream2.write(dict(x=[], y=[]))\n\t\t\tself.stream3.write(dict(x=[], y=[]))\n\t\telse:\n\t\t\tself.stream1.write(dict(x=self.values[\"Trial1\"][0], y=self.values[\"Trial1\"][1]))#, trace=Bar)\n\t\t\tself.stream2.write(dict(x=self.values[\"Trial2\"][0], y=self.values[\"Trial2\"][1]))\n\t\t\tself.stream3.write(dict(x=self.values[\"Trial3\"][0], y=self.values[\"Trial3\"][1]))", "def xaxis ( self ) :\n return self.__xaxis", "def xaxis ( self ) :\n return self.__xaxis", "def update(self):\n\t\tprint(\"Plotting \" + str(str(self.values[\"Trial\"][1]) + \" at \" + str(self.values[\"Trial\"][0]) + \"\\n\"))\n\t\tif self.clear:\n\t\t\tself.stream.write(dict(x=[], y=[]))\n\t\telse:\n\t\t\tself.stream.write(dict(x=self.values[\"Trial\"][0], y=self.values[\"Trial\"][1]))", "def xaxis ( self ) :\n return self.__xaxis", "def status(self,axis):\n \n if not self.enabled:\n return (False,False)\n \n enabled = True\n self.send_cmd(axis, ' PRINT MVG')\n\n flag = self.ser.read(100)\n moving = True\n \n if flag[:4] == b'FALS': \n moving = False\n elif flag[:4] == b'TRUE':\n moving = True\n\n non_moving = not moving\n return (enabled, non_moving)", "def ready(self):\n plt.ion()\n self.figure = plt.figure()\n axes = self.figure.add_subplot(111)\n self.line, = axes.plot(self.xs, self._get_y_data(), self.colour)\n\n if self.y_range is not None:\n plt.ylim(*self.y_range)\n plt.xlim(self.x.lower, self.x.upper)\n\n plt.xlabel(self.x.tex_name if self.use_tex else self.x.name)\n plt.ylabel(self.y.tex_name if self.use_tex else self.y.name)\n\n self.figure.canvas.draw()", "def updatePlot(self):\n if len(self.baslin):\n X = list(t[0] for t in self.baslin)\n Y = list(t[1] for t in self.baslin)\n self.BLplt.set_xdata(X)\n self.BLplt.set_ydata(Y)\n if self.BLtyp == 'S':\n if self.BL is None:\n self.BL, = self.axes.plot(self.data[0], self.data[2], linestyle='-', color='green')\n else:\n self.BL.set_ydata(self.data[2])\n self.canvas.draw()", "def listen_and_send(self):\n hadEvent = False\n\n if not self.axis_data:\n self.axis_data = {}\n\n if not self.button_data:\n self.button_data = {}\n for i in range(self.controller.get_numbuttons()):\n self.button_data[i] = False\n\n if not self.hat_data:\n self.hat_data = {}\n for i in range(self.controller.get_numhats()):\n self.hat_data[i] = (0, 0)\n\n while True:\n for event in pygame.event.get():\n if event.type == pygame.JOYAXISMOTION:\n self.axis_data[event.axis] = round(event.value, 2)\n elif event.type == pygame.JOYBUTTONDOWN:\n self.button_data[event.button] = True\n elif event.type == pygame.JOYBUTTONUP:\n self.button_data[event.button] = False\n elif event.type == pygame.JOYHATMOTION:\n self.hat_data[event.hat] = event.value\n\n if event.type == pygame.JOYBUTTONDOWN:\n # A button on the joystick just got pushed down\n 
hadEvent = True\n elif event.type == pygame.JOYAXISMOTION:\n # A joystick has been moved\n hadEvent = True\n\n if hadEvent:\n\n # If platform is linux we need to change some values in axis_data\n os.system('clear')\n print(\"Axis before\")\n pprint.pprint(self.axis_data)\n if sys.platform == 'linux':\n #self.axis_data[2], self.axis_data[3], self.axis_data[4] = self.axis_data[4], self.axis_data[2], self.axis_data[3]\n temp2 = self.axis_data[2]\n temp3 = self.axis_data[3]\n temp4 = self.axis_data[4]\n self.axis_data[2] = temp4\n self.axis_data[3] = temp2\n self.axis_data[4] = temp3\n\n\n self.event_dict['axis'] = self.axis_data\n self.event_dict['button'] = self.button_data\n message = pickle.dumps(self.event_dict, protocol=4)\n message = bytes(f\"{len(message):<{HEADERSIZE}}\", 'utf-8') + message\n self.sock.sendall(message)\n\n #if self.button_data[4]:\n # self.verbose = not self.verbose\n\n if self.verbose:\n\n # print(\"Button \")\n # pprint.pprint(self.button_data)\n print(\"Axis \")\n pprint.pprint(self.axis_data)\n # print(\"Motion \")\n # pprint.pprint(self.hat_data)", "def initialize_axes(self):\r\n self.x_lim = np.array([self.vals[:, 0].min(), self.vals[:, 0].max()])\r\n self.y_lim = np.array([self.vals[:, 1].min(), self.vals[:, 1].max()])\r\n self.z_lim = np.array([self.vals[:, 2].min(), self.vals[:, 2].max()])", "def update_velocities(self, wx, wy):\r\n self.wx = wx\r\n self.wy = wy", "def set_axis(self, axis_list):\n if self.table_ready:\n final_axis_list = []\n for i, axis in enumerate(axis_list):\n if axis:\n final_axis_list.append(\"1 \" + str(i + 1))\n else:\n final_axis_list.append(\"0 \" + str(i + 1))\n\n command = self.build_command(\n self.device, (\"set_axis\", final_axis_list), single_commands=True\n )\n self.vcw.write(self.device, command)", "def plot_config(self):\n self.dynamic.current_plot.setTitle(\n \"Dynamic IV waiting time analysis\", **self.titleStyle\n )\n self.dynamic.current_plot.setLabel(\n \"left\", \"current\", units=\"A\", **self.labelStyle\n )\n self.dynamic.current_plot.setLabel(\n \"bottom\", \"time\", units=\"s\", **self.labelStyle\n )\n self.dynamic.current_plot.showAxis(\"top\", show=True)\n self.dynamic.current_plot.showAxis(\"right\", show=True)\n self.dynamic.current_plot.plotItem.showGrid(x=True, y=True)\n self.dynamic.current_plot.getPlotItem().invertY(True)\n\n change_axis_ticks(self.dynamic.current_plot, self.ticksStyle)", "def x_axis_changed(self):\n\t\tx_coord_cbox = self.ui.findChild(QWidget, \"x_coord_cbox\")\n\t\tnew_axis = x_coord_cbox.currentText()\n\t\tprint new_axis", "def setup_datavault(self, x_axis, y_axis):\n\n yield self.dv.cd(['', self.name], True)\n\n # datasets for each laser\n self.dataset = yield self.dv.new(self.name + ' ML', [( 't', 'num')], [('GHz', '', 'num')])", "def centerAxis():\n dislin.center()", "def show_axes(self):\n if hasattr(self, 'axes_widget'):\n self.axes_widget.EnabledOn()\n self.axes_widget.SetCurrentRenderer(self)\n else:\n self.add_axes()\n self.Modified()", "def __init__(self, *args, **kwargs):\n # Set tick length to zero so azimuthal labels are not too offset\n # Change default radial axis formatter but keep default theta one\n super().__init__(*args, **kwargs)\n formatter = axistools.Formatter('auto')\n self.yaxis.set_major_formatter(formatter)\n self.yaxis.isDefault_majfmt = True\n for axis in (self.xaxis, self.yaxis):\n axis.set_tick_params(which='both', size=0)", "def __init__(self):\n self.wnd = WindSensor()", "def __init__(self, x):\n self.W = np.zeros((x+1,10))\n self.alpha = 0.00000001\n 
self.epochs = 100\n self.reg_const = 0.01", "def __init__(self, ax=None):\n\n if ax is None:\n f = plt.figure()\n self.ax = f.add_subplot(111)\n else:\n self.ax = ax\n\n self.e2 = [] # list to store RMS error results\n self.labels = []\n self.colors = []", "def tick(self):\r\n pass", "def get_toggle_axes_args(self,comArgs):\n params, flags = self.get_params(comArgs)\n args = [flags, False]\n return args", "def init(self, info):\r\n# info.object.mpl_setup()\r\n return True", "def __init__(self, slit_width_xaxis, slit_height_zaxis):\n super(SwScreen, self).__init__()\n self.sw = self.create_instance()\n\n self.set_output_files(fwrite=0, f_angle=0) #write all, TODO: remove\n\n n_screen = 1\n i_screen = np.array([1, 0, 0, 0, 0, 0, 0, 0, 0, 0])\n i_abs = np.zeros(10)\n i_slit = np.zeros(10)\n i_stop = np.zeros(10)\n k_slit = np.zeros(10)\n thick = np.zeros(10)\n file_abs = np.array(['', '', '', '', '', '', '', '', '', ''])\n rx_slit = np.zeros(10)\n rz_slit = np.zeros(10)\n sl_dis = np.zeros(10)\n file_src_ext = np.array(['', '', '', '', '', '', '', '', '', ''])\n cx_slit = np.zeros(10)\n cz_slit = np.zeros(10)\n\n i_abs[0] = 0 # NO ABSORPTION\n i_slit[0] = 0 # APERTURING\n i_stop[0] = 0 # SLIT\n k_slit[0] = 0 # RECTANGULAR\n\n rx_slit[0] = slit_width_xaxis\n rz_slit[0] = slit_height_zaxis\n cx_slit[0] = 0.0\n cz_slit[0] = 0.0\n\n self.sw._oe.set_screens(n_screen,\n i_screen,\n i_abs,\n sl_dis,\n i_slit,\n i_stop,\n k_slit,\n thick,\n file_abs,\n rx_slit,\n rz_slit,\n cx_slit,\n cz_slit,\n file_src_ext)", "def drawAxis(image, cameraMatrix, distCoeffs, rvec, tvec, length):\n pass", "def __init__(self):\n self.Robot = Robot()\n self.Omega = matrix()\n # self.Omega.value[0][0] = 1.0\n # self.Omega.value[1][1] = 1.0\n self.Xi = matrix()\n # Xi.value[0][0] = 0.0\n # Xi.value[1][0] = 0.0\n self.measure = {}\n self.landMarkCount = 0\n self.init = False\n self.bearing = 0\n self.x = 0\n self.y = 0\n \n # TODO", "def set_data(self, x = None, y = None):\n self.x_axis = x\n self.y_axis = y", "def __init__(self, ds, check_dims=True, x_periodic=True, y_periodic=True,\n z_periodic=False):\n self._ds = ds\n self._check_dims = check_dims\n self._periodic = {'X': x_periodic, 'Y': y_periodic, 'Z': z_periodic}\n\n self._axes = OrderedDict()\n for ax in ['X', 'Y']:\n # figure out what the grid dimensions are\n coord_names = comodo.get_axis_coords(ds, ax)\n ncoords = len(coord_names)\n if ncoords == 0:\n # didn't find anything for this axis\n pass\n else:\n if ncoords != 2:\n raise ValueError('Must have two different %s coordinates. 
'\n 'Instead got %s' % (ax, repr(coord_names)))\n axis_data = OrderedDict()\n for name in coord_names:\n coord = ds[name]\n axis_shift = coord.attrs.get('c_grid_axis_shift')\n if (axis_shift is None) or (axis_shift == 0):\n # we found the center coordinate\n axis_data['c'] = name\n axis_data['c_coord'] = coord\n elif (axis_shift==0.5) or (axis_shift==-0.5):\n # we found the face coordinate\n axis_data['g'] = name\n axis_data['g_coord'] = coord\n # TODO: clearly document the sign convention\n axis_data['shift'] = 1 if axis_shift==0.5 else -1\n else:\n raise ValueError('Invalid c_grid_axis_shift (%g) for '\n 'coord %s' % (axis_shift, name))\n self._axes[ax] = axis_data\n\n # check grid size consistency\n # we can deal with two cases:\n # * the c dim and g dim are the same size\n # * the g dim is one element longer than the c dim\n # define a slice used to subset\n for ax, info in iteritems(self._axes):\n clen = len(info['c_coord'])\n glen = len(info['g_coord'])\n if clen==glen:\n # all good\n self._axes[ax]['pad'] = 0\n elif clen==(glen - 1):\n self._axes[ax]['pad'] = 1\n else:\n raise ValueError(\"Incompatible c and g dimension lengths on \"\n \"axis %s (%g, %g)\" % (ax, clen, glen))", "def plotWavelet(self):\n\n a = 0\n if(self.axQT is not None):\n self._bxPlot()\n a += 1\n if(self.bxQT is not None):\n self._cxPlot()\n a += 1\n\n if(a > 0):\n return\n\n self._setupPlot()\n self._axPlot()\n self._bxPlot()\n self._cxPlot()\n self._dxPlot()\n self._endingPlot()", "def initRunningVals(self):\n self.r_Vm = [0.0]*self.mirror.dataPoints\n self.r_Va = [0.0]*self.mirror.dataPoints", "def init_axes(self):\n plt.switch_backend(\"cairo\")\n fig = plt.figure(figsize=(15,10))\n ax = fig.add_axes([0.05, 0.15, 0.9, 0.80,])\n return (fig, ax)", "def __init__(self):\n\n self.__name = '{}_{}'.format(type(self).__name__, id(self))\n self.__xax = 0\n self.__yax = 0\n\n self.addListener('zax', self.__name, self.__zaxChanged, immediate=True)\n self.__zaxChanged()", "def reset(self):\n # Don't reset axis labels\n self.range = ((0, 2, 1),) * self.ndim\n self.current_step = (0,) * self.ndim\n self.order = tuple(range(self.ndim))", "def __init__(self, x_0, y_0, initX, initY,h=5):\n self.x_0=x_0\n self.y_0=y_0\n self.x_init=initX\n self.y_init=initY\n self.step=h" ]
[ "0.6245888", "0.621319", "0.6022292", "0.6020143", "0.5990214", "0.5969029", "0.59592175", "0.5930569", "0.59223", "0.59204257", "0.58473855", "0.58251107", "0.5787647", "0.5784954", "0.5752151", "0.5738153", "0.5714067", "0.5657952", "0.56545234", "0.56490576", "0.5624587", "0.56196135", "0.56072676", "0.5602948", "0.55869204", "0.5565315", "0.55491126", "0.55393595", "0.55384016", "0.55170435", "0.5505892", "0.54880345", "0.54871964", "0.54819083", "0.54637367", "0.54509497", "0.54442304", "0.54442304", "0.543805", "0.543516", "0.5426229", "0.5418779", "0.5408822", "0.5402125", "0.53993833", "0.5389953", "0.53882486", "0.53852344", "0.53742784", "0.53589785", "0.5354877", "0.53356165", "0.5331741", "0.53259367", "0.5323735", "0.5323129", "0.53224814", "0.5308836", "0.5289885", "0.5273691", "0.52718043", "0.527041", "0.5251895", "0.5248261", "0.52463955", "0.52411616", "0.52277493", "0.52169573", "0.52169573", "0.5216802", "0.52152216", "0.52065915", "0.5202329", "0.51973915", "0.51920736", "0.519197", "0.51873374", "0.51848036", "0.5176812", "0.51718795", "0.5169494", "0.51694566", "0.5156378", "0.514629", "0.5145656", "0.5140824", "0.51326776", "0.5129849", "0.5129171", "0.51261914", "0.5116847", "0.51144993", "0.5097425", "0.50965565", "0.5094546", "0.50944996", "0.50863266", "0.50857323", "0.50847256", "0.50791985", "0.50754654" ]
0.0
-1
FF on, guider flat, FF off, open FFS 3xguider axes off, guider on
def test_goto_field_apogee_no_slew(self):
    cmdState = self.actorState.gotoField
    cmdState.reinitialize(self.cmd)
    cmdState.doSlew = False
    self._goto_feld_apogee(9, 37, 0, 0, cmdState)
{
  "objective": {
    "self": [],
    "paired": [],
    "triplet": [
      [
        "query",
        "document",
        "negatives"
      ]
    ]
  }
}
[ "def f_fixed(self):\n self.fx_free = self.fy_free = self.fz_free = False\n return self", "def _show_feature_flags(graph: nx.DiGraph, mode='sorted'):\n #plt.figure(figsize=(15, 15))\n if mode == 'sorted':\n pos = nx.multipartite_layout(graph)\n nx.draw(graph, pos, with_labels=True, arrows=True, node_color=\"#BA9DFB\")\n else:\n pos = graphviz_layout(graph)\n nx.draw_networkx(graph, pos, arrows=True, node_color=\"#BA9DFB\")\n plt.show()\n plt.clf()", "def gff_init():\n pass", "def displayFiducial(self):\n #obsolete?\n profbox()\n modelNodes = slicer.util.getNodes('vtkMRMLModelNode*')\n for modelNode in modelNodes.values():\n displayNode = modelNode.GetDisplayNode()\n if modelNode.GetAttribute(\"segmented\") == \"1\" and modelNode.GetAttribute(\"nth\")!=None:\n if 1:\n i = int(modelNode.GetAttribute(\"nth\"))\n if self.fiducialnode[i] == 0: \n polyData = modelNode.GetPolyData()\n nb = int(polyData.GetNumberOfPoints()-1)\n coord = [0,0,0]\n if nb>10:\n self.fiducialnode[i] = slicer.vtkMRMLAnnotationFiducialNode()\n polyData.GetPoint(nb,coord) \n self.fiducialnode[i].SetName(self.option[i])\n self.fiducialnode[i].SetFiducialCoordinates(coord) \n self.fiducialnode[i].Initialize(slicer.mrmlScene)\n self.fiducialnode[i].SetLocked(1)\n self.fiducialnode[i].SetSelectable(0)\n fidDN = self.fiducialnode[i].GetDisplayNode()\n fidDN.SetColor(modelNode.GetDisplayNode().GetColor())\n fidDN.SetGlyphScale(0)\n fidTN = self.fiducialnode[i].GetAnnotationTextDisplayNode()\n fidTN.SetTextScale(3)\n fidTN.SetColor(modelNode.GetDisplayNode().GetColor())\n \n self.fiducialnode[i].SetDisplayVisibility(modelNode.GetDisplayNode().GetVisibility())\n else: \n if modelNode.GetDisplayNode().GetVisibility():\n self.fiducialnode[i].SetDisplayVisibility(abs(self.fiducialnode[i].GetDisplayVisibility()-1))\n if self.fiducialnode[i].GetDisplayVisibility()==1:\n self.displayFiducialButton.text = \"Hide Labels on Needles\"\n else:\n self.displayFiducialButton.text = \"Display Labels on Needles\"", "def set_flammable(self, f):\n self.flammable = f", "def no_gradient_fusion():\n pass", "def zguider():\n gzero.gxoff = camera.status.guider[0] + gzero.gxoff\n gzero.gyoff = camera.status.guider[1] + gzero.gyoff\n guider(0,0)\n f = open('/data/guidezero','w')\n cPickle.dump(gzero,f)\n f.close()", "def displayFiducial(self):\r\n # obsolete?\r\n profbox()\r\n modelNodes = slicer.util.getNodes('vtkMRMLModelNode*')\r\n for modelNode in modelNodes.values():\r\n displayNode = modelNode.GetDisplayNode()\r\n if modelNode.GetAttribute(\"segmented\") == \"1\" and modelNode.GetAttribute(\"nth\") != None:\r\n if 1:\r\n i = int(modelNode.GetAttribute(\"nth\"))\r\n if self.fiducialnode[i] == 0:\r\n polyData = modelNode.GetPolyData()\r\n nb = int(polyData.GetNumberOfPoints() - 1)\r\n coord = [0, 0, 0]\r\n if nb > 10:\r\n self.fiducialnode[i] = slicer.vtkMRMLAnnotationFiducialNode()\r\n polyData.GetPoint(nb, coord)\r\n self.fiducialnode[i].SetName(self.option[i])\r\n self.fiducialnode[i].SetFiducialCoordinates(coord)\r\n self.fiducialnode[i].Initialize(slicer.mrmlScene)\r\n self.fiducialnode[i].SetLocked(1)\r\n self.fiducialnode[i].SetSelectable(0)\r\n fidDN = self.fiducialnode[i].GetDisplayNode()\r\n fidDN.SetColor(modelNode.GetDisplayNode().GetColor())\r\n fidDN.SetGlyphScale(0)\r\n fidTN = self.fiducialnode[i].GetAnnotationTextDisplayNode()\r\n fidTN.SetTextScale(3)\r\n fidTN.SetColor(modelNode.GetDisplayNode().GetColor())\r\n\r\n self.fiducialnode[i].SetDisplayVisibility(modelNode.GetDisplayNode().GetVisibility())\r\n else:\r\n if 
modelNode.GetDisplayNode().GetVisibility():\r\n self.fiducialnode[i].SetDisplayVisibility(abs(self.fiducialnode[i].GetDisplayVisibility() - 1))\r\n if self.fiducialnode[i].GetDisplayVisibility() == 1:\r\n self.displayFiducialButton.text = \"Hide Labels on Needles\"\r\n else:\r\n self.displayFiducialButton.text = \"Display Labels on Needles\"", "def update_flags(self):\n # view mode, filled vs wirefrom\n if self.view['wireframe']:\n gl.glPolygonMode(gl.GL_FRONT_AND_BACK, gl.GL_LINE)\n else:\n gl.glPolygonMode(gl.GL_FRONT_AND_BACK, gl.GL_FILL)\n\n # set fullscreen or windowed\n self.set_fullscreen(fullscreen=self.view['fullscreen'])\n\n # backface culling on or off\n if self.view['cull']:\n gl.glEnable(gl.GL_CULL_FACE)\n else:\n gl.glDisable(gl.GL_CULL_FACE)\n\n # case where we WANT an axis and NO vertexlist\n # is stored internally\n if self.view['axis'] and self._axis is None:\n from .. import creation\n # create an axis marker sized relative to the scene\n axis = creation.axis(origin_size=self.scene.scale / 100)\n # create ordered args for a vertex list\n args = rendering.mesh_to_vertexlist(axis)\n # store the axis as a reference\n self._axis = self.batch.add_indexed(*args)\n\n # case where we DON'T want an axis but a vertexlist\n # IS stored internally\n elif not self.view['axis'] and self._axis is not None:\n # remove the axis from the rendering batch\n self._axis.delete()\n # set the reference to None\n self._axis = None", "def calc_fffb_inhibition(self) -> None:\n # Feedforward inhibition\n ffi = self.spec.ff * max(self.avg_net - self.spec.ff0, 0)\n # Feedback inhibition\n self.fbi = self.spec.fb_dt * (self.spec.fb * self.avg_act - self.fbi)\n # Global inhibition\n self.gc_i = self.spec.gi * (ffi * self.fbi)", "def _fp_setup2(self):\n # TODO: right now it's hard to implement this required stage", "def fc_visual_save(fc, lowweight, savenamefile_prefix):\r\n\r\n\r\n ### text setup for brain areas ###\r\n pos_text_lefttop1 = [-80, 50, 30]\r\n pos_text_middletop1 = [120, 50, 30]\r\n pos_text_lefttop2 = [-80, 70, 10]\r\n pos_text_leftDown1 = [-80, 550, 30]\r\n pos_text_leftDown2 = [-80, 570, 10]\r\n pos_text_leftDown3 = [-80, 580, 10]\r\n \r\n texts_org = dict()\r\n\r\n lowweight = np.round(lowweight, decimals = 2) \r\n\r\n # plot\r\n df_chninf = assign_coord2chnArea(area_coord_file, fc['chnAreas'])\r\n for ci, cond in enumerate(fc['ciCOH'].keys()):\r\n ciCOH = fc['ciCOH'][cond]\r\n ntrials, ntemp = fc['setup']['ntrials_' + cond], fc['setup']['ntemp_' + cond]\r\n\r\n\r\n texts = texts_org.copy()\r\n \r\n text_thred = 'thred = ' + str(np.round(lowweight, decimals = 2))\r\n text_ntrials = 'ntrials = ' + str(ntrials)\r\n\r\n texts[cond] = pos_text_middletop1\r\n texts[text_task] = pos_text_leftDown1\r\n texts[text_ntrials] = pos_text_leftDown2\r\n texts[text_thred] = pos_text_leftDown3\r\n \r\n\r\n saveFCGraph = os.path.join(savefolder, savenamefile_prefix + '_lw' + str(np.round(lowweight, decimals = 2)) + '_' + cond + '.png')\r\n\r\n igplot = ciCOH_visual_save(ciCOH = ciCOH, chnInf = df_chninf, lowweight = lowweight, \r\n savefile = saveFCGraph, texts = texts, threds_edge = None)\r\n\r\n del texts[cond], texts[text_ntrials]\r\n\r\n img = cv2.imread(saveFCGraph)\r\n if ci == 0:\r\n imgs = img\r\n else:\r\n imgs = np.concatenate((imgs, np.zeros((img.shape[0], 5, 3)),img), axis = 1)\r\n\r\n os.remove(saveFCGraph)\r\n\r\n # combine all conditions\r\n print(imgs.shape)\r\n saveFCGraph_comb = os.path.join(savefolder, 'comb_' + savenamefile_prefix + '_lw' + str(np.round(lowweight, decimals = 2)) + 
'.png')\r\n cv2.imwrite(saveFCGraph_comb, imgs)", "def setup_fermi(self):\n eventclass=5 # 2 (Source) or 5 (UltracleanVeto)\n eventtype=0 # 0 (all), 3 (bestpsf) or 5 (top3 quartiles)\n mask_type='top300'\n force_mask_at_bin_number=10\n\n self.f1 = fp.fermi_plugin(maps_dir,fermi_data_dir=fermi_data_dir,work_dir=work_dir,CTB_en_min=0,CTB_en_max=40,nside=self.nside,eventclass=eventclass,eventtype=eventtype,newstyle=1,data_July16=True)\n\n if mask_type != 'False':\n self.f1.make_ps_mask(mask_type = mask_type,force_energy = True,energy_bin = force_mask_at_bin_number)\n self.f1.add_diffuse_newstyle(comp = 'p7', eventclass = eventclass, eventtype = eventtype)\n self.f1.add_bubbles(comp='bubs') #bubbles\n self.f1.add_iso(comp='iso') #iso\n self.f1.add_ps_model(comp='ps_model')\n\n # Exposure correct J_map_arr\n self.J_map_arr *= self.f1.CTB_exposure_maps\n\n # Add J-factor map with mean 1 in each energy bin\n self.f1.add_template_by_hand('J_map',np.array([self.J_map_arr[i]/np.mean(self.J_map_arr[i]) for i in range(40)]))", "def sink_floats(qtile):\n for window in qtile.current_group.windows:\n if window.floating:\n window.toggle_floating()", "def SetStandbyFPMode(self):\n handler = self.get_command_object(\"SetStandbyFPMode\")\n handler()", "def isFim(self):\r\n return", "def update_focal_axes(self):\n #self.update_sigma()\n self.updateGL()", "def setup(self, flags):\n self.figure = pylab.figure(1)\n self.axes = {}\n self.stream_data = {}\n self.flags = flags", "def set_fence_mode(self, on):\r\n return self._arm.set_fense_mode(on)", "def main(argv=None):\n\n if argv is None:\n argv = sys.argv\n\n parser = E.OptionParser(\n version=\"%prog version: $Id: gff2plot.py 2781 2009-09-10 11:33:14Z andreas $\", usage=globals()[\"__doc__\"])\n\n parser.add_option(\"-f\", \"--file\", dest=\"filenames\", type=\"string\",\n help=\"files[s] to take data from,stdin = -.\")\n parser.add_option(\"\", \"--symbols\", dest=\"symbols\", type=\"string\",\n help=\"symbols to use for each histogram [steps|...].\")\n parser.add_option(\"--slide-show\", dest=\"slide_show\", type=\"choice\",\n choices=(\"first\", \"all\", \"sequence\"),\n help=\"do a slide show - otherwise, write image to file.\")\n parser.add_option(\"--config\", dest=\"filename_config\", type=\"string\",\n help=\"filename of track configuration file.\")\n parser.add_option(\"--dpi\", dest=\"dpi\", type=\"int\",\n help=\"dpi for hardcopy output.\")\n parser.add_option(\"--window-size\", dest=\"window_size\", type=\"int\",\n help=\"window-size.\")\n parser.add_option(\"--output-filename-pattern\", dest=\"output_pattern_image\", type=\"string\",\n help=\"output pattern for images. 
Should contain a '%(contig)s' pattern .\")\n parser.add_option(\"--global-colours\", dest=\"global_colours\", action=\"store_true\",\n help=\"cycle through colours for all tracks.\")\n\n parser.set_defaults(\n filenames=None,\n symbols=\"k-,b-,r-,c-,m-,y-,g-\",\n output_pattern_image=\"%(contig)s.png\",\n slide_show=None,\n window_size=None,\n filename_config=None,\n dpi=None,\n global_colours=False,\n )\n\n (options, args) = E.Start(parser)\n options.symbols = options.symbols.split(\",\")\n\n #--------------------------------------------------------\n # collect all the data\n # list of data per source and contig\n tracks = {}\n extra_features = {}\n\n if options.filenames:\n options.filenames = options.filenames.split(\",\")\n\n if len(args) > 0:\n options.filenames = args\n\n if options.filenames:\n\n for filename in options.filenames:\n\n if filename == \"-\":\n infile = sys.stdin\n else:\n infile = IOTools.openFile(filename)\n\n data = readData(infile)\n\n if filename != \"-\":\n infile.close()\n\n track[filename] = Track(title=filename, data=data)\n\n elif options.filename_config:\n # get track information from config file\n config = ConfigParser.ConfigParser()\n config.read(os.path.expanduser(options.filename_config))\n\n # first extract special sections\n for section in config.sections():\n if section == \"vlines\":\n infile = IOTools.openFile(config.get(section, \"filename\"), \"r\")\n data = readData(infile)\n infile.close()\n extra_features[section] = Track(title=section,\n data=data,\n config=config)\n config.remove_section(section)\n elif section in (\"figure\", \"legend\"):\n extra_features[section] = Track(title=section,\n data=None,\n config=config)\n config.remove_section(section)\n n = 0\n for section in config.sections():\n\n if config.has_option(section, \"filename\"):\n infile = IOTools.openFile(config.get(section, \"filename\"), \"r\")\n data = readData(infile)\n infile.close()\n\n tracks[section] = Track(title=section,\n data=data,\n priority=n,\n config=config)\n\n elif config.has_option(section, \"tracks\"):\n subtracks = config.get(section, \"tracks\")\n subtracks = map(lambda x: x.strip(), subtracks.split(\",\"))\n\n tracks[section] = Track(title=section,\n data=None,\n config=config,\n priority=n,\n subtracks=subtracks)\n n += 1\n\n # compile set of all contigs\n contigs = set()\n for track in tracks.values():\n if track.mData:\n contigs = contigs.union(track.mData.keys())\n\n # re-arrange tracks and subtracks\n tracks = layoutTracks(tracks)\n\n nplots = 0\n figures = []\n legend = None\n for contig in contigs:\n figure, l = plotContig(contig, tracks, options,\n plot_legend=legend is None,\n extra_features=extra_features)\n figures.append(figure)\n if l:\n legend = l\n\n if options.slide_show:\n if options.slide_show == \"first\":\n pylab.show()\n elif options.slide_show == \"all\":\n pylab.show()\n elif options.slide_show == \"sequence\":\n pylab.show()\n else:\n\n extra_args = {}\n if options.dpi:\n extra_args['dpi'] = options.dpi\n\n for contig, figure in zip(contigs, figures):\n params = {'contig': contig}\n filename = options.output_pattern_image % params\n E.info(\"# creating image: %s\" % filename)\n figure.savefig(os.path.expanduser(filename), **extra_args)\n if legend:\n params = {'contig': \"legend\"}\n filename = options.output_pattern_image % params\n E.info(\"creating image: %s\" % filename)\n legend.savefig(os.path.expanduser(filename), **extra_args)\n\n E.info(\"ninput=%i, ncontigs=%i, nplots=%i\" %\n (len(tracks), nplots, len(contigs)))\n\n 
E.Stop()", "def _fctl(self):\n if self._fctl_written:\n return\n data = struct.pack(\n \">4L2H2B\",\n self.width, self.height, 0, 0,\n self.delay_numerator, self.delay_denominator,\n 1, 0)\n self._chunk(b\"fcTL\", self._seqno() + data)\n self._fctl_written = True", "def go_infFD(self):\n\n response = self.send_lens_cmd(['05', '00', '00', '00'], fast_mode=True)\n self.wait_focus_move()", "def addFluxcal():\n # Overall\n i = s.getScriptInt(odi.INDX_INT_NOBS_FLUX) - 1\n if i < 0: i = 0\n s.setScriptInt(odi.INDX_INT_NOBS_FLUX, i)\n\n # Primary\n i = s.getScriptInt(odi.INDX_INT_NOBS_FLUXPRI) - 1\n if i < 0: i = 0\n s.setScriptInt(odi.INDX_INT_NOBS_FLUXPRI, i)", "def HasFOV(self):\n return _gmat_py.Hardware_HasFOV(self)", "def main_multimodal_fusion(im_vis, im_ir, kernel, levels, window_size):\n\n im_vis = convert_image_to_floats(im_vis)\n im_ir = convert_image_to_floats(im_ir)\n\n im_vis_hsv = rgb2hsv(im_vis)\n value_channel = im_vis_hsv[:, :, 2]\n\n plt.subplot(1, 2, 1)\n plt.imshow(value_channel, cmap='gray')\n plt.subplot(1, 2, 2)\n plt.imshow(im_ir, cmap='gray')\n plt.show()\n\n # kernels to compute visibility\n kernel1 = classical_gaussian_kernel(5, 2)\n kernel2 = classical_gaussian_kernel(5, 2)\n\n # Computation of local entropy, local contrast and visibility for value channel\n local_entropy_value = normalized_local_entropy(value_channel, window_size)\n local_contrast_value = local_contrast(value_channel, window_size)\n visibility_value = visibility(value_channel, kernel1, kernel2)\n # Combination of local entropy, local contrast and visibility for value channel\n weight_value = weight_combination(local_entropy_value, local_contrast_value, visibility_value, 1, 1, 1)\n\n # Computation of local entropy, local contrast and visibility for IR image\n local_entropy_ir = normalized_local_entropy(im_ir, window_size)\n local_contrast_ir = local_contrast(im_ir, window_size)\n visibility_ir = visibility(im_ir, kernel1, kernel2)\n # Combination of local entropy, local contrast and visibility for IR image\n weight_ir = weight_combination(local_entropy_ir, local_contrast_ir, visibility_ir, 1, 1, 1)\n\n plt.subplot(2, 3, 1)\n plt.imshow(local_entropy_value, cmap='gray')\n plt.subplot(2, 3, 2)\n plt.imshow(local_contrast_value, cmap='gray')\n plt.subplot(2, 3, 3)\n plt.imshow(visibility_value, cmap='gray')\n plt.subplot(2, 3, 4)\n plt.imshow(local_entropy_ir, cmap='gray')\n plt.subplot(2, 3, 5)\n plt.imshow(local_contrast_ir, cmap='gray')\n plt.subplot(2, 3, 6)\n plt.imshow(visibility_ir, cmap='gray')\n plt.show()\n\n # Normalising weights of value channel and IR image\n weightN_value, weightN_ir = weight_normalization(weight_value, weight_ir)\n\n plt.subplot(1, 2, 1)\n plt.imshow(weightN_value, cmap='gray')\n plt.subplot(1, 2, 2)\n plt.imshow(weightN_ir, cmap='gray')\n plt.show()\n\n # Creating Gaussian pyramids of the weights maps of respectively the value channel and IR image\n gauss_pyr_value_weights = gaussian_pyramid(weightN_value, kernel, levels)\n gauss_pyr_ir_weights = gaussian_pyramid(weightN_ir, kernel, levels)\n\n # Creating Laplacian pyramids of respectively the value channel and IR image\n lap_pyr_value = laplacian_pyramid(value_channel, kernel, levels)\n lap_pyr_ir = laplacian_pyramid(im_ir, kernel, levels)\n\n # Creating the fused Laplacian of the two modalities\n lap_pyr_fusion = fused_laplacian_pyramid(gauss_pyr_value_weights, gauss_pyr_ir_weights, lap_pyr_value, lap_pyr_ir)\n\n i = 1\n for l in lap_pyr_fusion:\n plt.subplot(1, len(lap_pyr_fusion), i)\n plt.imshow(l, cmap='gray')\n i 
+= 1\n plt.show()\n\n # Creating the Gaussian pyramid of value channel in order to collapse the fused Laplacian pyramid\n gauss_pyr_value = gaussian_pyramid(value_channel, kernel, levels)\n collapsed_image = collapse_pyramid(lap_pyr_fusion, gauss_pyr_value)\n\n # Replacing the value channel in HSV visible image by the collapsed image\n im_vis_hsv_fusion = im_vis_hsv.copy()\n im_vis_hsv_fusion[:, :, 2] = collapsed_image\n im_vis_rgb_fusion = hsv2rgb(im_vis_hsv_fusion)\n\n plt.subplot(1, 2, 1)\n plt.imshow(im_vis)\n plt.subplot(1, 2, 2)\n plt.imshow(im_vis_rgb_fusion)\n plt.show()", "def _checkForSixaxis():\n return sixaxis.init(\"/dev/input/js1\")", "def outputFO(self, outfile):\n\n#fof(axiom_0,axiom,\n# ( ! [V2] : ? [V1]:\n# ( p(V2) | p(V1) ) & (~p(V2) | ~p(V1) ) & ( p(V2) | ~p(V1) ) )).\n# outfile.write(\"\"\"cnf(rule_true,axiom, p(1)).\n#cnf(rule_false,axiom, ~p(0)).\n#\"\"\")\n outfile.write(\"fof(quant,axiom,(\\n\\t\")\n for q in self.__quantifierList:\n\n if q.qtype == \"a\":\n outfile.write(\" ! \")\n elif q.qtype == \"e\":\n outfile.write(\" ? \")\n variables = [\"V%d\" % x for x in q.getVariableNames()]\n \n outfile.write(\"[ %s ] : \\n\\t\" % \",\".join(variables))\n clauselist = [] \n outfile.write(\" ( \\n\\t p(true) & ~p(false) & \\n \") \n for c in self.__clauseList:\n clause = []\n clause.append(\"( \")\n vlist = []\n for var in c.varlist:\n if var.inv:\n vlist.append(\"~p(V%s)\" % var.name)\n else:\n vlist.append(\" p(V%s)\" % var.name)\n clause.append(\" | \".join(vlist))\n clause.append(\") \")\n clauselist.append(\"\".join(clause))\n outfile.write(\"\\n\\t & \".join(clauselist))\n outfile.write(\" ) \");\n outfile.write(\"\\n)).\")", "def OnFloated(self, event):\n self._floating = True\n wx.PostEvent(self, wxDockPaneFloatedEvent())", "def set_udfs(self):\n\n flowcell_type = self.process.all_inputs()[0].udf.get('Flowcell Type')\n\n for key, val in self.process_settings[flowcell_type].items():\n self.process.udf[key] = val\n self.process.put()\n\n for art in self.artifacts:\n for key, val in self.artifact_settings[flowcell_type].items():\n art.udf[key] = val\n art.put()", "def __set_FIP(self,FP_num):\n\t\tsize = len(self.matrix)\n\t\tif FP_num == 1:\n\t\t\t[row,col] = [0,0]\n\t\telif FP_num == 2:\n\t\t\t[row,col] = [0,size-7]\n\t\telif FP_num == 3:\n\t\t\t[row,col] = [size-7,0]\n\n\t\tfor r in range(7):\n\t\t\tfor c in range(7):\n\t\t\t\tif (0 <= r and r <= 6 and (c ==0 or c == 6) or (0 <= c and c <= 6 and (r == 0 or r == 6))\n\t\t\t\t\tor (2 <= r and r <= 4 and 2 <= c and c <= 4)):\n\t\t\t\t\tself.matrix[row+r][col+c] = 1\n\t\t\t\telse:\n\t\t\t\t\tself.matrix[row+r][col+c] = 0\n\n\t\t\n\t\tif FP_num == 1:\n\t\t\tself.matrix[7][0:8] = [0] * 8\n\t\t\tfor i in range(0,8):\n\t\t\t\tself.matrix[i][7] = 0\n\t\telif FP_num == 2:\n\t\t\tself.matrix[7][size-8:size] = [0] * 8\n\t\t\tfor i in range(0,8):\n\t\t\t\tself.matrix[i][size-8] = 0\n\t\telif FP_num == 3:\n\t\t\tself.matrix[size-8][0:8] = [0] * 8\n\t\t\tfor i in range(size-8,size):\n\t\t\t\tself.matrix[i][7] = 0", "def SetFloatable(self, floatable):\n if self._floatable != floatable:\n self._floatable = floatable\n def closure(pane):\n pane.Floatable(floatable)\n self._PaneInfoOperation(closure)", "def setupconfig():\n from Manager import Studio\n studio = Studio.Instance\n cfgeff = studio.configEffect_st\n cfgeff.bloomToggle.isOn = False\n cfgeff.vignetteToggle.isOn = False\n cfgeff.sunShaftsToggle.isOn = False\n cfgeff.fogToggle.isOn = False\n cfgeff.depthOfFieldToggle.isOn = False\n #cfgeff.ssaoToggle.isOn = True\n 
#cfgeff.selfShadowToggle.isOn = True\n \n # Turn off backgrounds\n studio.uiBGChanger.onOffToggle.isOn = False", "def late_gradient_fusion():\n pass", "def float_to_front(qtile):\n for window in qtile.current_group.windows:\n if window.floating:\n window.cmd_bring_to_front()", "def friewallOn():\n pass", "def paint_focal_axes(self):\n GL.glTranslatef(*self.focus) # translate to focus\n self.paint_axes(self.sigma)\n GL.glTranslatef(*-self.focus) # translate back", "def toggle_fore_mod(self, checked):\n for tile in self.tiles:\n tile.toggle_fore_mod(checked)", "def early_gradient_fusion():\n pass", "def do_fit(self):\n\n if (self._flag == 1):\n self._gf = [0.2]\n self._gf = self.par*(self._num_fu*len(self._sites)*2)\n x, F = self.read_from_file(\n self._sn, self._qn, self._path) # read data from the file\n # ,ftol=1.0e-7,xtol=1.0e-8)\n popt, pcov = curve_fit(\n self.modelfun, x, F, p0=self._gf, maxfev=5000)\n self._gf = popt\n\n elif (self._flag == 2):\n\n# par=[0.0]*(self._num_fu*5)\n# for j in range(self._num_fu):\n# par[j*5]=0.0*math.copysign(1,(pow(-1,j)))\n# self._gf[j*5]=0.1\n# par[j*5+1]=6.45\n# par[j*5+2]=0.0\n# par[j*5+3]=0.05\n# par[j*5+4]=1.0\n\n X, F = self.read_from_file(self._sn, self._qn, self._path) # read data from the file\n\n# height, xx, width=self.moments(F)\n# Tracer()()\n# par=[0.0]*(self._num_fu*5)\n# for j in range(self._num_fu):\n# par[j*5]=x[0,xx]\n# par[j*5]=X[0,xx]*math.copysign(1,(pow(-1,j)))\n# par[j*5+1]=X[1,xx]\n# par[j*5+2]=X[2,xx]\n# par[j*5+3]=0.007\n# par[j*5+4]=height*math.copysign(1,(pow(-1,j)))\n\n xi, yi, zi = np.mgrid[-6.5:6.5:160j, 4.0:8.9:160j, -7.5:7.5:160j]\n x, y, z = xi.flatten(), yi.flatten(), zi.flatten()\n XX = np.vstack((x, y, z))\n\n invdisttree = Invdisttree(X.T, F, leafsize=10, stat=1)\n AA = invdisttree(XX.T, nnear=130, eps=0, p=1)\n\n# aaa1,bbb1=self.detect_local_minima(-AA.reshape(xi.shape))\n# aaa2,bbb2=self.detect_local_maxima(-AA.reshape(xi.shape))\n if self.peaks==[]:\n print('\\n---------------------------------------------------------------------')\n print('Detecting maxima and minima of target function...',)\n\n peaks_min, min_coord, peaks_max, max_coord = self.detect_min_max(AA.reshape(xi.shape))\n print('done')\n print('Number of the min peaks: {}'.format(len(peaks_min)))\n print('Number of the max peaks: {}'.format(len(peaks_max)))\n print('---------------------------------------------------------------------\\n')\n # fig=plt.figure()\n # ax = fig.add_subplot(111, projection='3d')\n # ax.plot_surface(xi[:,:,60],yi[:,:,60],bbb2[:,:,60], cmap=cm.jet, linewidth=0.2)\n # plt.hold(True)\n # plt.show()\n\n if peaks_max==[]:\n peaks=np.insert(peaks_min, np.arange(len(peaks_max)), peaks_max)\n coords=np.insert(min_coord, np.arange(max_coord.shape[1]), max_coord, axis=1)\n else:\n peaks = np.insert(peaks_max, np.arange(len(peaks_min)), peaks_min)\n coords = np.insert(max_coord, np.arange(min_coord.shape[1]), min_coord, axis=1)\n\n self.peaks=peaks\n self.coords=coords\n\n par = [0.0]*(self._num_fu*5)\n j1 = 0\n aaaa = 1\n for j in range(self._num_fu):\n if (j > aaaa*self.coords.shape[1]-1):\n j1 = 0\n aaaa += 1\n par[j*5] = xi[self.coords[0, j1], self.coords[0, j1], self.coords[0, j1]]\n par[j*5+1] = yi[self.coords[1, j1], self.coords[1, j1], self.coords[1, j1]]\n par[j*5+2] = zi[self.coords[2, j1], self.coords[2, j1], self.coords[2, j1]]\n # par[j*5+3] = 0.1003+0.1000*math.copysign(1, (pow(-1, j)))\n par[j*5+3] = 0.0001\n# if j < 15:\n# par[j*5+3] = 0.00001\n# else:\n# par[j*5+3] = 0.0005\n par[j*5+4] = self.peaks[j1]\n# 
print(coords[0, j1], coords[1, j1], coords[2, j1])\n j1 += 1\n # popt, pcov = curve_fit(self.modelfun1, x[:,1:20000], F[1:20000],p0=par,maxfev=150000,xtol=1e-8,ftol=1e-8)\n popt, pcov = curve_fit(\n self.modelfun1, X, F, p0=par, maxfev=150000, xtol=1e-6,\n ftol=1e-8)\n # popt, pcov = curve_fit(self.modelfun1, XX, AA, p0=par)\n self._gf = popt\n# self.error=np.diagonal(pcov, offset=0)\n# print(pcov)\n else:\n # pass\n sys.exit(\"Wrong flag in do_fit\")", "def plot_fppy(self,LAXIS,xbl,xbr,ybu,ybd,ilg): \n\t\t\n # load x GRID\n grd1 = self.xzn0\n\t\n # load DATA to plot\n plt1 = self.fppy\n\t\t\t\t\n # create FIGURE\n plt.figure(figsize=(7,6))\n\t\t\n # format AXIS, make sure it is exponential\n plt.gca().yaxis.get_major_formatter().set_powerlimits((0,0))\t\t\n\n # set plot boundaries \n to_plot = [plt1]\t\t\n self.set_plt_axis(LAXIS,xbl,xbr,ybu,ybd,to_plot)\n\t\t\t\t\n # plot DATA \n plt.title(r'pressure flux y')\n plt.plot(grd1,plt1,color='brown',label = r'f$_{py}$')\n\n # define and show x/y LABELS\n setxlabel = r\"r (cm)\"\n setylabel = r\"$f_{py}$ (erg cm$^{-2}$ s$^{-1}$)\"\n plt.xlabel(setxlabel)\n plt.ylabel(setylabel)\n\t\t\n # show LEGEND\n plt.legend(loc=ilg,prop={'size':18})\n\n # display PLOT\n plt.show(block=False)\n\n # save PLOT\n plt.savefig('RESULTS/'+self.data_prefix+'mean_fppy.png')", "def switch_frequency_plot_channel_six(self):\n if self.plot_channel_key_booleans[5]:\n self.plot_channel_key_booleans[5] = False\n self.parent_widget.graph_channel_six_button.setStyleSheet(\n \"background-color:rgb(%d,%d,%d)\" % (255, 255, 255))\n else:\n self.plot_channel_key_booleans[5] = True\n self.parent_widget.graph_channel_six_button.setStyleSheet(\n \"background-color:rgb(%d,%d,%d)\" % (LINE_COLORS[5]))", "def fiducial_evolution():\n \n # fiducial model\n wangle = 180*u.deg\n pk = pickle.load(open('../data/gd1_fiducial.pkl', 'rb'))\n x = pk['x'].to(u.kpc)\n xorig = x[:2]\n \n plt.close()\n fig, ax = plt.subplots(1,1,figsize=(6,6))\n \n plt.sca(ax)\n \n Nsnap = 8\n times = np.linspace(0,0.5,Nsnap)[::-1]\n angles = np.linspace(0,322,Nsnap)[::-1]*u.deg\n\n for e, t in enumerate(times):\n c = mpl.cm.Blues(0.05+0.85*(Nsnap-e)/Nsnap)\n #a = 0.5 + 0.5*(Nsnap-e)/Nsnap\n \n pk = pickle.load(open('../data/gd1_fiducial_t{:.4f}.pkl'.format(t), 'rb'))\n x = pk['x'].to(u.kpc)\n x_, y_ = x[0], x[1]\n \n plt.plot(x_[120:-120], y_[120:-120], '.', color=c, ms=10, zorder=Nsnap-e, rasterized=False)\n \n xt = 24*np.cos(angles[e]+90*u.deg)\n yt = 24*np.sin(angles[e]+90*u.deg)\n if e<Nsnap-1:\n txt = plt.text(xt, yt, '+ {:.2f} Gyr'.format(t), va='center', ha='center', fontsize='small', color='0.2', rotation=(angles[e]).value, zorder=10)\n txt.set_bbox(dict(facecolor='w', alpha=0.7, ec='none'))\n \n plt.text(0, 24, 'Flyby', va='center', ha='center', fontsize='small', color='0.2')\n\n lim = 27\n plt.xlim(-lim,lim)\n plt.ylim(-lim,lim)\n plt.gca().set_aspect('equal')\n \n plt.xlabel('x [kpc]')\n plt.ylabel('y [kpc]')\n \n plt.tight_layout()\n plt.savefig('../plots/loop_evolution.pdf')", "def show_filters(self):\n w_mat = np.transpose(self.sess.run(self.W_fc1))\n\n plt.figure(figsize=(10,10), facecolor='w', edgecolor='w')\n plot_positions = [(0,0),(0,1),(1,0),(1,1)]\n for ch in range(self.n_input_channels):\n grid,_ = ia.image_grid_RGB( w_mat,\n n_channels=self.n_input_channels,\n image_size=(self.y_res,self.x_res), n_x=6, n_y=6,\n channel_order=(ch,ch,ch), amplitude_scaling=(1,1,1),\n line_color=1, auto_scale=True, return_borders=False )\n colormax = np.abs(grid).max()\n with sns.axes_style(\"white\"):\n ax = 
plt.subplot2grid( (2,2), plot_positions[ch] )\n ax.imshow( grid[:,:,0], interpolation='nearest',\n cmap=plt.get_cmap('seismic'),\n clim=(-1*colormax,colormax) )\n ax.set_title(\"Hidden units, channel {}\".format(ch))\n plt.axis('tight')\n plt.axis('off')\n plt.tight_layout()", "def togff(self,gff={}):\n self._gff['fstart'] = self.pos-1\n self._gff['fstop'] = self.pos\n return SpliceSiteBase.togff(self,gff=gff)", "def trigger_set_fetc(self):\n self.write(\"*DDT #15FETC?\")", "def _set_draw_mode(draw_mode):\n###############################################################################\n global _draw_mode\n _draw_mode = draw_mode", "def switch_frequency_plot_channel_four(self):\n if self.plot_channel_key_booleans[3]:\n self.plot_channel_key_booleans[3] = False\n self.parent_widget.graph_channel_four_button.setStyleSheet(\n \"background-color:rgb(%d,%d,%d)\" % (255, 255, 255))\n else:\n self.plot_channel_key_booleans[3] = True\n self.parent_widget.graph_channel_four_button.setStyleSheet(\n \"background-color:rgb(%d,%d,%d)\" % (LINE_COLORS[3]))", "def FlowMapTwoColourPlotHelper(self,filename,lsmask_filename=None,grid_type='HD',\n minflowcutoff=100,flip_data=False,flip_mask=False,\n **kwargs):\n flowmap_object = iodriver.load_field(filename,\n file_type=iodriver.get_file_extension(filename),\n field_type='Generic',\n grid_type=grid_type,**kwargs)\n lsmask_field = iodriver.load_field(lsmask_filename,\n file_type=iodriver.get_file_extension(lsmask_filename),\n field_type='Generic', grid_type=grid_type,**kwargs)\n if flip_data:\n flowmap_object.flip_data_ud()\n if flip_mask:\n lsmask_field.flip_data_ud()\n lsmask = lsmask_field.get_data()\n flowmap = flowmap_object.get_data()\n plt.figure()\n plt.subplot(111)\n flowmap[flowmap < minflowcutoff] = 1\n flowmap[flowmap >= minflowcutoff] = 2\n if lsmask is not None:\n flowmap[lsmask == 1] = 0\n cmap = mpl.colors.ListedColormap(['blue','peru','black'])\n bounds = list(range(4))\n norm = mpl.colors.BoundaryNorm(bounds,cmap.N)\n plt.imshow(flowmap,cmap=cmap,norm=norm,interpolation=\"none\")\n plt.title('Cells with cumulative flow greater than or equal to {0}'.format(minflowcutoff))", "def FG2semiFG(self, FG, flag_FG2semiFG):\n # USER IMPOSED\n # Actual FG\n if 'JUSTE FG' in flag_FG2semiFG:\n return FG\n # Antonio's files where a 'phase' parameter is used to permit changin\n # evolution condition (CR insertion) at 2 points: 15,30 MWd/tU thus\n # defining 3cicles\n if 'ESTEBAN' in flag_FG2semiFG:\n \"\"\"\n Esteban's files. 'phase' and 'burnup step' and 'burnup' depends on a single parameter. No additional point required\n Example\n input FG:[[1], [1], [1], [1], [1], [1], [1], [1], [1, 2,..., 70]]\n Output FG:[[1], [1], [1], [1], [1], [1], [1], [1], array([ 1, 2,..., 70, 71, 72])]\n \"\"\"\n FG[6] = [1] # the PHASE is a dependant variables and thus does not participates in the cartesian product\n FG[7] = [1] # the BURNUP_step is a dependant variable...\n # Antonio's files where a 'phase' parameter is used to permit changin\n # evolution condition (CR insertion) at 2 points: 15,30 MWd/tU thus\n # defining 3cicles\n if 'ANTONIO' in flag_FG2semiFG:\n \"\"\"\n Antonio's files. 
'phase' and 'burnup step' and 'burnup' depends on rules over a special index\n Example\n input FG:[[1], [1], [1], [1], [1], [1], [1], [1], [1, 2,..., 70]]\n Output FG:[[1], [1], [1], [1], [1], [1], [1], [1], array([ 1, 2,..., 70, 71, 72])]\n Two points are added, as A2 calculation at 0 burnup are mandatory, and at each new cicle a new 0 burnup calcvulation takes place\n \"\"\"\n FG[6] = [1] # the PHASE is a dependant variables and thus does not participates in the cartesian product\n FG[7] = [1] # the BURNUP_step is a dependant variable...\n # the phase depended on the index in burnup by this relationship. The number 24,48 are the indexed defined by the discretization criterion of burnup chosen by the user of A2\n # print FG\n if FG[8][-1] <= 24:\n ph = 1\n if FG[8][-1] > 24 and FG[8][-1] <= 48:\n ph = 2\n if FG[8][-1] > 48:\n ph = 3\n # print ph\n # it adds indexes as required by ph. if ph=1 then nothing gets added, if\n # ph=3 and FG[8][-1]=70 then '71' and '72' get added\n FG[8] = np.append(FG[8], range(FG[8][-1] + 1, FG[8][-1] + ph))\n if 'CR' in flag_FG2semiFG:\n \"\"\"\n In reference calculation where CR position may change, the lecture of the phase space given a CR value of [1,2] i.e. in and out. However no branch calculation acctualy takes place, and un-existing points are requested.\n \"\"\"\n FG[4] = [1]\n if 'NO BURNUP' in flag_FG2semiFG:\n FG[8] = [1]\n # Format: Making sure that index are int\n return [[int(value) for value in vec] for vec in FG]", "def toggle_wireframe(self):\n self.view['wireframe'] = not self.view['wireframe']\n self.update_flags()", "def enable3D(self):\r\n if(self.dataController.fileLoaded==True):\r\n self.dataController.toggleInteractiveMode()\r\n\r\n self.midsagittalView = False\r\n self.frontView = False\r\n self.topView = False\r\n self.bottomView = False\r\n self.threeDView = True", "def plot_rf_fi(rf, figsize=(8, 5), plot_direction='h', columns=None, max_cols_plot=None,\n color='g', title=None, errorbars=True):\n fontsize=14\n alpha=0.7\n\n importance = rf.feature_importances_\n std = np.std([tree.feature_importances_ for tree in rf.estimators_], axis=0)\n indices = np.argsort(importance)[::-1] # feature indices ordered by importance\n top_indices = indices[:max_cols_plot] # get indices of top most important features\n if columns is None:\n columns = top_indices\n else:\n columns = np.array(columns)[top_indices]\n\n # Start plotting\n fig, ax = plt.subplots(figsize=figsize)\n if title:\n ax.set_title(title)\n \n if plot_direction=='v':\n if errorbars:\n ax.bar(range(len(top_indices)), importance[top_indices], color=color, align='center', alpha=alpha,\n yerr=std[top_indices], ecolor='black')\n else:\n ax.bar(range(len(top_indices)), importance[top_indices], color=color, align='center', alpha=alpha)\n ax.set_xticks(range(len(top_indices)))\n ax.set_xticklabels(columns, rotation='vertical', fontsize=fontsize)\n ax.set_xlim([-1, len(top_indices)])\n ax.set_xlabel('Feature', fontsize=fontsize)\n ax.set_ylabel('Importance', fontsize=fontsize)\n [tick.label.set_fontsize(fontsize-4) for tick in ax.yaxis.get_major_ticks()]\n else:\n if errorbars:\n ax.barh(range(len(top_indices)), importance[top_indices], color=color, align='center', alpha=alpha,\n xerr=std[top_indices], ecolor='black')\n else:\n ax.barh(range(len(top_indices)), importance[top_indices], color=color, align='center', alpha=alpha)\n ax.set_yticks(range(len(top_indices)))\n ax.set_yticklabels(columns, rotation='horizontal', fontsize=fontsize)\n ax.set_ylim([-1, len(top_indices)])\n # 
ax.invert_yaxis()\n ax.set_ylabel('Feature', fontsize=fontsize)\n ax.set_xlabel('Importance', fontsize=fontsize)\n [tick.label.set_fontsize(fontsize-4) for tick in ax.xaxis.get_major_ticks()]\n\n # ax.grid()\n # plt.tight_layout()\n\n return indices, fig", "def graphics(env, fovea, objects, unit):\n plt.clf()\n\n env = environment.redraw(env, unit, objects)\n fovea_im = fovea.get_focus_image(env)\n\n plt.subplot(121)\n plt.title('Training environment')\n plt.xlim(0, unit)\n plt.ylim(0, unit)\n plt.imshow(env)\n\n # PLOT DESK EDGES\n plt.plot([0.2*unit, 0.2*unit, 0.8*unit, 0.8*unit, 0.2*unit],\n [0.2*unit, 0.8*unit, 0.8*unit, 0.2*unit, 0.2*unit], 'w-'\n )\n\n # PLOT FOVEA EDGES\n fov_indices = fovea.get_index_values()\n plt.plot([fov_indices[0][0], fov_indices[0][0], fov_indices[0][1],\n fov_indices[0][1], fov_indices[0][0]],\n [fov_indices[1][0], fov_indices[1][1], fov_indices[1][1],\n fov_indices[1][0], fov_indices[1][0]], 'w-'\n )\n\n plt.subplot(122)\n plt.title('Focus image')\n plt.imshow(fovea_im)\n\n plt.draw()\n plt.pause(0.01)", "def calculateFFBox(qOfFlights):\n\t# if qOfFlights == 2: rows=2; columns=1\n\t# else:\n\tpass", "def setup_to_finetune(model):\n\n for layer in model.layers[:NB_VGG_LAYERS_TO_FREEZE]:\n layer.trainable = False\n for layer in model.layers[NB_VGG_LAYERS_TO_FREEZE:]:\n layer.trainable = True\n\n # opt = Adam(lr=0.0001)\n model.compile(optimizer=SGD(lr=0.0001, momentum=0.9),\n loss='categorical_crossentropy',\n metrics=['categorical_accuracy', f1_score])", "def main():\n\n robot = S2Fluke2( \"/dev/rfcomm2\" )\n\n print( \"getVersion :\", robot.getVersion() )\n print( \"identifyRobot :\", robot.identifyRobot() )\n print( \"getBattery :\", robot.getBattery() )\n print( \"setForwardness: SCRIBBLER_FORWARD\" )\n robot.setForwardness( robot.SCRIBBLER_FORWARD )\n print( \"setForwardness: FLUKE_FORWARD\" )\n robot.setForwardness( robot.FLUKE_FORWARD )\n print( \"setForwardness: SCRIBBLER_FORWARD\" )\n robot.setForwardness( robot.SCRIBBLER_FORWARD )\n print( \"getErrors : \" )\n print( robot.getErrors() )\n robot.resetScribbler()\n\n robot.close()", "def use_fscale(self,use_fscale):\n if type(use_fscale).__name__ == 'bool':\n self._use_fscale = use_fscale\n else:\n raise KINSOL_Exception(\"The variable sent to 'use_fscale' must be a boolean.\")", "def fullcore_detectors():\n\n cwd = os.getcwd()\n fname = get_sample_data('%s/oecd-fullcore_geom1.png' % (cwd))\n im = plt.imread(fname)\n\n # crop the image\n height, width, color = np.shape(im)\n y1 = int(height*0.15)\n y2 = int(height*0.6)\n x1 = int(width*0.45)\n x2 = int(width)\n plt.imshow(im[y1:y2,x1:x2,:])\n plt.axis('off')\n\n # Axial 1\n x = 158\n y = 291\n P = 55\n s = P/2/np.cos(np.pi/6)\n plt.plot([s+x, 2*s+x], [0+y, 0+y], 'r-', lw=1.5, label='1- Axial1')\n plt.plot([s+x, 2*s+x], [P+y, P+y], 'r-', lw=1.5)\n plt.plot([s+x, s/2+x], [0+y, P/2+y], 'r-', lw=1.5)\n plt.plot([s/2+x, s+x], [P/2+y, P+y], 'r-', lw=1.5)\n plt.plot([2*s+x, 2*s+s/2+x], [0+y, P/2+y], 'r-', lw=1.5)\n plt.plot([2*s+s/2+x, 2*s+x], [P/2+y, P+y], 'r-', lw=1.5)\n\n plt.text(x=x+37, y=y+40, s='1', fontsize=20, color='w')\n\n # Axial 2\n x = 210\n y = 321\n P = 55\n s = P/2/np.cos(np.pi/6)\n plt.plot([s+x, 2*s+x], [0+y, 0+y], 'r-', lw=1.5, label='2- Axial2')\n plt.plot([s+x, 2*s+x], [P+y, P+y], 'r-', lw=1.5)\n plt.plot([s+x, s/2+x], [0+y, P/2+y], 'r-', lw=1.5)\n plt.plot([s/2+x, s+x], [P/2+y, P+y], 'r-', lw=1.5)\n plt.plot([2*s+x, 2*s+s/2+x], [0+y, P/2+y], 'r-', lw=1.5)\n plt.plot([2*s+s/2+x, 2*s+x], [P/2+y, P+y], 'r-', lw=1.5)\n plt.text(x=x+37, 
y=y+40, s='2', fontsize=20, color='w')\n\n # Axial 3\n x = 262\n y = 291\n P = 55\n s = P/2/np.cos(np.pi/6)\n plt.plot([s+x, 2*s+x], [0+y, 0+y], 'r-', lw=1.5, label='3- Axial3')\n plt.plot([s+x, 2*s+x], [P+y, P+y], 'r-', lw=1.5)\n plt.plot([s+x, s/2+x], [0+y, P/2+y], 'r-', lw=1.5)\n plt.plot([s/2+x, s+x], [P/2+y, P+y], 'r-', lw=1.5)\n plt.plot([2*s+x, 2*s+s/2+x], [0+y, P/2+y], 'r-', lw=1.5)\n plt.plot([2*s+s/2+x, 2*s+x], [P/2+y, P+y], 'r-', lw=1.5)\n\n plt.text(x=x+37, y=y+40, s='3', fontsize=20, color='w')\n\n # Radial 1\n x = 52\n y = 349\n plt.plot([x, 495+x], [y, y], 'r-', lw=1.5, label='4- Radial1')\n plt.plot([x, 495+x], [y, y], 'r-', lw=1.5, label='5- Radial2')\n plt.text(x=x+380, y=y-10, s='4, 5', fontsize=20, color='black')\n\n # Radial 2\n x = 52\n y = 349\n L = 495\n plt.plot([x, L*np.cos(np.pi/6)+x], [y, -L/2+y], 'r-', lw=1.5, label='6- Radial3')\n plt.text(x=350, y=y-200, s='6', rotation=30, fontsize=20, color='black')\n plt.legend(loc='best')\n\n plt.savefig(\"oecd-fullcore-detectors\", dpi=300, bbox_inches=\"tight\")", "def main():\n\n varList = {'beta': 6., 'convSpeed': 1.2, 'Mark': 0., 'axi': 1, 'acModes': 4, 'Nr': 801, 'Tf': 600., 'xf': 0.51}\n\n # Solve steady flame.\n # BC1: I have the attachment BC at r = 1, always\n # BC2: I need to set dF/dr = 0 at r = 0 iff Mark != 0\n [qMean, r, FMean] = steady_flame_area_FD3(varList['Mark'], varList['beta'], varList['axi'], varList['Nr'])\n r = r * varList['beta']\n\n # Calculate mean flame derivatives\n dFMeanDr = derivsnew.FD1_CT2_D(FMean, r[1] - r[0])\n d2FMeanDr2 = derivsnew.FD2_CT2_D(FMean, r[1] - r[0])\n\n #Apply BC smooth tip:\n if(varList['Mark']!=0.0):\n dFMeanDr[-1] = 0.0\n\n # Use correct number of points. Remember that the extrems need to be set depending on the BC!\n # The attach BC (first point) is always assumed to be true and removed from the vector list\n if(varList['Mark']==0):\n Nr = varList['Nr'] / 2\n dFMeanDr = dFMeanDr[1:]\n d2FMeanDr2 = d2FMeanDr2[1:]\n r = r[1:]\n # The smooth BC holds only if Mark!=0 (second derivatives appear): remove also the last point\n else:\n Nr = varList['Nr'] / 2 - 1\n dFMeanDr = dFMeanDr[1:-1]\n d2FMeanDr2 = d2FMeanDr2[1:-1]\n r = r[1:-1]\n\n # Calculate geometric values\n den = 1 + varList['beta'] * varList['beta'] * dFMeanDr * dFMeanDr\n dR = r[1] - r[0]\n # Set Nx equal to Nr for now.\n # The implementation is more complicated if they differ, and need to interpolate between values.\n Nx = Nr\n\n # Nonuniform grid spacing along x!\n # Nx = length(dx) has to hold.\n dx = np.empty(len(FMean) - 1)\n for ii in range(1, len(FMean)):\n dx[ii - 1] = FMean[ii] - FMean[ii - 1]\n\n [A, B, C, tau] = loadAcoustics(varList['xf'], varList['Tf'], varList['acModes'], varList['beta'])\n\n Matrix = buildMatrix(Nr, dR, varList['beta'], den, r, FMean, dFMeanDr, d2FMeanDr2, varList['Mark'], varList['acModes'], A,\n B, C, Nx, dx, tau, qMean, varList['convSpeed'])\n\n [d, W, V] = eigProblem.solveEigProb(Matrix)\n [dnew, Wnew, Vnew] = eigProblem.selectUnstable(d, W, V)\n\n print dnew / (2. 
* np.pi)", "def on_floated(self, event):\n if not self._guard & FLOATED_GUARD:\n self._guard |= FLOATED_GUARD\n try:\n self.declaration.floating = True\n finally:\n self._guard &= ~FLOATED_GUARD", "def findFringesInScan(self, fsu='FSUB', plot=False, calibrated=True):\n fringes_pos = []\n weight = []\n\n if plot:\n plt.figure(0)\n plt.clf()\n plt.title(self.filename+' | '+fsu+' | '+self.insmode)\n plt.xlabel('OPD '+self.DLtrack+' (m)')\n\n for k in range(self.scan_nscans):\n x = self.getScan(k, isolate=False, calibrated=calibrated,\n FUOFFSET=1, fsu=fsu, resample=False)\n opd = np.linspace(x[1].min(), x[1].max(), len(x[1]))\n scan1 = np.interp(opd, x[1], x[2]-x[4])\n scan2 = np.interp(opd, x[1], x[3]-x[5])\n sigS = np.exp(-((opd-opd.mean())/10e-6)**2)*\\\n np.sin((opd-opd.mean())/2.2e-6*2*np.pi)\n sigC = np.exp(-((opd-opd.mean())/10e-6)**2)*\\\n np.cos((opd-opd.mean())/2.2e-6*2*np.pi)\n sigS = np.roll(sigS, len(sigS)/2)\n sigC = np.roll(sigC, len(sigC)/2)\n \n fft_sigS = np.fft.fft(sigS)\n fft_sigC = np.fft.fft(sigC)\n fft_scan1 = np.fft.fft(scan1)\n fft_scan2 = np.fft.fft(scan2)\n # correlate\n powerS1 = np.abs(np.fft.ifft(fft_scan1*fft_sigS))**2\n powerC1 = np.abs(np.fft.ifft(fft_scan1*fft_sigC))**2\n powerS2 = np.abs(np.fft.ifft(fft_scan2*fft_sigS))**2\n powerC2 = np.abs(np.fft.ifft(fft_scan2*fft_sigC))**2\n power1 = powerS1+powerC1\n power2 = powerS2+powerC2\n if power1.max()>(power1.mean()+8*power1.std()):\n fringes_pos.append(opd[power1.argmax()])\n weight.append(power1.max())\n if plot:\n plt.plot(opd, (power1-power1.mean())/power1.std(),\n linewidth=2)\n print x[0].min(), x[0].max()\n #plt.plot(opd, np.interp(x[0],\n # self.raw['IMAGING_DATA_'+fsu.upper()].data.field('TIME'),\n # self.raw['IMAGING_DATA_'+fsu.upper()].data.field(OPDSNR)),\n # 'k', alpha=0.5, linewidth=3)\n else:\n if plot:\n plt.plot(opd, (power1-power1.mean())/power1.std(),\n 'k', alpha='0.5')\n\n if power2.max()>(power2.mean()+8*power2.std()):\n fringes_pos.append(opd[power2.argmax()])\n weight.append(power2.max())\n if plot:\n plt.plot(opd, (power2-power2.mean())/power2.std(),\n linewidth=2)\n else:\n if plot:\n plt.plot(opd, (power2-power2.mean())/power2.std(),\n 'k', alpha='0.5')\n\n return (np.array(fringes_pos)*np.array(weight)).sum()/\\\n np.array(weight).sum()", "def Floatable(self, b=True):\r\n \r\n return self.SetFlag(self.optionFloatable, b)", "def _cutoff(xdata, ydata, btype, fs, ff):\r\n try:\r\n# print ff\r\n if ff != None:\r\n nPts = int(1./(((xdata.max()-xdata.min())/xdata.shape[0])*(ff/10.)))\r\n else:\r\n nPts = 0\r\n if nPts%2 == 0:\r\n nPts = nPts + 1\r\n if nPts < xdata.shape[0]:\r\n nPts = xdata.shape[0]\r\n# print nPts\r\n window = np.hanning(ydata.shape[0])\r\n freq = FourierFrequency(xdata, nPts)\r\n index = np.argsort(freq)\r\n tdf = FourierTransform(ydata*window, nPts)\r\n tdf = abs(tdf)\r\n pp = _maxima(tdf[index], freq[index], lookahead = 1)\r\n# mm = _minima(tdf[index], freq[index], lookahead=1)\r\n pp, hh = np.array(np.array(pp).T[0]), np.array(np.array(pp).T[1])\r\n# mm = np.array(np.array(mm).T[0])#, np.array(np.array(mm).T[1])\r\n ind = np.where(pp == min(abs(pp)))[0][0]\r\n ind2 = np.where(hh == max(hh[(ind+1):]))[0][0]\r\n for u, i in enumerate(freq):\r\n if i > abs(pp[ind2])*1.5 or i < -abs(pp[ind2])*1.5 or (i < abs(pp[ind2])/2. and i > -abs(pp[ind2])/2.) 
or (tdf[u] > hh[ind2]*1.05): #(abs(i) < abs(mm[indmin])) or \r\n tdf[u] = 0.\r\n def lor2(x, A0, x0, gamma0):\r\n return A0*(1/np.pi)*(gamma0/2)/((x-x0)**2+(gamma0/2)**2)+A0*(1/np.pi)*(gamma0/2)/((x+x0)**2+(gamma0/2)**2)\r\n lmod2 = lmf.Model(lor2)\r\n lmod2.make_params()\r\n lmod2.set_param_hint('A0', value=max(tdf), min=max(tdf)/1000.)\r\n lmod2.set_param_hint('x0', value=abs(pp[ind2]), min=0.)\r\n lmod2.set_param_hint('gamma0', value=1., min=0.)\r\n result2 = lmod2.fit(tdf[index], x=freq[index])\r\n# print result2.values.get('x0'), result2.values.get('gamma0')\r\n if btype=='high':\r\n if result2.values.get('x0')-result2.values.get('gamma0') > 0.:\r\n# print \"frequency: \", result2.values.get('x0')-result2.values.get('gamma0')\r\n if hh[ind2] != max(hh[(ind+1):]):\r\n print \"False\", \" maximum\", \"\\n\", \"\\n\", \"\\n\"\r\n return result2.values.get('x0')-result2.values.get('gamma0')\r\n else:\r\n# print \"failed: 0\"\r\n return 0.\r\n elif btype=='low':\r\n return result2.values.get('x0')+result2.values.get('gamma0')\r\n except Exception:\r\n pass\r\n finally:\r\n pass", "def update_focal_axes(self):\n self.update_sigma()\n self.updateGL()", "def ff_callback(self):\n self.rokucontrol.ff_callback()", "def init_plot_force(nb_mus):\n # --- Curve graph --- #\n # app = pg.mkQApp(\"force\")\n # remote = []\n # layout = pg.LayoutWidget()\n # layout.resize(800, 800)\n # label = QtGui.QLabel()\n # box = []\n # rplt = []\n # row_count = 0\n # col_span = 4 if nb_mus > 8 else 8\n # for mus in range(nb_mus):\n # remote.append(rgv.RemoteGraphicsView())\n # remote[mus].pg.setConfigOptions(antialias=True)\n # app.aboutToQuit.connect(remote[mus].close)\n # box.append(QtGui.QCheckBox(f\"muscle_{mus}\"))\n # if mus >= 8:\n # layout.addWidget(box[mus], row=1, col=mus-8)\n # layout.addWidget(remote[mus], row=mus - 8 + 2, col=4, colspan=col_span)\n # else:\n # layout.addWidget(box[mus], row=0, col=mus)\n # layout.addWidget(remote[mus], row=mus + 2, col=0, colspan=col_span)\n # rplt.append(remote[mus].pg.PlotItem())\n # rplt[mus]._setProxyOptions(deferGetattr=True) ## speeds up access to rplt.plot\n # remote[mus].setCentralItem(rplt[mus])\n # layout.addWidget(label)\n # layout.show()\n # row_count += 1\n # return rplt, layout, app , box\n\n # --- Progress bar graph --- #\n # app = pg.mkQApp(\"force\")\n # layout = pg.LayoutWidget()\n # layout.resize(400, 800)\n # layout.move(0, 0)\n # box = []\n # rplt = []\n # row_count = 0\n # for mus in range(nb_mus):\n # rplt.append(QProgressBar())\n # rplt[mus].setMaximum(1000)\n # layout.addWidget(rplt[mus], row=mus, col=0)\n # layout.show()\n # row_count += 1\n # return rplt, layout, app\n\n # --- Bar graph --- #\n app = pg.mkQApp()\n layout = pg.plot()\n layout.resize(800, 800)\n rplt = pg.BarGraphItem(x=range(nb_mus), height=np.zeros((nb_mus)), width=0.3, brush=\"r\")\n layout.addItem(rplt)\n return rplt, layout, app", "def set_autofeed_mode(self, mode):\n self._info(\"set_autofeed_mode\")\n self.parent.controller.set_autofeed_mode(mode)", "def compileIF_WFS(filebase='', vmax=10.0, ftype='h5'):\n for idx in ax.cellmap:\n measureIF_WFS(idx, N=1, volt=vmax, filebase=filebase, ftype=ftype)", "def switch_frequency_plot_channel_five(self):\n if self.plot_channel_key_booleans[4]:\n self.plot_channel_key_booleans[4] = False\n self.parent_widget.graph_channel_five_button.setStyleSheet(\n \"background-color:rgb(%d,%d,%d)\" % (255, 255, 255))\n else:\n self.plot_channel_key_booleans[4] = True\n self.parent_widget.graph_channel_five_button.setStyleSheet(\n 
\"background-color:rgb(%d,%d,%d)\" % (LINE_COLORS[4]))", "def setSurfaceMeshing(state='off',shading=1):\n sdict = {'off':'OFF','on':'ON'}\n val = sdict[state]\n if not shading:\n val = 'ONLY'\n dislin.surmsh(val)", "def soff_to_fpoff(*args):\n return _ida_frame.soff_to_fpoff(*args)", "def force_show(sub_Idx):\n force_path = './dataset/subj_' + f'{sub_Idx:02d}'+ '/forces/force_' + f'{sub_Idx:02d}' + '.txt'\n image_path = './dataset/subj_' + f'{sub_Idx:02d}'+ '/images/'\n force_num = len(glob.glob(image_path + '*.jpg'))\n force_list = load_force_txt(force_path,force_num)\n print('showing '+f'{force_num:03d}'+ ' raw forces for subject ' + f'{sub_Idx:02d}')\n\n fig = plt.figure(figsize = (10, 7)) \n ax = plt.axes(projection =\"3d\") \n\n for x, y, z in force_list:\n ax.scatter3D(x, y, z, color = \"green\")\n ax.set_xlabel('X-axis', fontweight ='bold') \n ax.set_ylabel('Y-axis', fontweight ='bold') \n ax.set_zlabel('Z-axis', fontweight ='bold')\n plt.title(\"3D force data\") \n plt.show()", "def fbank(signal,samplerate=16000,winlen=0.025,winstep=0.01,\n nfilt=26,nfft=512,lowfreq=0,highfreq=None,preemph=0.97): \n highfreq= highfreq or samplerate/2\n print \"preemph %s\"%(preemph)\n signal = sigproc.preemphasis(signal,preemph)\n frames = sigproc.framesig(signal, winlen*samplerate, winstep*samplerate)\n matchframes(frames[0], frames[1])\n pspec = sigproc.powspec(frames,nfft)\n energy = pylab.sum(pspec,1) # this stores the total energy in each frame\n energy = pylab.where(energy == 0, pylab.finfo(float).eps, energy) # if energy is zero, we get problems with log\n fb = get_filterbanks(nfilt, nfft, samplerate, lowfreq, highfreq)\n print \"len(fb) %s\"%(len(fb))\n colour = \"k-\"\n for i in range(len(fb)):\n if colour == \"k-\":\n colour = \"r-\"\n else:\n colour = \"k-\"\n startedplot = False\n midpoint = 0\n for j in range(len(fb[i])):\n if fb[i][j] > 0:\n if startedplot == False:\n startedplot = j\n if j > 0:\n pylab.plot([j-1, j], [fb[i][j-1], fb[i][j]], colour)\n if fb[i][j] == 1.0:\n midpoint = j\n else:\n if not startedplot == False:\n pylab.plot([j-1, j], [fb[i][j-1], 0], colour)\n try:\n print \"slope to midpoint %.3f, slope from midpoint %.3f\"%(1.0/float(midpoint-startedplot), 1.0/float(midpoint-j+1))\n except:\n pass\n break\n pylab.show()\n feat = pylab.dot(pspec, fb.T) # compute the filterbank energies\n feat = pylab.where(feat == 0, pylab.finfo(float).eps, feat) # if feat is zero, we get problems with log\n return feat, energy", "def fig_craco_fiducial(outfile='fig_craco_fiducial.png',\n zmax=2.5,DMmax=2500,\n show_Macquart=False,\n log=True,\n label='$\\\\log_{10} \\; p(DM_{\\\\rm EG},z)$',\n Aconts=[0.01, 0.1, 0.5],\n cmap='jet', show=False, figsize=None,\n vmnx=(None,None),\n grid=None, survey=None):\n # Generate the grid\n if grid is None or survey is None:\n survey, grid = analy_H0_I.craco_mc_survey_grid()\n\n # Unpack\n full_zDMgrid, zvals, dmvals = grid.rates, grid.zvals, grid.dmvals\n FRBZ=survey.frbs['Z']\n FRBDM=survey.DMEGs\n \n ##### imshow of grid #######\n fsize = 14.\n plt.figure(figsize=figsize)\n ax1=plt.axes()\n plt.sca(ax1)\n \n plt.xlabel('z')\n plt.ylabel('${\\\\rm DM}_{\\\\rm EG}$')\n #plt.title(title+str(H0))\n \n # Cut down grid\n zvals, dmvals, zDMgrid = figures.proc_pgrid(\n full_zDMgrid, \n zvals, (0, zmax),\n dmvals, (0, DMmax))\n ddm=dmvals[1]-dmvals[0]\n dz=zvals[1]-zvals[0]\n nz, ndm = zDMgrid.shape\n\n # Contours\n alevels = figures.find_Alevels(full_zDMgrid, Aconts, log=True)\n \n # Ticks\n tvals, ticks = figures.ticks_pgrid(zvals)# , fmt='str4')\n 
plt.xticks(tvals, ticks)\n tvals, ticks = figures.ticks_pgrid(dmvals, fmt='int')# , fmt='str4')\n plt.yticks(tvals, ticks)\n\n # Image \n im=plt.imshow(zDMgrid.T,cmap=cmap,origin='lower', \n vmin=vmnx[0], vmax=vmnx[1],\n interpolation='None',\n aspect='auto')\n \n styles=['--','-.',':']\n ax=plt.gca()\n cs=ax.contour(zDMgrid.T,levels=alevels,origin='lower',colors=\"white\",linestyles=styles)\n\n ax=plt.gca()\n \n muDMhost=np.log(10**grid.state.host.lmean)\n sigmaDMhost=np.log(10**grid.state.host.lsigma)\n meanHost = np.exp(muDMhost + sigmaDMhost**2/2.)\n medianHost = np.exp(muDMhost) \n print(f\"Host: mean={meanHost}, median={medianHost}\")\n plt.ylim(0,ndm-1)\n plt.xlim(0,nz-1)\n zmax=zvals[-1]\n nz=zvals.size\n #DMbar, zeval = igm.average_DM(zmax, cumul=True, neval=nz+1)\n DM_cosmic = pcosmic.get_mean_DM(zvals, grid.state)\n\n \n #idea is that 1 point is 1, hence...\n zeval = zvals/dz\n DMEG_mean = (DM_cosmic+meanHost)/ddm\n DMEG_median = (DM_cosmic+medianHost)/ddm\n\n # Check median\n f_median = scipy.interpolate.interp1d(\n zvals, DM_cosmic+medianHost, \n fill_value='extrapolate')\n eval_DMEG = f_median(FRBZ)\n above = FRBDM > eval_DMEG\n print(f\"There are {np.sum(above)/len(FRBZ)} above the median\")\n\n if show_Macquart:\n plt.plot(zeval,DMEG_mean,color='gray',linewidth=2,\n label='Macquart relation (mean)')\n plt.plot(zeval,DMEG_median,color='gray',\n linewidth=2, ls='--',\n label='Macquart relation (median)')\n l=plt.legend(loc='lower right',fontsize=12)\n #l=plt.legend(bbox_to_anchor=(0.2, 0.8),fontsize=8)\n #for text in l.get_texts():\n #\ttext.set_color(\"white\")\n \n # limit to a reasonable range if logscale\n if log and vmnx[0] is None:\n themax=zDMgrid.max()\n themin=int(themax-4)\n themax=int(themax)\n plt.clim(themin,themax)\n \n ##### add FRB host galaxies at some DM/redshift #####\n if FRBZ is not None:\n iDMs=FRBDM/ddm\n iZ=FRBZ/dz\n # Restrict to plot range\n gd = (FRBDM < DMmax) & (FRBZ < zmax)\n plt.plot(iZ[gd],iDMs[gd],'ko',linestyle=\"\",markersize=2.)\n\n cbar=plt.colorbar(im,fraction=0.046, shrink=1.2,aspect=15,pad=0.05)\n cbar.set_label(label)\n\n fig_utils.set_fontsize(ax, fsize)\n \n plt.tight_layout()\n \n if show:\n plt.show()\n else:\n plt.savefig(outfile, dpi=300)\n print(f\"Wrote: {outfile}\")\n plt.close()", "def set_floating_point_behaviour_options(opts,\n inv=True,\n div0=True,\n oflo=True,\n esr=True,\n nanoo=True):\n opts.floating_point_behaviour.flags_set = True\n opts.floating_point_behaviour.inv = inv\n opts.floating_point_behaviour.div0 = div0\n opts.floating_point_behaviour.oflo = oflo\n opts.floating_point_behaviour.esr = esr\n opts.floating_point_behaviour.nanoo = nanoo\n\n return opts", "def plot_single_hfo(hfo, envelope = False, xlim =[-1,1], cutoff = None, v = True,\n axes = None, figure_size = (15,10),dpi=600,saveplot = None):\n if axes == None:\n # Creating the figure \n fig = plt.figure(figsize=figure_size,dpi=dpi)\n ax1 = fig.add_subplot(311)\n ax2 = fig.add_subplot(312)\n ax3 = fig.add_subplot(313)\n else:\n ax1 = axes[0]\n ax2 = axes[1]\n ax3 = axes[2]\n\n # number of points\n npoints = hfo.waveform.shape[0]\n time_v = np.linspace(-1,1,npoints,endpoint=True)\n # creating the axes\n \n ax1.plot(time_v,hfo.waveform[:,0],'b')\n ax1.plot(time_v[hfo.start_idx:hfo.end_idx],hfo.waveform[hfo.start_idx:hfo.end_idx,0],'k')\n \n adjust_spines(ax1, ['left'])\n ax1.set_xlim(xlim)\n \n \n \n filt = hfo.waveform[:,1]\n ax2.plot(time_v,filt) \n ax2.plot(time_v[hfo.start_idx:hfo.end_idx],filt[hfo.start_idx:hfo.end_idx],'k')\n if envelope:\n env = 
hfo.waveform[:,2]\n ax4 = ax2.twinx()\n ax4.plot(time_v,env,'g')\n \n\n \n adjust_spines(ax2, ['left', 'bottom'])\n ax2.set_xlim(xlim)\n \n \n hfo.spectrum.plot(cutoff = cutoff, v = v, ax = ax3)\n ax3.set_title('peak freq = ' + str(hfo.spectrum.peak_freq))\n adjust_spines(ax3, ['left', 'bottom'])\n \n if saveplot != None:\n if type(saveplot) == str: \n plt.savefig(saveplot, bbox_inches='tight')\n else:\n raise Exception('saveplot should be a string')\n plt.draw()", "def plot_fenics_mesh(mesh, new_fig=True):\n if(new_fig):\n plt.figure()\n\n plot(mesh)\n #plt.title(\"FEniCS mesh\")\n plt.show(block=False)\n\n pass", "def IsFloatable(self):\r\n\r\n return self.HasFlag(self.optionFloatable)", "def terminatePlane3D():\n dislin.grffin()", "def loadGroupActFlowFC_NoHidden(inputtype,fcdir):\n\n if inputtype in ['VERTICAL','RED','HIGH','CONSTANT']:\n h5f = h5py.File(fcdir + inputtype + 'ToOutputLayer_FC_Group.h5','r')\n fcmapping = h5f['sourceToTargetMapping'][:].copy()\n eigenvectors_input = h5f['eigenvectors'][:].copy()\n h5f.close()\n fc_input2output = fcmapping\n\n if inputtype=='12' or inputtype=='fpn' or inputtype=='nounimodal':\n h5f = h5py.File(fcdir + '12RuleToOutputLayer_FC_Group.h5','r')\n fcmapping = h5f['sourceToTargetMapping'][:].copy()\n eigenvectors_input = h5f['eigenvectors'][:].copy()\n h5f.close()\n fc_input2output = fcmapping\n\n return fc_input2output, eigenvectors_input", "def plotAll(fx,tfarray,tlst,flst,fignum=1,starttime=0,timeinc='hrs',\r\n dt=1.0,title=None,vmm=None,cmap=None,aspect=None,interpolation=None,\r\n cbori=None,cbshrink=None,cbaspect=None,cbpad=None,normalize='n',\r\n scale='log'):\r\n \r\n #time increment\r\n if timeinc=='hrs':\r\n tinc=3600/dt\r\n elif timeinc=='min':\r\n tinc=60/dt\r\n elif timeinc=='sec':\r\n tinc=1/dt\r\n else:\r\n raise ValueError(timeinc+'is not defined')\r\n #colormap\r\n if cmap==None:\r\n cmap='jet'\r\n else:\r\n cmap=cmap\r\n #aspect ratio\r\n if aspect==None:\r\n aspect='auto'\r\n else:\r\n aspect=aspect\r\n #interpolation\r\n if interpolation==None:\r\n interpolation='gaussian'\r\n else:\r\n interpolation=interpolation\r\n #colorbar orientation\r\n if cbori==None:\r\n cbori='vertical'\r\n else:\r\n cbori=cbori\r\n #colorbar shinkage\r\n if cbshrink==None:\r\n cbshrink=.99\r\n else:\r\n cbshrink=cbshrink\r\n #colorbar aspect\r\n if cbaspect==None:\r\n cbaspect=20\r\n else:\r\n cbaspect=cbaspect\r\n #colorbar pad\r\n if cbpad==None:\r\n cbpad=.1\r\n else:\r\n cbpad=cbpad\r\n \r\n #scale\r\n if scale=='log':\r\n zerofind=np.where(abs(tfarray)==0)\r\n tfarray[zerofind]=1.0\r\n if normalize=='y':\r\n plottfarray=20*np.log10(abs(tfarray/np.max(abs(tfarray))))\r\n else:\r\n plottfarray=20*np.log10(abs(tfarray))\r\n elif scale=='linear':\r\n if normalize=='y':\r\n plottfarray=abs(plottfarray/np.max(abs(plottfarray)))**2\r\n else:\r\n plottfarray=abs(tfarray)**2\r\n \r\n t=np.arange(len(fx))*dt+starttime*dt\r\n FX=np.fft.fft(padzeros(fx))\r\n FXfreq=np.fft.fftfreq(len(FX),dt)\r\n \r\n #set some plot parameters\r\n plt.rcParams['font.size']=10\r\n plt.rcParams['figure.subplot.left']=.13\r\n plt.rcParams['figure.subplot.right']=.98\r\n plt.rcParams['figure.subplot.bottom']=.07\r\n plt.rcParams['figure.subplot.top']=.96\r\n plt.rcParams['figure.subplot.wspace']=.25\r\n plt.rcParams['figure.subplot.hspace']=.20\r\n #plt.rcParams['font.family']='helvetica'\r\n \r\n fig=plt.figure(fignum)\r\n \r\n #plot FFT of fx\r\n fax=fig.add_axes([.05,.25,.1,.7])\r\n plt.plot(abs(FX[0:len(FX)/2]/max(abs(FX)))**2,FXfreq[0:len(FX)/2],'-k')\r\n 
plt.xlim(0,1)\r\n plt.ylim(0,FXfreq[len(FX)/2-1])\r\n fax.xaxis.set_major_locator(MultipleLocator(.5))\r\n \r\n #plot TFD\r\n pax=fig.add_axes([.25,.25,.75,.7])\r\n if vmm!=None:\r\n vmin=vmm[0]\r\n vmax=vmm[1]\r\n plt.imshow(plottfarray,extent=(tlst[0]/tinc,tlst[-1]/tinc,\r\n flst[0],flst[-1]),aspect=aspect,vmin=vmin,vmax=vmax,cmap=cmap,\r\n interpolation=interpolation)\r\n else:\r\n plt.imshow(plottfarray,extent=(tlst[0]/tinc,tlst[-1]/tinc,\r\n flst[0],flst[-1]),aspect=aspect,cmap=cmap,\r\n interpolation=interpolation)\r\n plt.xlabel('Time('+timeinc+')',fontsize=12,fontweight='bold')\r\n plt.ylabel('Frequency (Hz)',fontsize=12,fontweight='bold')\r\n if title!=None:\r\n plt.title(title,fontsize=14,fontweight='bold')\r\n plt.colorbar(orientation=cbori,shrink=cbshrink,pad=cbpad,aspect=cbaspect)\r\n \r\n #plot timeseries\r\n tax=fig.add_axes([.25,.05,.60,.1])\r\n plt.plot(t,fx,'-k')\r\n plt.axis('tight')\r\n plt.show()", "def setup( self ):\n glClearColor(*self.background)\n glClearDepth(1.0)\n glDepthFunc(GL_LEQUAL)\n glEnable(GL_LIGHTING)\n glEnable(GL_LIGHT0)\n '''\n ambientLight = [0.2, 0.2, 0.2, 1.0]\n diffuseLight = [0.8, 0.8, 0.8, 1.0]\n specularLight = [0.5, 0.5, 0.5, 1.0]\n lightPos = [0.0, 0.0, -30.0, 1.0]\n glLightfv(GL_LIGHT0, GL_AMBIENT, ambientLight)\n glLightfv(GL_LIGHT0, GL_DIFFUSE, diffuseLight)\n glLightfv(GL_LIGHT0, GL_SPECULAR, specularLight)\n glLightfv(GL_LIGHT0, GL_POSITION, lightPos)\n glEnable(GL_LIGHTING)\n glEnable(GL_LIGHT0)\n \n mat = [1.0, 0.0, 0.1, 1.0]\n glMaterialfv(GL_FRONT, GL_AMBIENT, mat)\n mat[0] = 1.0; mat[1] = 0.0; mat[2] = 0.0\n glMaterialfv(GL_FRONT, GL_DIFFUSE, mat)\n mat[0] = 1.0; mat[1] = 1.0; mat[2] = 1.0\n glMaterialfv(GL_FRONT, GL_SPECULAR, mat)\n glMaterialf(GL_FRONT, GL_SHININESS, 0.6*128.0)\n glEnable(GL_FOG)\n fogColor = [1.0, 0.0, 1.0, 1.0]\n \n global fogMode\n fogMode = GL_EXP2\n glFogi (GL_FOG_MODE, fogMode)\n glFogfv (GL_FOG_COLOR, fogColor)\n glFogf (GL_FOG_DENSITY, 0.0001)\n glHint (GL_FOG_HINT, GL_NICEST)\n glFogf (GL_FOG_START, 10.0)\n glFogf (GL_FOG_END, -1000)\n glClearColor(0.0, 0.0, 0.1, 1.0)\n '''\n glEnable(GL_DEPTH_TEST) # Enables Depth Testing\n glShadeModel(GL_SMOOTH) # Enables smooth color shading\n glMatrixMode(GL_PROJECTION)\n glLoadIdentity() \n # Set up perspective view\n gluPerspective(50.0, float(self.size[0])/float(self.size[1]), 0.1, 5000.0)\n # Set up an orthographic view\n #glOrtho(-float(width)/2, float(width)/2, -float(height)/2, float(height)/2, -1.0, 1.0)\n glMatrixMode(GL_MODELVIEW)\n glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)\n display.flip() # For interactiveness sake\n return", "def prepare_fock_state(self, n, mode):\n self.circuit.prepare_mode_fock(n, self._remap_modes(mode))", "def flip_faceup(self):\r\n self.faceup = True", "def buildFaces(self):\n for fi in range(len(self.poly1.faces)):\n f = self.poly1.faces[fi]\n for vi in range(len(f.vertices)):\n v = f.vertices[vi]\n if self.getIntersector(v) is not None and not v.isMarkedFor(f):\n self.faceTrace(1, fi, vi)\n v = f.vertices[0]\n if v.isInterior() and not v.isMarkedFor(f):\n interiorFaceTrace(f)\n for fi in range(len(self.poly2.faces)):\n f = self.poly2.faces[fi]\n for vi in range(len(f.vertices)):\n v = f.vertices[vi]\n if self.getIntersector(v) is not None and not v.isMarkedFor(f) :\n self.faceTrace(2, fi, vi)\n v = f.vertices[0]\n if v.isInterior() and not v.isMarkedFor(f):\n interiorFaceTrace(f)", "def plot_multigroup_flux(mesh, state, edges = False) :\n if mesh.dimension() == 1 :\n # get the mesh points\n x = mesh_axes(mesh)\n # plot 
the map\n plt.plot(x, f)\n \n elif mesh.dimension() == 2 :\n\n # Get the mesh axes and then make a grid of them for plotting.\n x, y = mesh_axes(mesh)\n X, Y = np.meshgrid(x, y)\n edgec = 'none'\n if edges :\n edgec = 'k'\n plt.pcolor(X, Y, f, cmap=colormap, edgecolors=edgec)\n \n else :\n print \"not ready for 3d\"\n return\n # show the plot\n plt.show()", "def showDetectorMap(display, pfsConfig, detMap, width=100, zoom=0, xcen=None, fiberIds=None, showLegend=True,\n lines=None, alpha=1.0, getCtypeFromReferenceLine=getCtypeFromReferenceLineDefault):\n\n plt.sca(display._impl._figure.axes[0])\n height = detMap.getBBox().getHeight()\n y = np.arange(0, height)\n\n SuNSS = TargetType.SUNSS_IMAGING in pfsConfig.targetType\n\n showAll = False\n if xcen is None:\n if fiberIds is None:\n fiberIds = detMap.fiberId\n showAll = True\n else:\n try:\n fiberIds[0]\n except TypeError:\n fiberIds = [fiberIds]\n\n if len(fiberIds) == 1:\n fid = fiberIds[0]\n try:\n xcen = detMap.getXCenter(fid, height/2)\n except IndexError:\n warnings.warn(\"Index %d is not found in DetectorMap\" % (fid)) # doesn't permit lazy eval\n xcen = detMap.bbox.getWidth()//2\n else:\n pass # xcen is already set\n\n nFiberShown = 0\n for fid in detMap.fiberId:\n ls = '-'\n if fid in pfsConfig.fiberId:\n ind = pfsConfig.selectFiber([fid])\n imagingFiber = pfsConfig.targetType[ind] == TargetType.SUNSS_IMAGING\n if pfsConfig.fiberStatus[ind] == FiberStatus.BROKENFIBER:\n ls = ':'\n color = 'cyan' if SuNSS and imagingFiber else 'magenta'\n else:\n color = 'green' if SuNSS and imagingFiber else 'red'\n else:\n if SuNSS:\n continue\n\n if fiberIds is not None and len(fiberIds) > 1 and fid not in fiberIds:\n continue\n\n try:\n ind = pfsConfig.selectFiber([fid])[0]\n except IndexError: # e.g. the pfsConfig contains a subset of the entire PFI\n continue\n\n imagingFiber = pfsConfig.targetType[ind] == TargetType.SUNSS_IMAGING\n if pfsConfig.fiberStatus[ind] == FiberStatus.BROKENFIBER:\n ls = ':'\n color = 'cyan' if SuNSS and imagingFiber else 'magenta'\n else:\n color = 'green' if SuNSS and imagingFiber else 'red'\n\n fiberX = detMap.getXCenter(fid, height//2)\n if showAll or len(fiberIds) > 1 or np.abs(fiberX - xcen) < width/2:\n fiberX = detMap.getXCenter(fid)\n plt.plot(fiberX[::20], y[::20], ls=ls, alpha=alpha, label=f\"{fid}\",\n color=color if showAll else None)\n nFiberShown += 1\n #\n # Plot the position of a set of lines\n #\n if lines:\n if fiberIds is None or len(fiberIds) == 0:\n fiberIds = detMap.fiberId\n stride = len(fiberIds)//25 + 1\n else:\n stride = 1\n\n # find the first and last valid fibres\n firstGood, lastGood = None, None\n ll = lines[0]\n for i, fid in enumerate(fiberIds):\n xc, yc = detMap.findPoint(fid, ll.wavelength)\n\n if np.isnan(xc + yc):\n continue\n\n if firstGood is None:\n firstGood = i\n lastGood = i\n\n for ll in lines:\n ctype = getCtypeFromReferenceLine(ll)\n if ctype == \"IGNORE\":\n continue\n\n xy = np.zeros((2, len(fiberIds))) + np.NaN\n\n for i, fid in enumerate(fiberIds):\n if i%stride != 0 and i not in (firstGood, lastGood):\n continue\n\n xc, yc = detMap.findPoint(fid, ll.wavelength)\n\n if len(fiberIds) == 1:\n display.dot('o', xc, yc, ctype=ctype)\n else:\n xy[0, i] = xc\n xy[1, i] = yc\n\n if len(fiberIds) > 1:\n good = np.isfinite(xy[0])\n if sum(good) > 0:\n plt.plot(xy[0][good], xy[1][good], color=ctype, alpha=alpha)\n\n if not showAll:\n if nFiberShown > 0 and showLegend:\n plt.legend()\n if zoom > 0:\n display.zoom(zoom, xcen, np.mean(y))", "def setDrawingMode(self):\n pass", "def 
convert_Ti_to_FLX(sff_fp, output_fp, use_sfftools=False):\r\n if use_sfftools:\r\n _fail_on_gzipped_sff(sff_fp)\r\n check_sfffile()\r\n _check_call(\r\n ['sfffile', '-flx', '-o', output_fp, sff_fp],\r\n stdout=open(devnull, 'w'))\r\n else:\r\n header, reads = adjust_sff_cycles(\r\n parse_binary_sff(qiime_open(sff_fp, 'rb'),\r\n True), 100)\r\n write_binary_sff(open(output_fp, 'w'), header, reads)", "def _removeFX(self):\r\n\t\tnodesToClean = [CONST.FOAM_FLUID_SHAPENODE, CONST.WAKE_FLUID_SHAPENODE, 'fluids_hrc']\r\n\t\tfor eachNode in nodesToClean:\r\n\t\t\ttry:\r\n\t\t\t\tcmds.delete(each)\r\n\t\t\texcept:\r\n\t\t\t\tpass\r\n\r\n\t\tfor eachCache in cmds.ls(type = 'cacheFile'):\r\n\t\t\tcmds.delete(eachCache)", "def multi_focus_TIE(fs, using_gpu=False, printing=False, *erps, **kwerps):\n mftie = MFTIE(fs, using_gpu, *erps, **kwerps)\n phase = mftie()\n if printing:\n mftie.print_k2_thres()\n return phase", "def artFluidAttrCtx(*args, accopacity: bool=False, activeListChangedProc: Union[AnyStr,\n bool]=\"\", afterStrokeCmd: Union[AnyStr, bool]=\"\", alphaclamp: Union[AnyStr,\n bool]=\"none\", alphaclamplower: Union[float, bool]=0.0, alphaclampupper:\n Union[float, bool]=1.0, attrSelected: Union[AnyStr, bool]=\"\", autoSave:\n Union[AnyStr, bool]=\"\", beforeStrokeCmd: Union[AnyStr, bool]=\"\",\n brushalignment: bool=True, brushfeedback: bool=True, clamp: Union[AnyStr,\n bool]=\"none\", clamplower: Union[float, bool]=0.0, clampupper: Union[float,\n bool]=1.0, clear: bool=True, colorAlphaValue: Union[float, bool]=0.0,\n colorRGBAValue: Union[List[float, float, float, float], bool]=None,\n colorRGBValue: Union[List[float, float, float], bool]=None, colorRamp:\n Union[AnyStr, bool]=\"\", colorfeedback: bool=False, colorfeedbackOverride:\n bool=False, colorrangelower: Union[float, bool]=0.0, colorrangeupper:\n Union[float, bool]=1.0, currentPaintableFluid: Union[AnyStr, bool]=\"\",\n dataTypeIndex: Union[int, bool]=0, delaySelectionChanged: bool=True,\n disablelighting: bool=False, displayAsRender: bool=True, displayVelocity:\n bool=True, doAutoSave: bool=True, dragSlider: AnyStr=\"\", duringStrokeCmd:\n Union[AnyStr, bool]=\"\", dynclonemode: bool=True, exists: bool=True,\n expandfilename: bool=True, exportaspectratio: Union[float, bool]=0.0,\n exportfilemode: Union[AnyStr, bool]=\"luminance/rgb\", exportfilesave:\n AnyStr=\"\", exportfilesizex: Union[int, bool]=0, exportfilesizey: Union[int,\n bool]=0, exportfiletype: Union[AnyStr, bool]=\"\", filterNodes: bool=True,\n history: bool=True, image1: Union[AnyStr, bool]=\"\", image2: Union[AnyStr,\n bool]=\"\", image3: Union[AnyStr, bool]=\"\", importfileload: AnyStr=\"\",\n importfilemode: Union[AnyStr, bool]=\"alpha\", importreassign: bool=False,\n interactiveUpdate: bool=True, lastRecorderCmd: Union[AnyStr, bool]=\"\",\n lastStampName: Union[AnyStr, bool]=\"\", lowerradius: Union[float, bool]=0.0,\n makeStroke: Union[int, List[int], bool]=0, mappressure: Union[AnyStr,\n bool]=\"none\", maxvalue: Union[float, bool]=1.0, minvalue: Union[float,\n bool]=0.0, name: AnyStr=\"\", objattrArray: Union[AnyStr, bool]=\"\", opacity:\n Union[float, bool]=1.0, outline: bool=True, outwhilepaint: bool=False,\n paintNodeArray: Union[AnyStr, bool]=\"\", paintattrselected: AnyStr=\"\",\n paintmode: Union[AnyStr, bool]=\"screen\", paintoperationtype: Union[AnyStr,\n bool]=\"Paint\", pickColor: bool=True, pickValue: bool=True, playbackCursor:\n Union[List[float, float], List[List[float, float]], bool]=None,\n playbackPressure: Union[float, List[float], bool]=0.0, 
preserveclonesource:\n bool=True, profileShapeFile: Union[AnyStr, bool]=\"\", projective: bool=False,\n property: Union[AnyStr, bool]=\"\", radius: Union[float, bool]=1.0,\n rampMaxColor: Union[List[float, float, float], bool]=None, rampMinColor:\n Union[List[float, float, float], bool]=None, record: bool=True, reflection:\n bool=False, reflectionaboutorigin: bool=True, reflectionaxis: Union[AnyStr,\n bool]=\"x\", rgbValue: Union[List[float, float, float], bool]=None,\n screenRadius: Union[float, bool]=0.0, selectclonesource: bool=True,\n selectedattroper: Union[AnyStr, bool]=\"absolute\", showactive: bool=True,\n stampDepth: Union[float, bool]=0.0, stampProfile: Union[AnyStr, bool]=\"\",\n stampSpacing: Union[float, bool]=1.0, strokesmooth: Union[AnyStr, bool]=\"\",\n surfaceConformedBrushVertices: bool=True, tablet: bool=True,\n tangentOutline: bool=True, toolOffProc: Union[AnyStr, bool]=\"\", toolOnProc:\n Union[AnyStr, bool]=\"\", useColorRamp: bool=True, useMaxMinColor: bool=True,\n useStrokeDirection: bool=True, usepressure: bool=False, value: Union[float,\n bool]=0.0, velocity: Union[List[float, float, float], bool]=None,\n whichTool: Union[AnyStr, bool]=\"\", worldRadius: Union[float, bool]=0.0,\n q=True, query=True, e=True, edit=True, **kwargs)->Union[None, Any]:\n pass", "def set_ground_channel_fourier_modes(self, nxmax=None, nymax=None, auto=True, mode='analytic'):\n\n if mode == \"symbolic\":\n if nxmax is not None and nymax is not None:\n basis = contiguous_channel_basis(nxmax, nymax, self.scale_params.n)\n else:\n basis = None\n self.set_ground_modes(basis, auto)\n else:\n self._set_ground_analytic_fourier_modes(nxmax, nymax, auto)", "def plot5(self):\n\n cond = ((self.ds.freq<32.6) & (self.ds.freq>17))\n freq = self.ds.freq[cond]\n power = self.ds.power[cond]\n\n # the modes for KIC 9205705\n m = pd.read_csv('/home/mxs191/Desktop/MathewSchofield/TRG/GetData/Modes/modes_9205705.csv')\n m1 = [17.65, 20.6, 23.7, 26.9, 30.1] # l=1\n\n plt.rc('font', size=18)\n fig, ax = plt.subplots()\n plt.plot(freq, power, zorder=1, alpha=0.4)\n\n # NOTE: annotate mode angular degrees\n plt.scatter(m['f0'].as_matrix(), np.full(len(m), 150000), c='k', zorder=2, s=80)\n plt.scatter(m['f2'].as_matrix(), np.full(len(m), 130000), c='mediumseagreen', zorder=2, s=80, marker='^')\n plt.scatter(m1, np.full(len(m1), 140000), c='grey', zorder=2, s=80, marker='v')\n\n # NOTE: plot envelope\n numax = info['numax'].as_matrix() # mu Hz\n env_width = 0.66 * numax**0.88\n plt.plot(freq, 40004*np.exp( -( (freq-24.7)**2 / (2*7.**2) ) ), c='k', linestyle='--')\n\n # NOTE: annotate envelope\n style = dict(size=16, color='k')\n ax.text(24.1, 49167, r\"$\\nu_{\\rm max}$\", color='k', size=18)\n ax.text(24.1, 20994, r\"$\\Gamma_{\\rm env}$\", color='k', size=18)\n ax.text(23, 162944, r\"$\\Delta \\nu$\", **style)\n plt.annotate(s='', xy=(25.3, 158610), xytext=(21.91, 158610),\n arrowprops=dict(arrowstyle='<->')) # dnu\n plt.annotate(s='', xy=((24.7-env_width/2.), 15861), xytext=((24.7+env_width/2.), 15861),\n arrowprops=dict(arrowstyle='<->')) # env width\n\n ax.set_xlabel(r'Frequency ($\\rm \\mu Hz$)')\n ax.set_ylabel(r'PSD ($\\rm ppm^{2} \\, \\mu Hz^{-1}$)')\n plt.xlim(17, 32.6)\n plt.ylim(17, 195181)\n plt.tight_layout()\n plt.show()\n fig.savefig(os.getcwd() + '/DetTest1_plots/Plot5_ps_' + str(self.ds.epic) + '.pdf')", "def IC_FC_visualization(self):\n legend = ['1st CWT','2nd CWT','IC','FC']\n title = 'Optimized ICs and FCs detection'\n IC_values = [self.IC,normalize(self.cwt1)[self.IC]]\n FC_values = 
[self.FC,normalize(self.cwt2)[self.FC]]\n visualize_signal(legend, title, normalize(self.cwt1), normalize(self.cwt2), IC = IC_values, FC = FC_values)", "def draw_foe_mines(self):\n self.foe_top.draw(self.foe_top_rect.topleft)\n self.foe_middle.draw(self.foe_middle_rect.topleft)\n self.foe_midbot.draw(self.foe_midbot_rect.topleft)\n self.foe_bottom.draw(self.foe_bottom_rect.topleft)", "def event_m20_11_x115(z7=_):\n \"\"\"State 0,1: Flying animation playback\"\"\"\n ChangeObjState(z7, 70)\n assert CompareObjStateId(z7, 20, 0)\n \"\"\"State 2: End state\"\"\"\n return 0", "def check_fpu_mode(request):\n old_mode = get_fpu_mode()\n yield\n new_mode = get_fpu_mode()\n\n if old_mode != new_mode:\n warnings.warn(\"FPU mode changed from {0:#x} to {1:#x} during \"\n \"the test\".format(old_mode, new_mode),\n category=FPUModeChangeWarning, stacklevel=0)", "def main(config):\n file_paths_info = [('GLOFRIS','WATCH','ARG_inunriver_historical_000000000WATCH_1980_rp01000.tif'),\n \t\t\t\t('GLOFRIS','RCP45','ARG_inunriver_rcp4p5_0000GFDL-ESM2M_2030_rp01000.tif'),\n \t\t\t\t('GLOFRIS','RCP85','ARG_inunriver_rcp8p5_0000GFDL-ESM2M_2030_rp01000.tif'),\n \t\t\t\t('FATHOM','AR_fluvial_undefended_merged','AR-FU-1000.tif'),\n \t\t\t\t('FATHOM','AR_pluvial_undefended_merged','AR-PU-1000.tif')\n \t\t\t\t]\n figure_names = ['GLOFRIS-WATCH-fluvial','GLOFRIS-RCP45-fluvial','GLOFRIS-RCP85-fluvial','FATHOM-fluvial','FATHOM-pluvial']\n figure_titles = ['current fluvial flooding','RCP4.5 fluvial flooding','RCP8.5 fluvial flooding','current fluvial flooding','current pluvial flooding']\n for f_i in range(len(file_paths_info)):\n\t hazard_file = os.path.join(config['paths']['data'],'flood_data', file_paths_info[f_i][0],file_paths_info[f_i][1],file_paths_info[f_i][2])\n\t output_file = os.path.join(config['paths']['figures'], 'flood-map-{}.png'.format(figure_names[f_i]))\n\t ax = get_axes()\n\t plot_basemap(ax, config['paths']['data'])\n\t scale_bar(ax, location=(0.8, 0.05))\n\t plot_basemap_labels(ax, config['paths']['data'], include_regions=True,include_zorder=3)\n\n\t proj_lat_lon = ccrs.PlateCarree()\n\n\n\t # Create color map\n\t colors = plt.get_cmap('Blues')\n\n\t # Read in raster data\n\t data, lat_lon_extent = get_data(hazard_file)\n\t data[(data <= 0) | (data > 5)] = np.nan\n\t max_val = np.nanmax(data)\n\t norm=mpl.colors.Normalize(vmin=0, vmax=max_val)\n\n\t # Plot population data\n\t im = ax.imshow(data, extent=lat_lon_extent,transform=proj_lat_lon, cmap=colors,norm =norm, zorder=2)\n\n\t # Add colorbar\n\t cbar = plt.colorbar(im, ax=ax,fraction=0.1, shrink=0.87,pad=0.01, drawedges=False, orientation='horizontal',\n\t norm=mpl.colors.Normalize(vmin=0, vmax=max_val), ticks=list(np.linspace(0,max_val,3)))\n\t cbar.set_clim(vmin=0,vmax=max_val)\n\n\n\t cbar.outline.set_color(\"none\")\n\t cbar.ax.yaxis.set_tick_params(color='black')\n\t cbar.ax.set_xlabel('Flood depths (m)',fontsize=12,color='black')\n\n\t plt.title('1 in 1000 year {}'.format(figure_titles[f_i]), fontsize = 14)\n\t save_fig(output_file)\n\t plt.close()", "def _add_force(Fg: np.ndarray, dof_map: Dict[Tuple[int, int], int], model: BDF,\n load, offset: int, ndof_per_grid: int, cid: int=0, show_warning: bool=True):\n #cid = load.cid\n nid = load.node\n node_ref = load.node_ref\n ndofi = ndof_per_grid if node_ref.type == 'GRID' else 1\n assert ndofi == 6, f'GRID must have 6 DOF for structural analysis\\n{node_ref}'\n\n if node_ref.cd == cid:\n fglobal = load.mag * load.xyz\n elif node_ref.cd != cid:\n fbasic = load.to_global()\n if show_warning:\n 
model.log.warning(f'differing cid & cd is not supported; cid={cid} cd={node_ref.cd}')\n show_warning = False\n cd_ref = node_ref.cd_ref\n Tbg = cd_ref.beta()\n fglobal = _force_to_local(cd_ref, fbasic)\n\n if 0: # pragma: no cover\n if cd_ref.type[-1] in ['C', 'S']:\n ex = Tbg[0, :]\n ey = Tbg[1, :]\n #ez = Tbg[2, :]\n xyz_local = node_ref.get_position_wrt(model, node_ref.cd)\n if cd_ref.type[-1] == 'C':\n theta = radians(xyz_local[1])\n ct = cos(theta)\n st = sin(theta)\n T = np.array([\n [ct, -st, 0.],\n [st, ct, 0.],\n [0., 0., 1.],\n ])\n Tbg = Tbg @ T\n else:\n from pyNastran.bdf.cards.coordinate_systems import CORD2S\n rho, thetad, phid = xyz_local\n coord = CORD2S.add_ijk(-1, origin=cd_ref.origin, i=ex, j=ey, k=None, rid=0, comment='')\n beta = coord.beta()\n Tbg = Tbg @ beta\n coord.transform_vector_to_local([rho, thetad, phid])\n #theta = radians(xyz_local[1])\n #phi = radians(xyz_local[2])\n #ct = cos(theta)\n #st = sin(theta)\n\n #cp = cos(phi)\n #sp = sin(phi)\n\n str(xyz_local)\n else:\n # rectangular\n pass\n Tgb = Tbg.T\n fglobal = Tgb @ fbasic\n else:\n raise NotImplementedError(f'node_ref.cd={node_ref.cd} cid={cid} load:\\n{str(load)}')\n\n for dof in range(3):\n irow = dof_map[(nid, dof+offset)]\n Fg[irow] += fglobal[dof]\n return show_warning", "def fan_mode(self) -> str:\n fan_mode = self._node.aux_properties.get(CMD_CLIMATE_FAN_SETTING)\n if not fan_mode:\n return FAN_OFF\n return UOM_TO_STATES[UOM_FAN_MODES].get(fan_mode.value, FAN_OFF)" ]
[ "0.5923838", "0.5707589", "0.55777276", "0.54883665", "0.540146", "0.5344511", "0.533769", "0.5307722", "0.5219301", "0.52111065", "0.52089953", "0.52063096", "0.52039117", "0.5201414", "0.5189145", "0.5175825", "0.5149582", "0.51441306", "0.51224685", "0.50826716", "0.5071986", "0.5053937", "0.5051721", "0.5032304", "0.5023499", "0.49948257", "0.49895376", "0.49811614", "0.49521124", "0.4946736", "0.4938188", "0.49363694", "0.49339715", "0.49184874", "0.491781", "0.49167332", "0.49117228", "0.4905199", "0.49002463", "0.48966008", "0.4895511", "0.48948538", "0.48945892", "0.4890698", "0.48875433", "0.48817825", "0.48676562", "0.48636362", "0.48622575", "0.48568866", "0.48557562", "0.48518446", "0.48510903", "0.4844225", "0.4834994", "0.48309195", "0.48162675", "0.4813715", "0.48117584", "0.48063385", "0.48028514", "0.47999352", "0.47982162", "0.47868183", "0.47832566", "0.47822708", "0.47783366", "0.47723454", "0.47660625", "0.4757509", "0.47560143", "0.47534367", "0.47505647", "0.47499144", "0.47403547", "0.47306728", "0.47276214", "0.47207034", "0.4716821", "0.4714821", "0.47123277", "0.47104922", "0.47101778", "0.47096017", "0.47093007", "0.4707182", "0.47042236", "0.4703938", "0.47035268", "0.47028476", "0.4695518", "0.46896037", "0.46859416", "0.46849614", "0.46822444", "0.46738988", "0.4672068", "0.46719962", "0.4669006", "0.4668775", "0.4667354" ]
0.0
-1
FF on, guider flat, FF off, open FFS guider decenter off, 3xguider axes off, guider on
def test_goto_field_apogee_no_slew_decenter_off(self): sopTester.updateModel('mcp', TestHelper.mcpState['all_off']) sopTester.updateModel('guider', TestHelper.guiderState['guiderOnDecenter']) cmdState = self.actorState.gotoField cmdState.reinitialize(self.cmd) cmdState.doSlew = False self._goto_feld_apogee(9, 37, 0, 0, cmdState)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def f_fixed(self):\n self.fx_free = self.fy_free = self.fz_free = False\n return self", "def zguider():\n gzero.gxoff = camera.status.guider[0] + gzero.gxoff\n gzero.gyoff = camera.status.guider[1] + gzero.gyoff\n guider(0,0)\n f = open('/data/guidezero','w')\n cPickle.dump(gzero,f)\n f.close()", "def _show_feature_flags(graph: nx.DiGraph, mode='sorted'):\n #plt.figure(figsize=(15, 15))\n if mode == 'sorted':\n pos = nx.multipartite_layout(graph)\n nx.draw(graph, pos, with_labels=True, arrows=True, node_color=\"#BA9DFB\")\n else:\n pos = graphviz_layout(graph)\n nx.draw_networkx(graph, pos, arrows=True, node_color=\"#BA9DFB\")\n plt.show()\n plt.clf()", "def displayFiducial(self):\n #obsolete?\n profbox()\n modelNodes = slicer.util.getNodes('vtkMRMLModelNode*')\n for modelNode in modelNodes.values():\n displayNode = modelNode.GetDisplayNode()\n if modelNode.GetAttribute(\"segmented\") == \"1\" and modelNode.GetAttribute(\"nth\")!=None:\n if 1:\n i = int(modelNode.GetAttribute(\"nth\"))\n if self.fiducialnode[i] == 0: \n polyData = modelNode.GetPolyData()\n nb = int(polyData.GetNumberOfPoints()-1)\n coord = [0,0,0]\n if nb>10:\n self.fiducialnode[i] = slicer.vtkMRMLAnnotationFiducialNode()\n polyData.GetPoint(nb,coord) \n self.fiducialnode[i].SetName(self.option[i])\n self.fiducialnode[i].SetFiducialCoordinates(coord) \n self.fiducialnode[i].Initialize(slicer.mrmlScene)\n self.fiducialnode[i].SetLocked(1)\n self.fiducialnode[i].SetSelectable(0)\n fidDN = self.fiducialnode[i].GetDisplayNode()\n fidDN.SetColor(modelNode.GetDisplayNode().GetColor())\n fidDN.SetGlyphScale(0)\n fidTN = self.fiducialnode[i].GetAnnotationTextDisplayNode()\n fidTN.SetTextScale(3)\n fidTN.SetColor(modelNode.GetDisplayNode().GetColor())\n \n self.fiducialnode[i].SetDisplayVisibility(modelNode.GetDisplayNode().GetVisibility())\n else: \n if modelNode.GetDisplayNode().GetVisibility():\n self.fiducialnode[i].SetDisplayVisibility(abs(self.fiducialnode[i].GetDisplayVisibility()-1))\n if self.fiducialnode[i].GetDisplayVisibility()==1:\n self.displayFiducialButton.text = \"Hide Labels on Needles\"\n else:\n self.displayFiducialButton.text = \"Display Labels on Needles\"", "def gff_init():\n pass", "def main_multimodal_fusion(im_vis, im_ir, kernel, levels, window_size):\n\n im_vis = convert_image_to_floats(im_vis)\n im_ir = convert_image_to_floats(im_ir)\n\n im_vis_hsv = rgb2hsv(im_vis)\n value_channel = im_vis_hsv[:, :, 2]\n\n plt.subplot(1, 2, 1)\n plt.imshow(value_channel, cmap='gray')\n plt.subplot(1, 2, 2)\n plt.imshow(im_ir, cmap='gray')\n plt.show()\n\n # kernels to compute visibility\n kernel1 = classical_gaussian_kernel(5, 2)\n kernel2 = classical_gaussian_kernel(5, 2)\n\n # Computation of local entropy, local contrast and visibility for value channel\n local_entropy_value = normalized_local_entropy(value_channel, window_size)\n local_contrast_value = local_contrast(value_channel, window_size)\n visibility_value = visibility(value_channel, kernel1, kernel2)\n # Combination of local entropy, local contrast and visibility for value channel\n weight_value = weight_combination(local_entropy_value, local_contrast_value, visibility_value, 1, 1, 1)\n\n # Computation of local entropy, local contrast and visibility for IR image\n local_entropy_ir = normalized_local_entropy(im_ir, window_size)\n local_contrast_ir = local_contrast(im_ir, window_size)\n visibility_ir = visibility(im_ir, kernel1, kernel2)\n # Combination of local entropy, local contrast and visibility for IR image\n weight_ir = 
weight_combination(local_entropy_ir, local_contrast_ir, visibility_ir, 1, 1, 1)\n\n plt.subplot(2, 3, 1)\n plt.imshow(local_entropy_value, cmap='gray')\n plt.subplot(2, 3, 2)\n plt.imshow(local_contrast_value, cmap='gray')\n plt.subplot(2, 3, 3)\n plt.imshow(visibility_value, cmap='gray')\n plt.subplot(2, 3, 4)\n plt.imshow(local_entropy_ir, cmap='gray')\n plt.subplot(2, 3, 5)\n plt.imshow(local_contrast_ir, cmap='gray')\n plt.subplot(2, 3, 6)\n plt.imshow(visibility_ir, cmap='gray')\n plt.show()\n\n # Normalising weights of value channel and IR image\n weightN_value, weightN_ir = weight_normalization(weight_value, weight_ir)\n\n plt.subplot(1, 2, 1)\n plt.imshow(weightN_value, cmap='gray')\n plt.subplot(1, 2, 2)\n plt.imshow(weightN_ir, cmap='gray')\n plt.show()\n\n # Creating Gaussian pyramids of the weights maps of respectively the value channel and IR image\n gauss_pyr_value_weights = gaussian_pyramid(weightN_value, kernel, levels)\n gauss_pyr_ir_weights = gaussian_pyramid(weightN_ir, kernel, levels)\n\n # Creating Laplacian pyramids of respectively the value channel and IR image\n lap_pyr_value = laplacian_pyramid(value_channel, kernel, levels)\n lap_pyr_ir = laplacian_pyramid(im_ir, kernel, levels)\n\n # Creating the fused Laplacian of the two modalities\n lap_pyr_fusion = fused_laplacian_pyramid(gauss_pyr_value_weights, gauss_pyr_ir_weights, lap_pyr_value, lap_pyr_ir)\n\n i = 1\n for l in lap_pyr_fusion:\n plt.subplot(1, len(lap_pyr_fusion), i)\n plt.imshow(l, cmap='gray')\n i += 1\n plt.show()\n\n # Creating the Gaussian pyramid of value channel in order to collapse the fused Laplacian pyramid\n gauss_pyr_value = gaussian_pyramid(value_channel, kernel, levels)\n collapsed_image = collapse_pyramid(lap_pyr_fusion, gauss_pyr_value)\n\n # Replacing the value channel in HSV visible image by the collapsed image\n im_vis_hsv_fusion = im_vis_hsv.copy()\n im_vis_hsv_fusion[:, :, 2] = collapsed_image\n im_vis_rgb_fusion = hsv2rgb(im_vis_hsv_fusion)\n\n plt.subplot(1, 2, 1)\n plt.imshow(im_vis)\n plt.subplot(1, 2, 2)\n plt.imshow(im_vis_rgb_fusion)\n plt.show()", "def displayFiducial(self):\r\n # obsolete?\r\n profbox()\r\n modelNodes = slicer.util.getNodes('vtkMRMLModelNode*')\r\n for modelNode in modelNodes.values():\r\n displayNode = modelNode.GetDisplayNode()\r\n if modelNode.GetAttribute(\"segmented\") == \"1\" and modelNode.GetAttribute(\"nth\") != None:\r\n if 1:\r\n i = int(modelNode.GetAttribute(\"nth\"))\r\n if self.fiducialnode[i] == 0:\r\n polyData = modelNode.GetPolyData()\r\n nb = int(polyData.GetNumberOfPoints() - 1)\r\n coord = [0, 0, 0]\r\n if nb > 10:\r\n self.fiducialnode[i] = slicer.vtkMRMLAnnotationFiducialNode()\r\n polyData.GetPoint(nb, coord)\r\n self.fiducialnode[i].SetName(self.option[i])\r\n self.fiducialnode[i].SetFiducialCoordinates(coord)\r\n self.fiducialnode[i].Initialize(slicer.mrmlScene)\r\n self.fiducialnode[i].SetLocked(1)\r\n self.fiducialnode[i].SetSelectable(0)\r\n fidDN = self.fiducialnode[i].GetDisplayNode()\r\n fidDN.SetColor(modelNode.GetDisplayNode().GetColor())\r\n fidDN.SetGlyphScale(0)\r\n fidTN = self.fiducialnode[i].GetAnnotationTextDisplayNode()\r\n fidTN.SetTextScale(3)\r\n fidTN.SetColor(modelNode.GetDisplayNode().GetColor())\r\n\r\n self.fiducialnode[i].SetDisplayVisibility(modelNode.GetDisplayNode().GetVisibility())\r\n else:\r\n if modelNode.GetDisplayNode().GetVisibility():\r\n self.fiducialnode[i].SetDisplayVisibility(abs(self.fiducialnode[i].GetDisplayVisibility() - 1))\r\n if self.fiducialnode[i].GetDisplayVisibility() == 1:\r\n 
self.displayFiducialButton.text = \"Hide Labels on Needles\"\r\n else:\r\n self.displayFiducialButton.text = \"Display Labels on Needles\"", "def update_flags(self):\n # view mode, filled vs wirefrom\n if self.view['wireframe']:\n gl.glPolygonMode(gl.GL_FRONT_AND_BACK, gl.GL_LINE)\n else:\n gl.glPolygonMode(gl.GL_FRONT_AND_BACK, gl.GL_FILL)\n\n # set fullscreen or windowed\n self.set_fullscreen(fullscreen=self.view['fullscreen'])\n\n # backface culling on or off\n if self.view['cull']:\n gl.glEnable(gl.GL_CULL_FACE)\n else:\n gl.glDisable(gl.GL_CULL_FACE)\n\n # case where we WANT an axis and NO vertexlist\n # is stored internally\n if self.view['axis'] and self._axis is None:\n from .. import creation\n # create an axis marker sized relative to the scene\n axis = creation.axis(origin_size=self.scene.scale / 100)\n # create ordered args for a vertex list\n args = rendering.mesh_to_vertexlist(axis)\n # store the axis as a reference\n self._axis = self.batch.add_indexed(*args)\n\n # case where we DON'T want an axis but a vertexlist\n # IS stored internally\n elif not self.view['axis'] and self._axis is not None:\n # remove the axis from the rendering batch\n self._axis.delete()\n # set the reference to None\n self._axis = None", "def no_gradient_fusion():\n pass", "def update_focal_axes(self):\n #self.update_sigma()\n self.updateGL()", "def setup_fermi(self):\n eventclass=5 # 2 (Source) or 5 (UltracleanVeto)\n eventtype=0 # 0 (all), 3 (bestpsf) or 5 (top3 quartiles)\n mask_type='top300'\n force_mask_at_bin_number=10\n\n self.f1 = fp.fermi_plugin(maps_dir,fermi_data_dir=fermi_data_dir,work_dir=work_dir,CTB_en_min=0,CTB_en_max=40,nside=self.nside,eventclass=eventclass,eventtype=eventtype,newstyle=1,data_July16=True)\n\n if mask_type != 'False':\n self.f1.make_ps_mask(mask_type = mask_type,force_energy = True,energy_bin = force_mask_at_bin_number)\n self.f1.add_diffuse_newstyle(comp = 'p7', eventclass = eventclass, eventtype = eventtype)\n self.f1.add_bubbles(comp='bubs') #bubbles\n self.f1.add_iso(comp='iso') #iso\n self.f1.add_ps_model(comp='ps_model')\n\n # Exposure correct J_map_arr\n self.J_map_arr *= self.f1.CTB_exposure_maps\n\n # Add J-factor map with mean 1 in each energy bin\n self.f1.add_template_by_hand('J_map',np.array([self.J_map_arr[i]/np.mean(self.J_map_arr[i]) for i in range(40)]))", "def fullcore_detectors():\n\n cwd = os.getcwd()\n fname = get_sample_data('%s/oecd-fullcore_geom1.png' % (cwd))\n im = plt.imread(fname)\n\n # crop the image\n height, width, color = np.shape(im)\n y1 = int(height*0.15)\n y2 = int(height*0.6)\n x1 = int(width*0.45)\n x2 = int(width)\n plt.imshow(im[y1:y2,x1:x2,:])\n plt.axis('off')\n\n # Axial 1\n x = 158\n y = 291\n P = 55\n s = P/2/np.cos(np.pi/6)\n plt.plot([s+x, 2*s+x], [0+y, 0+y], 'r-', lw=1.5, label='1- Axial1')\n plt.plot([s+x, 2*s+x], [P+y, P+y], 'r-', lw=1.5)\n plt.plot([s+x, s/2+x], [0+y, P/2+y], 'r-', lw=1.5)\n plt.plot([s/2+x, s+x], [P/2+y, P+y], 'r-', lw=1.5)\n plt.plot([2*s+x, 2*s+s/2+x], [0+y, P/2+y], 'r-', lw=1.5)\n plt.plot([2*s+s/2+x, 2*s+x], [P/2+y, P+y], 'r-', lw=1.5)\n\n plt.text(x=x+37, y=y+40, s='1', fontsize=20, color='w')\n\n # Axial 2\n x = 210\n y = 321\n P = 55\n s = P/2/np.cos(np.pi/6)\n plt.plot([s+x, 2*s+x], [0+y, 0+y], 'r-', lw=1.5, label='2- Axial2')\n plt.plot([s+x, 2*s+x], [P+y, P+y], 'r-', lw=1.5)\n plt.plot([s+x, s/2+x], [0+y, P/2+y], 'r-', lw=1.5)\n plt.plot([s/2+x, s+x], [P/2+y, P+y], 'r-', lw=1.5)\n plt.plot([2*s+x, 2*s+s/2+x], [0+y, P/2+y], 'r-', lw=1.5)\n plt.plot([2*s+s/2+x, 2*s+x], [P/2+y, P+y], 'r-', 
lw=1.5)\n plt.text(x=x+37, y=y+40, s='2', fontsize=20, color='w')\n\n # Axial 3\n x = 262\n y = 291\n P = 55\n s = P/2/np.cos(np.pi/6)\n plt.plot([s+x, 2*s+x], [0+y, 0+y], 'r-', lw=1.5, label='3- Axial3')\n plt.plot([s+x, 2*s+x], [P+y, P+y], 'r-', lw=1.5)\n plt.plot([s+x, s/2+x], [0+y, P/2+y], 'r-', lw=1.5)\n plt.plot([s/2+x, s+x], [P/2+y, P+y], 'r-', lw=1.5)\n plt.plot([2*s+x, 2*s+s/2+x], [0+y, P/2+y], 'r-', lw=1.5)\n plt.plot([2*s+s/2+x, 2*s+x], [P/2+y, P+y], 'r-', lw=1.5)\n\n plt.text(x=x+37, y=y+40, s='3', fontsize=20, color='w')\n\n # Radial 1\n x = 52\n y = 349\n plt.plot([x, 495+x], [y, y], 'r-', lw=1.5, label='4- Radial1')\n plt.plot([x, 495+x], [y, y], 'r-', lw=1.5, label='5- Radial2')\n plt.text(x=x+380, y=y-10, s='4, 5', fontsize=20, color='black')\n\n # Radial 2\n x = 52\n y = 349\n L = 495\n plt.plot([x, L*np.cos(np.pi/6)+x], [y, -L/2+y], 'r-', lw=1.5, label='6- Radial3')\n plt.text(x=350, y=y-200, s='6', rotation=30, fontsize=20, color='black')\n plt.legend(loc='best')\n\n plt.savefig(\"oecd-fullcore-detectors\", dpi=300, bbox_inches=\"tight\")", "def isFim(self):\r\n return", "def fiducial_evolution():\n \n # fiducial model\n wangle = 180*u.deg\n pk = pickle.load(open('../data/gd1_fiducial.pkl', 'rb'))\n x = pk['x'].to(u.kpc)\n xorig = x[:2]\n \n plt.close()\n fig, ax = plt.subplots(1,1,figsize=(6,6))\n \n plt.sca(ax)\n \n Nsnap = 8\n times = np.linspace(0,0.5,Nsnap)[::-1]\n angles = np.linspace(0,322,Nsnap)[::-1]*u.deg\n\n for e, t in enumerate(times):\n c = mpl.cm.Blues(0.05+0.85*(Nsnap-e)/Nsnap)\n #a = 0.5 + 0.5*(Nsnap-e)/Nsnap\n \n pk = pickle.load(open('../data/gd1_fiducial_t{:.4f}.pkl'.format(t), 'rb'))\n x = pk['x'].to(u.kpc)\n x_, y_ = x[0], x[1]\n \n plt.plot(x_[120:-120], y_[120:-120], '.', color=c, ms=10, zorder=Nsnap-e, rasterized=False)\n \n xt = 24*np.cos(angles[e]+90*u.deg)\n yt = 24*np.sin(angles[e]+90*u.deg)\n if e<Nsnap-1:\n txt = plt.text(xt, yt, '+ {:.2f} Gyr'.format(t), va='center', ha='center', fontsize='small', color='0.2', rotation=(angles[e]).value, zorder=10)\n txt.set_bbox(dict(facecolor='w', alpha=0.7, ec='none'))\n \n plt.text(0, 24, 'Flyby', va='center', ha='center', fontsize='small', color='0.2')\n\n lim = 27\n plt.xlim(-lim,lim)\n plt.ylim(-lim,lim)\n plt.gca().set_aspect('equal')\n \n plt.xlabel('x [kpc]')\n plt.ylabel('y [kpc]')\n \n plt.tight_layout()\n plt.savefig('../plots/loop_evolution.pdf')", "def fc_visual_save(fc, lowweight, savenamefile_prefix):\r\n\r\n\r\n ### text setup for brain areas ###\r\n pos_text_lefttop1 = [-80, 50, 30]\r\n pos_text_middletop1 = [120, 50, 30]\r\n pos_text_lefttop2 = [-80, 70, 10]\r\n pos_text_leftDown1 = [-80, 550, 30]\r\n pos_text_leftDown2 = [-80, 570, 10]\r\n pos_text_leftDown3 = [-80, 580, 10]\r\n \r\n texts_org = dict()\r\n\r\n lowweight = np.round(lowweight, decimals = 2) \r\n\r\n # plot\r\n df_chninf = assign_coord2chnArea(area_coord_file, fc['chnAreas'])\r\n for ci, cond in enumerate(fc['ciCOH'].keys()):\r\n ciCOH = fc['ciCOH'][cond]\r\n ntrials, ntemp = fc['setup']['ntrials_' + cond], fc['setup']['ntemp_' + cond]\r\n\r\n\r\n texts = texts_org.copy()\r\n \r\n text_thred = 'thred = ' + str(np.round(lowweight, decimals = 2))\r\n text_ntrials = 'ntrials = ' + str(ntrials)\r\n\r\n texts[cond] = pos_text_middletop1\r\n texts[text_task] = pos_text_leftDown1\r\n texts[text_ntrials] = pos_text_leftDown2\r\n texts[text_thred] = pos_text_leftDown3\r\n \r\n\r\n saveFCGraph = os.path.join(savefolder, savenamefile_prefix + '_lw' + str(np.round(lowweight, decimals = 2)) + '_' + cond + '.png')\r\n\r\n igplot = 
ciCOH_visual_save(ciCOH = ciCOH, chnInf = df_chninf, lowweight = lowweight, \r\n savefile = saveFCGraph, texts = texts, threds_edge = None)\r\n\r\n del texts[cond], texts[text_ntrials]\r\n\r\n img = cv2.imread(saveFCGraph)\r\n if ci == 0:\r\n imgs = img\r\n else:\r\n imgs = np.concatenate((imgs, np.zeros((img.shape[0], 5, 3)),img), axis = 1)\r\n\r\n os.remove(saveFCGraph)\r\n\r\n # combine all conditions\r\n print(imgs.shape)\r\n saveFCGraph_comb = os.path.join(savefolder, 'comb_' + savenamefile_prefix + '_lw' + str(np.round(lowweight, decimals = 2)) + '.png')\r\n cv2.imwrite(saveFCGraph_comb, imgs)", "def set_flammable(self, f):\n self.flammable = f", "def setup(self, flags):\n self.figure = pylab.figure(1)\n self.axes = {}\n self.stream_data = {}\n self.flags = flags", "def SetStandbyFPMode(self):\n handler = self.get_command_object(\"SetStandbyFPMode\")\n handler()", "def _fp_setup2(self):\n # TODO: right now it's hard to implement this required stage", "def fig_craco_fiducial(outfile='fig_craco_fiducial.png',\n zmax=2.5,DMmax=2500,\n show_Macquart=False,\n log=True,\n label='$\\\\log_{10} \\; p(DM_{\\\\rm EG},z)$',\n Aconts=[0.01, 0.1, 0.5],\n cmap='jet', show=False, figsize=None,\n vmnx=(None,None),\n grid=None, survey=None):\n # Generate the grid\n if grid is None or survey is None:\n survey, grid = analy_H0_I.craco_mc_survey_grid()\n\n # Unpack\n full_zDMgrid, zvals, dmvals = grid.rates, grid.zvals, grid.dmvals\n FRBZ=survey.frbs['Z']\n FRBDM=survey.DMEGs\n \n ##### imshow of grid #######\n fsize = 14.\n plt.figure(figsize=figsize)\n ax1=plt.axes()\n plt.sca(ax1)\n \n plt.xlabel('z')\n plt.ylabel('${\\\\rm DM}_{\\\\rm EG}$')\n #plt.title(title+str(H0))\n \n # Cut down grid\n zvals, dmvals, zDMgrid = figures.proc_pgrid(\n full_zDMgrid, \n zvals, (0, zmax),\n dmvals, (0, DMmax))\n ddm=dmvals[1]-dmvals[0]\n dz=zvals[1]-zvals[0]\n nz, ndm = zDMgrid.shape\n\n # Contours\n alevels = figures.find_Alevels(full_zDMgrid, Aconts, log=True)\n \n # Ticks\n tvals, ticks = figures.ticks_pgrid(zvals)# , fmt='str4')\n plt.xticks(tvals, ticks)\n tvals, ticks = figures.ticks_pgrid(dmvals, fmt='int')# , fmt='str4')\n plt.yticks(tvals, ticks)\n\n # Image \n im=plt.imshow(zDMgrid.T,cmap=cmap,origin='lower', \n vmin=vmnx[0], vmax=vmnx[1],\n interpolation='None',\n aspect='auto')\n \n styles=['--','-.',':']\n ax=plt.gca()\n cs=ax.contour(zDMgrid.T,levels=alevels,origin='lower',colors=\"white\",linestyles=styles)\n\n ax=plt.gca()\n \n muDMhost=np.log(10**grid.state.host.lmean)\n sigmaDMhost=np.log(10**grid.state.host.lsigma)\n meanHost = np.exp(muDMhost + sigmaDMhost**2/2.)\n medianHost = np.exp(muDMhost) \n print(f\"Host: mean={meanHost}, median={medianHost}\")\n plt.ylim(0,ndm-1)\n plt.xlim(0,nz-1)\n zmax=zvals[-1]\n nz=zvals.size\n #DMbar, zeval = igm.average_DM(zmax, cumul=True, neval=nz+1)\n DM_cosmic = pcosmic.get_mean_DM(zvals, grid.state)\n\n \n #idea is that 1 point is 1, hence...\n zeval = zvals/dz\n DMEG_mean = (DM_cosmic+meanHost)/ddm\n DMEG_median = (DM_cosmic+medianHost)/ddm\n\n # Check median\n f_median = scipy.interpolate.interp1d(\n zvals, DM_cosmic+medianHost, \n fill_value='extrapolate')\n eval_DMEG = f_median(FRBZ)\n above = FRBDM > eval_DMEG\n print(f\"There are {np.sum(above)/len(FRBZ)} above the median\")\n\n if show_Macquart:\n plt.plot(zeval,DMEG_mean,color='gray',linewidth=2,\n label='Macquart relation (mean)')\n plt.plot(zeval,DMEG_median,color='gray',\n linewidth=2, ls='--',\n label='Macquart relation (median)')\n l=plt.legend(loc='lower right',fontsize=12)\n 
#l=plt.legend(bbox_to_anchor=(0.2, 0.8),fontsize=8)\n #for text in l.get_texts():\n #\ttext.set_color(\"white\")\n \n # limit to a reasonable range if logscale\n if log and vmnx[0] is None:\n themax=zDMgrid.max()\n themin=int(themax-4)\n themax=int(themax)\n plt.clim(themin,themax)\n \n ##### add FRB host galaxies at some DM/redshift #####\n if FRBZ is not None:\n iDMs=FRBDM/ddm\n iZ=FRBZ/dz\n # Restrict to plot range\n gd = (FRBDM < DMmax) & (FRBZ < zmax)\n plt.plot(iZ[gd],iDMs[gd],'ko',linestyle=\"\",markersize=2.)\n\n cbar=plt.colorbar(im,fraction=0.046, shrink=1.2,aspect=15,pad=0.05)\n cbar.set_label(label)\n\n fig_utils.set_fontsize(ax, fsize)\n \n plt.tight_layout()\n \n if show:\n plt.show()\n else:\n plt.savefig(outfile, dpi=300)\n print(f\"Wrote: {outfile}\")\n plt.close()", "def go_infFD(self):\n\n response = self.send_lens_cmd(['05', '00', '00', '00'], fast_mode=True)\n self.wait_focus_move()", "def set_fence_mode(self, on):\r\n return self._arm.set_fense_mode(on)", "def OnFloated(self, event):\n self._floating = True\n wx.PostEvent(self, wxDockPaneFloatedEvent())", "def paint_focal_axes(self):\n GL.glTranslatef(*self.focus) # translate to focus\n self.paint_axes(self.sigma)\n GL.glTranslatef(*-self.focus) # translate back", "def _fctl(self):\n if self._fctl_written:\n return\n data = struct.pack(\n \">4L2H2B\",\n self.width, self.height, 0, 0,\n self.delay_numerator, self.delay_denominator,\n 1, 0)\n self._chunk(b\"fcTL\", self._seqno() + data)\n self._fctl_written = True", "def showDetectorMap(display, pfsConfig, detMap, width=100, zoom=0, xcen=None, fiberIds=None, showLegend=True,\n lines=None, alpha=1.0, getCtypeFromReferenceLine=getCtypeFromReferenceLineDefault):\n\n plt.sca(display._impl._figure.axes[0])\n height = detMap.getBBox().getHeight()\n y = np.arange(0, height)\n\n SuNSS = TargetType.SUNSS_IMAGING in pfsConfig.targetType\n\n showAll = False\n if xcen is None:\n if fiberIds is None:\n fiberIds = detMap.fiberId\n showAll = True\n else:\n try:\n fiberIds[0]\n except TypeError:\n fiberIds = [fiberIds]\n\n if len(fiberIds) == 1:\n fid = fiberIds[0]\n try:\n xcen = detMap.getXCenter(fid, height/2)\n except IndexError:\n warnings.warn(\"Index %d is not found in DetectorMap\" % (fid)) # doesn't permit lazy eval\n xcen = detMap.bbox.getWidth()//2\n else:\n pass # xcen is already set\n\n nFiberShown = 0\n for fid in detMap.fiberId:\n ls = '-'\n if fid in pfsConfig.fiberId:\n ind = pfsConfig.selectFiber([fid])\n imagingFiber = pfsConfig.targetType[ind] == TargetType.SUNSS_IMAGING\n if pfsConfig.fiberStatus[ind] == FiberStatus.BROKENFIBER:\n ls = ':'\n color = 'cyan' if SuNSS and imagingFiber else 'magenta'\n else:\n color = 'green' if SuNSS and imagingFiber else 'red'\n else:\n if SuNSS:\n continue\n\n if fiberIds is not None and len(fiberIds) > 1 and fid not in fiberIds:\n continue\n\n try:\n ind = pfsConfig.selectFiber([fid])[0]\n except IndexError: # e.g. 
the pfsConfig contains a subset of the entire PFI\n continue\n\n imagingFiber = pfsConfig.targetType[ind] == TargetType.SUNSS_IMAGING\n if pfsConfig.fiberStatus[ind] == FiberStatus.BROKENFIBER:\n ls = ':'\n color = 'cyan' if SuNSS and imagingFiber else 'magenta'\n else:\n color = 'green' if SuNSS and imagingFiber else 'red'\n\n fiberX = detMap.getXCenter(fid, height//2)\n if showAll or len(fiberIds) > 1 or np.abs(fiberX - xcen) < width/2:\n fiberX = detMap.getXCenter(fid)\n plt.plot(fiberX[::20], y[::20], ls=ls, alpha=alpha, label=f\"{fid}\",\n color=color if showAll else None)\n nFiberShown += 1\n #\n # Plot the position of a set of lines\n #\n if lines:\n if fiberIds is None or len(fiberIds) == 0:\n fiberIds = detMap.fiberId\n stride = len(fiberIds)//25 + 1\n else:\n stride = 1\n\n # find the first and last valid fibres\n firstGood, lastGood = None, None\n ll = lines[0]\n for i, fid in enumerate(fiberIds):\n xc, yc = detMap.findPoint(fid, ll.wavelength)\n\n if np.isnan(xc + yc):\n continue\n\n if firstGood is None:\n firstGood = i\n lastGood = i\n\n for ll in lines:\n ctype = getCtypeFromReferenceLine(ll)\n if ctype == \"IGNORE\":\n continue\n\n xy = np.zeros((2, len(fiberIds))) + np.NaN\n\n for i, fid in enumerate(fiberIds):\n if i%stride != 0 and i not in (firstGood, lastGood):\n continue\n\n xc, yc = detMap.findPoint(fid, ll.wavelength)\n\n if len(fiberIds) == 1:\n display.dot('o', xc, yc, ctype=ctype)\n else:\n xy[0, i] = xc\n xy[1, i] = yc\n\n if len(fiberIds) > 1:\n good = np.isfinite(xy[0])\n if sum(good) > 0:\n plt.plot(xy[0][good], xy[1][good], color=ctype, alpha=alpha)\n\n if not showAll:\n if nFiberShown > 0 and showLegend:\n plt.legend()\n if zoom > 0:\n display.zoom(zoom, xcen, np.mean(y))", "def main(argv=None):\n\n if argv is None:\n argv = sys.argv\n\n parser = E.OptionParser(\n version=\"%prog version: $Id: gff2plot.py 2781 2009-09-10 11:33:14Z andreas $\", usage=globals()[\"__doc__\"])\n\n parser.add_option(\"-f\", \"--file\", dest=\"filenames\", type=\"string\",\n help=\"files[s] to take data from,stdin = -.\")\n parser.add_option(\"\", \"--symbols\", dest=\"symbols\", type=\"string\",\n help=\"symbols to use for each histogram [steps|...].\")\n parser.add_option(\"--slide-show\", dest=\"slide_show\", type=\"choice\",\n choices=(\"first\", \"all\", \"sequence\"),\n help=\"do a slide show - otherwise, write image to file.\")\n parser.add_option(\"--config\", dest=\"filename_config\", type=\"string\",\n help=\"filename of track configuration file.\")\n parser.add_option(\"--dpi\", dest=\"dpi\", type=\"int\",\n help=\"dpi for hardcopy output.\")\n parser.add_option(\"--window-size\", dest=\"window_size\", type=\"int\",\n help=\"window-size.\")\n parser.add_option(\"--output-filename-pattern\", dest=\"output_pattern_image\", type=\"string\",\n help=\"output pattern for images. 
Should contain a '%(contig)s' pattern .\")\n parser.add_option(\"--global-colours\", dest=\"global_colours\", action=\"store_true\",\n help=\"cycle through colours for all tracks.\")\n\n parser.set_defaults(\n filenames=None,\n symbols=\"k-,b-,r-,c-,m-,y-,g-\",\n output_pattern_image=\"%(contig)s.png\",\n slide_show=None,\n window_size=None,\n filename_config=None,\n dpi=None,\n global_colours=False,\n )\n\n (options, args) = E.Start(parser)\n options.symbols = options.symbols.split(\",\")\n\n #--------------------------------------------------------\n # collect all the data\n # list of data per source and contig\n tracks = {}\n extra_features = {}\n\n if options.filenames:\n options.filenames = options.filenames.split(\",\")\n\n if len(args) > 0:\n options.filenames = args\n\n if options.filenames:\n\n for filename in options.filenames:\n\n if filename == \"-\":\n infile = sys.stdin\n else:\n infile = IOTools.openFile(filename)\n\n data = readData(infile)\n\n if filename != \"-\":\n infile.close()\n\n track[filename] = Track(title=filename, data=data)\n\n elif options.filename_config:\n # get track information from config file\n config = ConfigParser.ConfigParser()\n config.read(os.path.expanduser(options.filename_config))\n\n # first extract special sections\n for section in config.sections():\n if section == \"vlines\":\n infile = IOTools.openFile(config.get(section, \"filename\"), \"r\")\n data = readData(infile)\n infile.close()\n extra_features[section] = Track(title=section,\n data=data,\n config=config)\n config.remove_section(section)\n elif section in (\"figure\", \"legend\"):\n extra_features[section] = Track(title=section,\n data=None,\n config=config)\n config.remove_section(section)\n n = 0\n for section in config.sections():\n\n if config.has_option(section, \"filename\"):\n infile = IOTools.openFile(config.get(section, \"filename\"), \"r\")\n data = readData(infile)\n infile.close()\n\n tracks[section] = Track(title=section,\n data=data,\n priority=n,\n config=config)\n\n elif config.has_option(section, \"tracks\"):\n subtracks = config.get(section, \"tracks\")\n subtracks = map(lambda x: x.strip(), subtracks.split(\",\"))\n\n tracks[section] = Track(title=section,\n data=None,\n config=config,\n priority=n,\n subtracks=subtracks)\n n += 1\n\n # compile set of all contigs\n contigs = set()\n for track in tracks.values():\n if track.mData:\n contigs = contigs.union(track.mData.keys())\n\n # re-arrange tracks and subtracks\n tracks = layoutTracks(tracks)\n\n nplots = 0\n figures = []\n legend = None\n for contig in contigs:\n figure, l = plotContig(contig, tracks, options,\n plot_legend=legend is None,\n extra_features=extra_features)\n figures.append(figure)\n if l:\n legend = l\n\n if options.slide_show:\n if options.slide_show == \"first\":\n pylab.show()\n elif options.slide_show == \"all\":\n pylab.show()\n elif options.slide_show == \"sequence\":\n pylab.show()\n else:\n\n extra_args = {}\n if options.dpi:\n extra_args['dpi'] = options.dpi\n\n for contig, figure in zip(contigs, figures):\n params = {'contig': contig}\n filename = options.output_pattern_image % params\n E.info(\"# creating image: %s\" % filename)\n figure.savefig(os.path.expanduser(filename), **extra_args)\n if legend:\n params = {'contig': \"legend\"}\n filename = options.output_pattern_image % params\n E.info(\"creating image: %s\" % filename)\n legend.savefig(os.path.expanduser(filename), **extra_args)\n\n E.info(\"ninput=%i, ncontigs=%i, nplots=%i\" %\n (len(tracks), nplots, len(contigs)))\n\n 
E.Stop()", "def draw_foe_mines(self):\n self.foe_top.draw(self.foe_top_rect.topleft)\n self.foe_middle.draw(self.foe_middle_rect.topleft)\n self.foe_midbot.draw(self.foe_midbot_rect.topleft)\n self.foe_bottom.draw(self.foe_bottom_rect.topleft)", "def guider(x=0,y=0):\n if x==0 and y==0 and (gzero.gxoff<>0 or gzero.gyoff<>0):\n opticalcoupler.HomeXYStage()\n opticalcoupler.MoveXYStage( x=(x+gzero.gxoff), y=(y+gzero.gyoff) )\n camera.status.guider = (x,y)", "def calc_fffb_inhibition(self) -> None:\n # Feedforward inhibition\n ffi = self.spec.ff * max(self.avg_net - self.spec.ff0, 0)\n # Feedback inhibition\n self.fbi = self.spec.fb_dt * (self.spec.fb * self.avg_act - self.fbi)\n # Global inhibition\n self.gc_i = self.spec.gi * (ffi * self.fbi)", "def SetFloatable(self, floatable):\n if self._floatable != floatable:\n self._floatable = floatable\n def closure(pane):\n pane.Floatable(floatable)\n self._PaneInfoOperation(closure)", "def enable3D(self):\r\n if(self.dataController.fileLoaded==True):\r\n self.dataController.toggleInteractiveMode()\r\n\r\n self.midsagittalView = False\r\n self.frontView = False\r\n self.topView = False\r\n self.bottomView = False\r\n self.threeDView = True", "def late_gradient_fusion():\n pass", "def main(config):\n file_paths_info = [('GLOFRIS','WATCH','ARG_inunriver_historical_000000000WATCH_1980_rp01000.tif'),\n \t\t\t\t('GLOFRIS','RCP45','ARG_inunriver_rcp4p5_0000GFDL-ESM2M_2030_rp01000.tif'),\n \t\t\t\t('GLOFRIS','RCP85','ARG_inunriver_rcp8p5_0000GFDL-ESM2M_2030_rp01000.tif'),\n \t\t\t\t('FATHOM','AR_fluvial_undefended_merged','AR-FU-1000.tif'),\n \t\t\t\t('FATHOM','AR_pluvial_undefended_merged','AR-PU-1000.tif')\n \t\t\t\t]\n figure_names = ['GLOFRIS-WATCH-fluvial','GLOFRIS-RCP45-fluvial','GLOFRIS-RCP85-fluvial','FATHOM-fluvial','FATHOM-pluvial']\n figure_titles = ['current fluvial flooding','RCP4.5 fluvial flooding','RCP8.5 fluvial flooding','current fluvial flooding','current pluvial flooding']\n for f_i in range(len(file_paths_info)):\n\t hazard_file = os.path.join(config['paths']['data'],'flood_data', file_paths_info[f_i][0],file_paths_info[f_i][1],file_paths_info[f_i][2])\n\t output_file = os.path.join(config['paths']['figures'], 'flood-map-{}.png'.format(figure_names[f_i]))\n\t ax = get_axes()\n\t plot_basemap(ax, config['paths']['data'])\n\t scale_bar(ax, location=(0.8, 0.05))\n\t plot_basemap_labels(ax, config['paths']['data'], include_regions=True,include_zorder=3)\n\n\t proj_lat_lon = ccrs.PlateCarree()\n\n\n\t # Create color map\n\t colors = plt.get_cmap('Blues')\n\n\t # Read in raster data\n\t data, lat_lon_extent = get_data(hazard_file)\n\t data[(data <= 0) | (data > 5)] = np.nan\n\t max_val = np.nanmax(data)\n\t norm=mpl.colors.Normalize(vmin=0, vmax=max_val)\n\n\t # Plot population data\n\t im = ax.imshow(data, extent=lat_lon_extent,transform=proj_lat_lon, cmap=colors,norm =norm, zorder=2)\n\n\t # Add colorbar\n\t cbar = plt.colorbar(im, ax=ax,fraction=0.1, shrink=0.87,pad=0.01, drawedges=False, orientation='horizontal',\n\t norm=mpl.colors.Normalize(vmin=0, vmax=max_val), ticks=list(np.linspace(0,max_val,3)))\n\t cbar.set_clim(vmin=0,vmax=max_val)\n\n\n\t cbar.outline.set_color(\"none\")\n\t cbar.ax.yaxis.set_tick_params(color='black')\n\t cbar.ax.set_xlabel('Flood depths (m)',fontsize=12,color='black')\n\n\t plt.title('1 in 1000 year {}'.format(figure_titles[f_i]), fontsize = 14)\n\t save_fig(output_file)\n\t plt.close()", "def terminatePlane3D():\n dislin.grffin()", "def addFluxcal():\n # Overall\n i = s.getScriptInt(odi.INDX_INT_NOBS_FLUX) 
- 1\n if i < 0: i = 0\n s.setScriptInt(odi.INDX_INT_NOBS_FLUX, i)\n\n # Primary\n i = s.getScriptInt(odi.INDX_INT_NOBS_FLUXPRI) - 1\n if i < 0: i = 0\n s.setScriptInt(odi.INDX_INT_NOBS_FLUXPRI, i)", "def graphics(env, fovea, objects, unit):\n plt.clf()\n\n env = environment.redraw(env, unit, objects)\n fovea_im = fovea.get_focus_image(env)\n\n plt.subplot(121)\n plt.title('Training environment')\n plt.xlim(0, unit)\n plt.ylim(0, unit)\n plt.imshow(env)\n\n # PLOT DESK EDGES\n plt.plot([0.2*unit, 0.2*unit, 0.8*unit, 0.8*unit, 0.2*unit],\n [0.2*unit, 0.8*unit, 0.8*unit, 0.2*unit, 0.2*unit], 'w-'\n )\n\n # PLOT FOVEA EDGES\n fov_indices = fovea.get_index_values()\n plt.plot([fov_indices[0][0], fov_indices[0][0], fov_indices[0][1],\n fov_indices[0][1], fov_indices[0][0]],\n [fov_indices[1][0], fov_indices[1][1], fov_indices[1][1],\n fov_indices[1][0], fov_indices[1][0]], 'w-'\n )\n\n plt.subplot(122)\n plt.title('Focus image')\n plt.imshow(fovea_im)\n\n plt.draw()\n plt.pause(0.01)", "def early_gradient_fusion():\n pass", "def sink_floats(qtile):\n for window in qtile.current_group.windows:\n if window.floating:\n window.toggle_floating()", "def update_focal_axes(self):\n self.update_sigma()\n self.updateGL()", "def on_floated(self, event):\n if not self._guard & FLOATED_GUARD:\n self._guard |= FLOATED_GUARD\n try:\n self.declaration.floating = True\n finally:\n self._guard &= ~FLOATED_GUARD", "def freespaceImageAnalysis( fids, guesses = None, fit=True, bgInput=None, bgPcInput=None, shapes=[None], zeroCorrection=0, zeroCorrectionPC=0,\n keys=None, fitModule=bump, extraPicDictionaries=None, newAnnotation=False, onlyThisPic=None, pltVSize=5, \n plotSigmas=False, plotCounts=False, manualColorRange=None, calcTemperature=False, clearOutput=True, \n dataRange=None, guessTemp=10e-6, trackFitCenter=False, picsPerRep=1, startPic=0, binningParams=None, \n win=pw.PictureWindow(), transferAnalysisOpts=None, tferBinningParams=None, tferWin= pw.PictureWindow(),\n extraTferAnalysisArgs={}, emGainSetting=300, lastConditionIsBackGround=True, showTferAnalysisPlots=True,\n show2dFitsAndResiduals=True, plotFitAmps=False, indvColorRanges=False, fitF2D=gaussian_2d.f_notheta, \n rmHighCounts=True, useBase=True, weightBackgroundByLoading=True, returnPics=False, forceNoAnnotation=False):\n fids = [fids] if type(fids) == int else fids\n keys = [None for _ in fids] if keys is None else keys\n sortedStackedPics = {}\n initThresholds = [None]\n picsForBg = []\n bgWeights = []\n isAnnotatedList = []\n for filenum, fid in enumerate(fids):\n if transferAnalysisOpts is not None:\n res = ta.stage1TransferAnalysis( fid, transferAnalysisOpts, useBase=useBase, **extraTferAnalysisArgs )\n (initAtoms, tferAtoms, initAtomsPs, tferAtomsPs, key, keyName, initPicCounts, tferPicCounts, repetitions, initThresholds,\n avgPics, tferThresholds, initAtomImages, tferAtomImages, basicInfoStr, ensembleHits, groupedPostSelectedPics, isAnnotated) = res\n isAnnotatedList.append(isAnnotated)\n # assumes that you only want to look at the first condition. 
\n for varPics in groupedPostSelectedPics: # don't remember why 0 works if false...\n picsForBg.append(varPics[-1 if lastConditionIsBackGround else 0])\n bgWeights.append(len(varPics[0]))\n allFSIPics = [ varpics[0][startPic::picsPerRep] for varpics in groupedPostSelectedPics]\n if showTferAnalysisPlots:\n fig, axs = plt.subplots(1,2)\n mp.makeAvgPlts( axs[0], axs[1], avgPics, transferAnalysisOpts, ['r','g','b'] ) \n allFSIPics = [win.window( np.array(pics) ) for pics in allFSIPics]\n allFSIPics = ah.softwareBinning( binningParams, allFSIPics )\n elif type(fid) == int:\n ### For looking at either PGC imgs or FSI imgs \n with exp.ExpFile(fid) as file:\n # I think this only makes sense if there is a specific bg pic in the rotation\n picsForBg.append(list(file.get_pics()))\n allFSIPics = file.get_pics()[startPic::picsPerRep]\n _, key = file.get_key()\n if len(np.array(key).shape) == 2:\n key = key[:,0]\n file.get_basic_info()\n allFSIPics = win.window( allFSIPics )\n allFSIPics = ah.softwareBinning( binningParams, allFSIPics )\n allFSIPics = np.reshape( allFSIPics, (len(key), int(allFSIPics.shape[0]/len(key)), allFSIPics.shape[1], allFSIPics.shape[2]) )\n else:\n ### Assumes given pics have the same start pic and increment (picsPerRep).\n # doesn't combine well w/ transfer analysis\n picsForBg.append(fid)\n allFSIPics = fid[startPic::picsPerRep]\n print(\"Assuming input is list of all pics, then splices to get FSI pics. Old code assumed the given were FSI pics.\")\n allFSIPics = win.window( allFSIPics )\n allFSIPics = ah.softwareBinning( binningParams, allFSIPics )\n allFSIPics = np.reshape( allFSIPics, (len(key), int(allFSIPics.shape[0]/len(key)), allFSIPics.shape[1], allFSIPics.shape[2]) )\n # ##############\n if keys[filenum] is not None:\n key = keys[filenum]\n for i, keyV in enumerate(key):\n keyV = misc.round_sig_str(keyV)\n sortedStackedPics[keyV] = np.append(sortedStackedPics[keyV], allFSIPics[i],axis=0) if (keyV in sortedStackedPics) else allFSIPics[i] \n if lastConditionIsBackGround:\n bgInput, pcBgInput = getBgImgs(picsForBg, startPic = startPic, picsPerRep = picsPerRep, rmHighCounts=rmHighCounts, bgWeights=bgWeights, \n weightBackgrounds=weightBackgroundByLoading)\n elif bgInput == 'lastPic':\n bgInput, pcBgInput = getBgImgs(picsForBg, startPic = picsPerRep-1, picsPerRep = picsPerRep, rmHighCounts=rmHighCounts, bgWeights=bgWeights,\n weightBackgrounds=weightBackgroundByLoading )\n if bgInput is not None: # was broken and not working if not given bg\n bgInput = win.window(bgInput)\n bgInput = ah.softwareBinning(binningParams, bgInput)\n if bgPcInput is not None:\n bgPcInput = win.window(bgPcInput)\n bgPcInput = ah.softwareBinning(binningParams, bgPcInput) \n \n if extraPicDictionaries is not None:\n if type(extraPicDictionaries) == dict:\n extraPicDictionaries = [extraPicDictionaries]\n for dictionary in extraPicDictionaries:\n for keyV, pics in dictionary.items():\n sortedStackedPics[keyV] = (np.append(sortedStackedPics[keyV], pics,axis=0) if keyV in sortedStackedPics else pics) \n sortedStackedKeyFl = [float(keyStr) for keyStr in sortedStackedPics.keys()]\n sortedKey, sortedStackedPics = ah.applyDataRange(dataRange, sortedStackedPics, list(sorted(sortedStackedKeyFl)))\n numVars = len(sortedStackedPics.items())\n if len(np.array(shapes).shape) == 1:\n shapes = [shapes for _ in range(numVars)] \n if guesses is None:\n guesses = [[None for _ in range(4)] for _ in range(numVars)]\n if len(np.array(bgInput).shape) == 2 or bgInput == None:\n bgInput = [bgInput for _ in 
range(numVars)]\n if len(np.array(bgPcInput).shape) == 2 or bgPcInput == None:\n bgPcInput = [bgPcInput for _ in range(numVars)]\n \n datalen, avgFitSigmas, images, hFitParams, hFitErrs, vFitParams, vFitErrs, fitParams2D, fitErrs2D = [{} for _ in range(9)]\n titles = ['Bare', 'Photon-Count', 'Bare-mbg', 'Photon-Count-mbg']\n assert(len(sortedKey)>0)\n for vari, keyV in enumerate(sortedKey):\n keyV=misc.round_sig_str(keyV)\n if vari==0:\n initKeyv = keyV\n varPics = sortedStackedPics[keyV]\n # 0 is init atom pics for post-selection on atom number... if we wanted to.\n expansionPics = rmHighCountPics(varPics,7000) if rmHighCounts else varPics\n datalen[keyV] = len(expansionPics)\n expPhotonCountImage = photonCounting(expansionPics, 120)[0] / len(expansionPics)\n bgPhotonCountImage = np.zeros(expansionPics[0].shape) if bgPcInput[vari] is None else bgPcInput[vari]\n expAvg = np.mean(expansionPics, 0)\n bgAvg = np.zeros(expansionPics[0].shape) if (bgInput[vari] is None or len(bgInput[vari]) == 1) else bgInput[vari]\n \n if bgPhotonCountImage is None:\n print('no bg photon', expAvg.shape)\n bgPhotonCount = np.zeros(photonCountImage.shape)\n avg_mbg = expAvg - bgAvg\n avg_mbgpc = expPhotonCountImage - bgPhotonCountImage\n images[keyV] = [expAvg, expPhotonCountImage, avg_mbg, avg_mbgpc]\n hFitParams[keyV], hFitErrs[keyV], vFitParams[keyV], vFitErrs[keyV], fitParams2D[keyV], fitErrs2D[keyV] = [[] for _ in range(6)]\n for imnum, (im, guess) in enumerate(zip(images[keyV], guesses[vari])):\n if fit:\n # fancy guess_x and guess_y values use the initial fitted value, typically short time, as a guess.\n _, pictureFitParams2d, pictureFitErrors2d, v_params, v_errs, h_params, h_errs = ah.fitPic(\n im, guessSigma_x=5, guessSigma_y=5, showFit=False, \n guess_x=None if vari==0 else fitParams2D[initKeyv][imnum][1], guess_y=None if vari==0 else fitParams2D[initKeyv][imnum][2],\n fitF=fitF2D)\n fitParams2D[keyV].append(pictureFitParams2d)\n fitErrs2D[keyV].append(pictureFitErrors2d)\n hFitParams[keyV].append(h_params)\n hFitErrs[keyV].append(h_errs)\n vFitParams[keyV].append(v_params)\n vFitErrs[keyV].append(v_errs)\n # conversion from the num of pixels on the camera to microns at the focus of the tweezers\n cf = 16e-6/64\n mins, maxes = [[], []]\n imgs_ = np.array(list(images.values()))\n for imgInc in range(4):\n if indvColorRanges:\n mins.append(None)\n maxes.append(None)\n elif manualColorRange is None:\n mins.append(min(imgs_[:,imgInc].flatten()))\n maxes.append(max(imgs_[:,imgInc].flatten()))\n else:\n mins.append(manualColorRange[0])\n maxes.append(manualColorRange[1])\n numVariations = len(images)\n if onlyThisPic is None:\n fig, axs = plt.subplots(numVariations, 4, figsize=(20, pltVSize*numVariations))\n if numVariations == 1:\n axs = np.array([axs])\n bgFig, bgAxs = plt.subplots(1, 2, figsize=(20, pltVSize))\n else:\n numRows = int(np.ceil((numVariations+3)/4))\n fig, axs = plt.subplots(numRows, 4 if numVariations>1 else 3, figsize=(20, pltVSize*numRows))\n avgPicAx = axs.flatten()[-3]\n avgPicFig = fig\n bgAxs = [axs.flatten()[-1], axs.flatten()[-2]]\n bgFig = fig\n if show2dFitsAndResiduals:\n fig2d, axs2d = plt.subplots(*((2,numVariations) if numVariations>1 else (1,2)))\n keyPlt = np.zeros(len(images))\n (totalSignal, hfitCenter, hFitCenterErrs, hSigmas, hSigmaErrs, h_amp, hAmpErrs, vfitCenter, vFitCenterErrs, vSigmas, vSigmaErrs, v_amp, \n vAmpErrs, hSigma2D, hSigma2dErr, vSigma2D, vSigma2dErr) = [np.zeros((len(images), 4)) for _ in range(17)]\n \n for vari, ((keyV,ims), hParamSet, hErr_set, 
vParamSet, vErr_set, paramSet2D, errSet2D) in enumerate(zip(\n images.items(), *[dic.values() for dic in [hFitParams, hFitErrs, vFitParams, vFitErrs, fitParams2D, fitErrs2D]])):\n for which in range(4):\n if onlyThisPic is None:\n (im, ax, title, min_, max_, hparams, hErrs, vparams, vErrs, param2d, err2d\n ) = [obj[which] for obj in (ims, axs[vari], titles, mins, maxes, hParamSet, hErr_set, vParamSet, vErr_set, paramSet2D, errSet2D)] \n else:\n which = onlyThisPic\n ax = axs.flatten()[vari]\n (im, title, min_, max_, hparams, hErrs, vparams, vErrs, param2d, err2d\n ) = [obj[which] for obj in (ims, titles, mins, maxes, hParamSet, hErr_set, vParamSet, vErr_set, paramSet2D, errSet2D)] \n h_amp[vari][which], hfitCenter[vari][which], hSigmas[vari][which] = hparams[0], hparams[1], hparams[2]*cf*1e6\n hAmpErrs[vari][which], hFitCenterErrs[vari][which], hSigmaErrs[vari][which] = hErrs[0], hErrs[1], hErrs[2]*cf*1e6\n v_amp[vari][which], vfitCenter[vari][which], vSigmas[vari][which] = vparams[0], vparams[1], vparams[2]*cf*1e6\n vAmpErrs[vari][which], vFitCenterErrs[vari][which], vSigmaErrs[vari][which] = vErrs[0], vErrs[1], vErrs[2]*cf*1e6\n hSigma2D[vari][which], hSigma2dErr[vari][which], vSigma2D[vari][which], vSigma2dErr[vari][which] = [\n val*cf*1e6 for val in [param2d[-3], err2d[-3], param2d[-2], err2d[-2]]]\n \n totalSignal[vari][which] = np.sum(im.flatten())\n keyPlt[vari] = keyV\n res = mp.fancyImshow(fig, ax, im, imageArgs={'cmap':dark_viridis_cmap, 'vmin':min_, 'vmax':max_}, \n hFitParams=hparams, vFitParams=vparams, fitModule=fitModule, flipVAx = True, fitParams2D=param2d)\n ax.set_title(keyV + ': ' + str(datalen[keyV]) + ';\\n' + title + ': ' + misc.errString(hSigmas[vari][which],hSigmaErrs[vari][which]) \n + r'$\\mu m$ sigma, ' + misc.round_sig_str(totalSignal[vari][which],5), fontsize=12) \n if show2dFitsAndResiduals:\n X, Y = np.meshgrid(np.arange(len(im[0])), np.arange(len(im)))\n data_fitted = fitF2D((X,Y), *param2d)\n fitProper = data_fitted.reshape(im.shape[0],im.shape[1])\n ax1 = axs2d[0] if numVariations == 1 else axs2d[0,vari]\n ax2 = axs2d[1] if numVariations == 1 else axs2d[1,vari]\n imr = ax1.imshow(fitProper, vmin=min_, vmax=max_)\n mp.addAxColorbar(fig2d, ax1, imr)\n ax1.contour(np.arange(len(im[0])), np.arange(len(im)), fitProper, 4, colors='w', alpha=0.2)\n imr = ax2.imshow(fitProper-im)\n mp.addAxColorbar(fig2d, ax2, imr)\n ax2.contour(np.arange(len(im[0])), np.arange(len(im)), fitProper, 4, colors='w', alpha=0.2)\n if onlyThisPic is not None:\n break\n \n mp.fancyImshow(avgPicFig, avgPicAx, np.mean([img[onlyThisPic] for img in images.values()],axis=0), imageArgs={'cmap':dark_viridis_cmap},flipVAx = True)\n avgPicAx.set_title('Average Over Variations')\n ### Plotting background and photon counted background\n mp.fancyImshow(bgFig, bgAxs[0], bgAvg, imageArgs={'cmap':dark_viridis_cmap},flipVAx = True) \n bgAxs[0].set_title('Background image (' + str(len(picsForBg)/picsPerRep) + ')')\n mp.fancyImshow(bgFig, bgAxs[1], bgPhotonCountImage, imageArgs={'cmap':dark_viridis_cmap},flipVAx = True) \n bgAxs[1].set_title('Photon counted background image (' + str(len(picsForBg)/picsPerRep) + ')')\n fig.subplots_adjust(left=0,right=1,bottom=0.1, hspace=0.2, **({'top': 0.7, 'wspace': 0.4} if (onlyThisPic is None) else {'top': 0.9, 'wspace': 0.3}))\n \n disp.display(fig)\n temps, tempErrs, tempFitVs, = [],[],[]\n if calcTemperature: \n for sigmas, sigmaerrs in zip([hSigmas, vSigmas, hSigma2D, vSigma2D],[hSigmaErrs, vSigmaErrs, hSigma2dErr, vSigma2dErr]):\n mbgSigmas = np.array([elt[2] for 
elt in sigmas])\n mbgSigmaErrs = np.array([elt[2] for elt in sigmaerrs])\n myGuess = [0.0, min((mbgSigmas)*1e-6), guessTemp]\n temp, fitV, cov = ah.calcBallisticTemperature(keyPlt*1e-3, (mbgSigmas)*1e-6, guess = myGuess, sizeErrors = mbgSigmaErrs)\n error = np.sqrt(np.diag(cov))\n temps.append(temp)\n tempErrs.append(error[2])\n tempFitVs.append(fitV)\n numAxisCol = int(plotSigmas) + int(plotCounts) + int(trackFitCenter)\n if numAxisCol != 0:\n fig2, axs = plt.subplots(1, numAxisCol, figsize = (15, 5)) \n fig2.subplots_adjust(top=0.75, wspace = 0.4)\n colors = ['b','k','c','purple']\n if plotSigmas:\n ax = (axs if numAxisCol == 1 else axs[0]) \n stdStyle = dict(marker='o',linestyle='',capsize=3)\n if onlyThisPic is not None:\n ax.errorbar(keyPlt, hSigmas[:,onlyThisPic], hSigmaErrs[:,onlyThisPic], color=colors[0], label='h '+titles[onlyThisPic], **stdStyle);\n ax.errorbar(keyPlt, hSigma2D[:,onlyThisPic], hSigma2dErr[:,onlyThisPic], color=colors[1], label='2dh '+titles[onlyThisPic], **stdStyle);\n ax.errorbar(keyPlt, vSigmas[:,onlyThisPic], vSigmaErrs[:,onlyThisPic], color=colors[2], label='v '+titles[onlyThisPic], **stdStyle);\n ax.errorbar(keyPlt, vSigma2D[:,onlyThisPic], vSigma2dErr[:,onlyThisPic], color=colors[3], label='2dv '+titles[onlyThisPic], **stdStyle);\n else:\n for whichPic in range(4):\n ax.errorbar(keyPlt, hSigmas[:,whichPic], hSigmaErrs[:,whichPic], color='b', label='h '+titles[whichPic], **stdStyle);\n ax.errorbar(keyPlt, vSigmas[:,whichPic], vSigmaErrs[:,whichPic], color='c', label='v '+titles[whichPic], **stdStyle);\n ax.set_ylim(max(0,ax.get_ylim()[0]),min([ax.get_ylim()[1],5]))\n ax.set_ylabel(r'Fit Sigma ($\\mu m$)')\n \n if calcTemperature:\n # converting time to s, hSigmas in um \n xPoints = np.linspace(min(keyPlt), max(keyPlt))*1e-3\n for num, fitV in enumerate(tempFitVs):\n #ax.plot(xPoints*1e3, LargeBeamMotExpansion.f(xPoints, *myGuess)*1e6, label = 'guess')\n ax.plot(xPoints*1e3, LargeBeamMotExpansion.f(xPoints, *fitV)*1e6, color=colors[num])\n ax.legend()\n\n if plotFitAmps: \n ax = (axs if numAxisCol == 1 else axs[0])\n ampAx = ax.twinx()\n\n if onlyThisPic is not None:\n ampAx.errorbar(keyPlt, h_amp[:,onlyThisPic], hAmpErrs[:,onlyThisPic], label='h '+titles[onlyThisPic], color = 'orange', **stdStyle);\n ampAx.errorbar(keyPlt, v_amp[:,onlyThisPic], vAmpErrs[:,onlyThisPic], label='v '+titles[onlyThisPic], color = 'r', **stdStyle);\n else:\n for whichPic in range(4):\n ampAx.errorbar(keyPlt, h_amp[:,whichPic], hAmpErrs[:,whichPic], label='h '+titles[whichPic], color = 'orange', **stdStyle);\n ampAx.errorbar(keyPlt, v_amp[:,whichPic], vAmpErrs[:,whichPic], label='v '+titles[whichPic], color = 'r', **stdStyle);\n [tick.set_color('red') for tick in ampAx.yaxis.get_ticklines()]\n [tick.set_color('red') for tick in ampAx.yaxis.get_ticklabels()]\n ampAx.set_ylabel(r'Fit h_amps', color = 'r')\n \n hTotalPhotons, vTotalPhotons = None, None\n if plotCounts:\n # numAxCol = 1: ax = axs\n # numAxCol = 2: plotSigmas + plotCounts -- ax = axs[1]\n # numAxCol = 2: plotCounts + trackFitCenter -- ax = axs[0]\n # numAxCol = 3: ax = axs[1]\n if numAxisCol == 1:\n ax = axs\n elif numAxisCol == 2:\n ax = axs[1 if plotSigmas else 0]\n else:\n ax = axs[1]\n # Create axis to plot photon counts\n ax.set_ylabel(r'Integrated signal')\n photon_axis = ax.twinx()\n # This is not currently doing any correct for e.g. the loading rate.\n countToCameraPhotonEM = 0.018577 / (emGainSetting/200) # the float is is EM200. 
\n countToScatteredPhotonEM = 0.018577 / 0.07 / (emGainSetting/200)\n\n if onlyThisPic is not None:\n # calculate number of photons\n hamp = h_amp[:,onlyThisPic]*len(expansionPics[0][0]) # Horizontal \"un\"normalization for number of columns begin averaged.\n vamp = v_amp[:,onlyThisPic]*len(expansionPics[0]) \n hsigpx = hSigmas[:,onlyThisPic]/(16/64) # Convert from um back to to pixels.\n vsigpx = vSigmas[:,onlyThisPic]/(16/64)\n htotalCountsPerPic = bump.area_under(hamp, hsigpx)\n vtotalCountsPerPic = bump.area_under(vamp, vsigpx)\n hTotalPhotons = countToScatteredPhotonEM*htotalCountsPerPic\n vTotalPhotons = countToScatteredPhotonEM*vtotalCountsPerPic\n ax.plot(keyPlt, totalSignal[:,onlyThisPic], marker='o', linestyle='', label=titles[onlyThisPic]);\n photon_axis.plot(keyPlt, hTotalPhotons, marker='o', linestyle='', color = 'r', label='Horizontal')\n photon_axis.plot(keyPlt, vTotalPhotons, marker='o', linestyle='', color = 'orange', label='Vertical')\n else:\n for whichPic in range(4):\n # See above comments\n amp = h_amp[:,whichPic]*len(expansionPics[0][0]) \n sig = hSigmas[:,whichPic]/(16/64) \n totalCountsPerPic = bump.area_under(amp, sig)\n hTotalPhotons = countToScatteredPhotonEM*totalCountsPerPic\n ax.plot(keyPlt, totalSignal[:,whichPic], marker='o', linestyle='', label=titles[whichPic]);\n photon_axis.plot(keyPlt, hTotalPhotons, marker='o', linestyle='', color = ['red', 'orange', 'yellow', 'pink'][whichPic]) \n ax.legend()\n photon_axis.legend()\n [tick.set_color('red') for tick in photon_axis.yaxis.get_ticklines()]\n [tick.set_color('red') for tick in photon_axis.yaxis.get_ticklabels()]\n photon_axis.set_ylabel(r'Fit-Based Avg Scattered Photon/Img', color = 'r')\n if trackFitCenter:\n #numaxcol = 1: ax = axs\n #numaxcol = 2: trackfitcenter + plothSigmas: ax = axs[1]\n #numaxcol = 2: trackfitcenter + plotCounts: ax = axs[1]\n #numaxcol = 3: ax = axs[2]\n ax = (axs if numAxisCol == 1 else axs[-1])\n if onlyThisPic is not None:\n #ax.errorbar(keyPlt, hfitCenter[:,onlyThisPic], hFitCenterErrs[:,onlyThisPic], marker='o', linestyle='', capsize=3, label=titles[onlyThisPic]);\n ax.errorbar(keyPlt, vfitCenter[:,onlyThisPic], vFitCenterErrs[:,onlyThisPic], marker='o', linestyle='', capsize=3, label=titles[onlyThisPic]);\n #def accel(t, x0, a):\n # return x0 + 0.5*a*t**2\n #accelFit, AccelCov = opt.curve_fit(accel, keyPlt*1e-3, hfitCenter[:,onlyThisPic], sigma = hFitCenterErrs[:,onlyThisPic])\n #fitx = np.linspace(keyPlt[0], keyPlt[-1])*1e-3\n #fity = accel(fitx, *accelFit)\n #ax.plot(fitx*1e3, fity)\n else:\n for whichPic in range(4):\n ax.errorbar(keyPlt, hfitCenter[:,whichPic], hFitCenterErrs[:,whichPic], marker='o', linestyle='', capsize=3, label=titles[whichPic]);\n #accelErr = np.sqrt(np.diag(AccelCov))\n fig2.legend()\n ax.set_ylabel(r'Fit Centers (pix)')\n ax.set_xlabel('time (ms)')\n \n if numAxisCol != 0:\n disp.display(fig2) \n \n if not forceNoAnnotation:\n for fid, isAnnotated in zip(fids, isAnnotatedList):\n if not isAnnotated:\n if type(fid) == int or type(fid) == type(''):\n if newAnnotation or not exp.checkAnnotation(fid, force=False, quiet=True, useBase=useBase):\n exp.annotate(fid, useBase=useBase)\n if clearOutput:\n disp.clear_output()\n if calcTemperature: \n for temp, err, label in zip(temps, tempErrs, ['Hor', 'Vert', 'Hor2D', 'Vert2D']): \n print(label + ' temperature = ' + misc.errString(temp*1e6, err*1e6) + 'uk')\n\n for fid in fids:\n if type(fid) == int:\n expTitle, _, lev = exp.getAnnotation(fid)\n expTitle = ''.join('#' for _ in range(lev)) + ' File ' + 
str(fid) + ': ' + expTitle\n disp.display(disp.Markdown(expTitle))\n with exp.ExpFile(fid) as file:\n file.get_basic_info()\n if trackFitCenter:\n pass\n #print('Acceleration in Mpix/s^2 = ' + misc.errString(accelFit[1], accelErr[1]))\n if transferAnalysisOpts is not None and showTferAnalysisPlots:\n colors, colors2 = misc.getColors(len(transferAnalysisOpts.initLocs()) + 2)#, cmStr=dataColor)\n pltShape = (transferAnalysisOpts.initLocsIn[-1], transferAnalysisOpts.initLocsIn[-2])\n # mp.plotThresholdHists([initThresholds[0][0],initThresholds[1][0]], colors, shape=pltShape)\n mp.plotThresholdHists([initThresholds[0][0], initThresholds[0][0]], colors, shape=[1,2])\n returnDictionary = {'images':images, 'fits':hFitParams, 'errs':hFitErrs, 'hSigmas':hSigmas, 'sigmaErrors':hSigmaErrs, 'dataKey':keyPlt, \n 'hTotalPhotons':hTotalPhotons, 'tempCalc':temps, 'tempCalcErr':tempErrs, 'initThresholds':initThresholds[0], \n '2DFit':fitParams2D, '2DErr':fitErrs2D, 'bgPics':picsForBg, 'dataLength':datalen}\n if returnPics: \n returnDictionary['pics'] = sortedStackedPics\n return returnDictionary", "def main():\n\n varList = {'beta': 6., 'convSpeed': 1.2, 'Mark': 0., 'axi': 1, 'acModes': 4, 'Nr': 801, 'Tf': 600., 'xf': 0.51}\n\n # Solve steady flame.\n # BC1: I have the attachment BC at r = 1, always\n # BC2: I need to set dF/dr = 0 at r = 0 iff Mark != 0\n [qMean, r, FMean] = steady_flame_area_FD3(varList['Mark'], varList['beta'], varList['axi'], varList['Nr'])\n r = r * varList['beta']\n\n # Calculate mean flame derivatives\n dFMeanDr = derivsnew.FD1_CT2_D(FMean, r[1] - r[0])\n d2FMeanDr2 = derivsnew.FD2_CT2_D(FMean, r[1] - r[0])\n\n #Apply BC smooth tip:\n if(varList['Mark']!=0.0):\n dFMeanDr[-1] = 0.0\n\n # Use correct number of points. Remember that the extrems need to be set depending on the BC!\n # The attach BC (first point) is always assumed to be true and removed from the vector list\n if(varList['Mark']==0):\n Nr = varList['Nr'] / 2\n dFMeanDr = dFMeanDr[1:]\n d2FMeanDr2 = d2FMeanDr2[1:]\n r = r[1:]\n # The smooth BC holds only if Mark!=0 (second derivatives appear): remove also the last point\n else:\n Nr = varList['Nr'] / 2 - 1\n dFMeanDr = dFMeanDr[1:-1]\n d2FMeanDr2 = d2FMeanDr2[1:-1]\n r = r[1:-1]\n\n # Calculate geometric values\n den = 1 + varList['beta'] * varList['beta'] * dFMeanDr * dFMeanDr\n dR = r[1] - r[0]\n # Set Nx equal to Nr for now.\n # The implementation is more complicated if they differ, and need to interpolate between values.\n Nx = Nr\n\n # Nonuniform grid spacing along x!\n # Nx = length(dx) has to hold.\n dx = np.empty(len(FMean) - 1)\n for ii in range(1, len(FMean)):\n dx[ii - 1] = FMean[ii] - FMean[ii - 1]\n\n [A, B, C, tau] = loadAcoustics(varList['xf'], varList['Tf'], varList['acModes'], varList['beta'])\n\n Matrix = buildMatrix(Nr, dR, varList['beta'], den, r, FMean, dFMeanDr, d2FMeanDr2, varList['Mark'], varList['acModes'], A,\n B, C, Nx, dx, tau, qMean, varList['convSpeed'])\n\n [d, W, V] = eigProblem.solveEigProb(Matrix)\n [dnew, Wnew, Vnew] = eigProblem.selectUnstable(d, W, V)\n\n print dnew / (2. 
* np.pi)", "def HasFOV(self):\n return _gmat_py.Hardware_HasFOV(self)", "def _set_draw_mode(draw_mode):\n###############################################################################\n global _draw_mode\n _draw_mode = draw_mode", "def toggle_wireframe(self):\n self.view['wireframe'] = not self.view['wireframe']\n self.update_flags()", "def plot_fppy(self,LAXIS,xbl,xbr,ybu,ybd,ilg): \n\t\t\n # load x GRID\n grd1 = self.xzn0\n\t\n # load DATA to plot\n plt1 = self.fppy\n\t\t\t\t\n # create FIGURE\n plt.figure(figsize=(7,6))\n\t\t\n # format AXIS, make sure it is exponential\n plt.gca().yaxis.get_major_formatter().set_powerlimits((0,0))\t\t\n\n # set plot boundaries \n to_plot = [plt1]\t\t\n self.set_plt_axis(LAXIS,xbl,xbr,ybu,ybd,to_plot)\n\t\t\t\t\n # plot DATA \n plt.title(r'pressure flux y')\n plt.plot(grd1,plt1,color='brown',label = r'f$_{py}$')\n\n # define and show x/y LABELS\n setxlabel = r\"r (cm)\"\n setylabel = r\"$f_{py}$ (erg cm$^{-2}$ s$^{-1}$)\"\n plt.xlabel(setxlabel)\n plt.ylabel(setylabel)\n\t\t\n # show LEGEND\n plt.legend(loc=ilg,prop={'size':18})\n\n # display PLOT\n plt.show(block=False)\n\n # save PLOT\n plt.savefig('RESULTS/'+self.data_prefix+'mean_fppy.png')", "def friewallOn():\n pass", "def flip_faceup(self):\r\n self.faceup = True", "def app(ngr=100,c2a=1.6235):\n import matplotlib.pyplot as plt\n\n ## small donuts\n # plt.gcf().clf()\n grs = main(mu=0,ngrains=ngr,tilt_1=30.,sigma=15)\n plt.gcf().savefig('small_doughnut.pdf',bbox_inches='tight')\n plt.gcf().clf()\n f = gen_file(lab='sm_doughnut',ngr=ngr)\n write_gr(f,grs)\n\n ## Big donuts\n grs = main(mu=0,ngrains=ngr,tilt_1=50.,sigma=15)\n plt.gcf().savefig('big_doughnut.pdf',bbox_inches='tight')\n plt.gcf().clf()\n f = gen_file(lab='big_doughnut',ngr=ngr)\n write_gr(f,grs)\n\n ## twin tilts (30).\n gr1=main(mu=0,ngrains=ngr/2,tilts_about_ax1=30.,sigma=45)\n plt.gcf().clf()\n gr2=main(mu=0,ngrains=ngr/2,tilts_about_ax1=-30.,sigma=45)\n plt.gcf().clf()\n grs =[]\n for i in range(len(gr1)):\n grs.append(gr1[i])\n grs.append(gr2[i])\n grs=np.array(grs)\n mypf=upf.polefigure(grains=grs,csym='hexag',cdim=[1,1,c2a])\n mypf.pf_new(poles=[[0,0,0,1],[1,0,-1,0]],cmap='jet',ix='TD',iy='RD')\n plt.gcf().savefig('t30.pdf',bbox_inches='tight')\n f = gen_file(lab='dbl_lets_30',ngr=ngr)\n write_gr(f,grs)\n\n ## twin tilts (50).\n gr1=main(mu=0,ngrains=ngr/2,tilts_about_ax1=50.,sigma=45)\n plt.gcf().clf()\n gr2=main(mu=0,ngrains=ngr/2,tilts_about_ax1=-50.,sigma=45)\n plt.gcf().clf()\n gr =[]\n for i in range(len(gr1)):\n gr.append(gr1[i])\n gr.append(gr2[i])\n gr=np.array(gr)\n mypf=upf.polefigure(grains=gr,csym='hexag',cdim=[1,1,c2a])\n mypf.pf_new(poles=[[0,0,0,1],[1,0,-1,0]],cmap='jet',ix='TD',iy='RD')\n plt.gcf().savefig('t50.pdf',bbox_inches='tight')\n plt.gcf().clf()\n f = gen_file(lab='dbl_lets_50',ngr=ngr)\n write_gr(f,gr)", "def do_fit(self):\n\n if (self._flag == 1):\n self._gf = [0.2]\n self._gf = self.par*(self._num_fu*len(self._sites)*2)\n x, F = self.read_from_file(\n self._sn, self._qn, self._path) # read data from the file\n # ,ftol=1.0e-7,xtol=1.0e-8)\n popt, pcov = curve_fit(\n self.modelfun, x, F, p0=self._gf, maxfev=5000)\n self._gf = popt\n\n elif (self._flag == 2):\n\n# par=[0.0]*(self._num_fu*5)\n# for j in range(self._num_fu):\n# par[j*5]=0.0*math.copysign(1,(pow(-1,j)))\n# self._gf[j*5]=0.1\n# par[j*5+1]=6.45\n# par[j*5+2]=0.0\n# par[j*5+3]=0.05\n# par[j*5+4]=1.0\n\n X, F = self.read_from_file(self._sn, self._qn, self._path) # read data from the file\n\n# height, xx, width=self.moments(F)\n# 
Tracer()()\n# par=[0.0]*(self._num_fu*5)\n# for j in range(self._num_fu):\n# par[j*5]=x[0,xx]\n# par[j*5]=X[0,xx]*math.copysign(1,(pow(-1,j)))\n# par[j*5+1]=X[1,xx]\n# par[j*5+2]=X[2,xx]\n# par[j*5+3]=0.007\n# par[j*5+4]=height*math.copysign(1,(pow(-1,j)))\n\n xi, yi, zi = np.mgrid[-6.5:6.5:160j, 4.0:8.9:160j, -7.5:7.5:160j]\n x, y, z = xi.flatten(), yi.flatten(), zi.flatten()\n XX = np.vstack((x, y, z))\n\n invdisttree = Invdisttree(X.T, F, leafsize=10, stat=1)\n AA = invdisttree(XX.T, nnear=130, eps=0, p=1)\n\n# aaa1,bbb1=self.detect_local_minima(-AA.reshape(xi.shape))\n# aaa2,bbb2=self.detect_local_maxima(-AA.reshape(xi.shape))\n if self.peaks==[]:\n print('\\n---------------------------------------------------------------------')\n print('Detecting maxima and minima of target function...',)\n\n peaks_min, min_coord, peaks_max, max_coord = self.detect_min_max(AA.reshape(xi.shape))\n print('done')\n print('Number of the min peaks: {}'.format(len(peaks_min)))\n print('Number of the max peaks: {}'.format(len(peaks_max)))\n print('---------------------------------------------------------------------\\n')\n # fig=plt.figure()\n # ax = fig.add_subplot(111, projection='3d')\n # ax.plot_surface(xi[:,:,60],yi[:,:,60],bbb2[:,:,60], cmap=cm.jet, linewidth=0.2)\n # plt.hold(True)\n # plt.show()\n\n if peaks_max==[]:\n peaks=np.insert(peaks_min, np.arange(len(peaks_max)), peaks_max)\n coords=np.insert(min_coord, np.arange(max_coord.shape[1]), max_coord, axis=1)\n else:\n peaks = np.insert(peaks_max, np.arange(len(peaks_min)), peaks_min)\n coords = np.insert(max_coord, np.arange(min_coord.shape[1]), min_coord, axis=1)\n\n self.peaks=peaks\n self.coords=coords\n\n par = [0.0]*(self._num_fu*5)\n j1 = 0\n aaaa = 1\n for j in range(self._num_fu):\n if (j > aaaa*self.coords.shape[1]-1):\n j1 = 0\n aaaa += 1\n par[j*5] = xi[self.coords[0, j1], self.coords[0, j1], self.coords[0, j1]]\n par[j*5+1] = yi[self.coords[1, j1], self.coords[1, j1], self.coords[1, j1]]\n par[j*5+2] = zi[self.coords[2, j1], self.coords[2, j1], self.coords[2, j1]]\n # par[j*5+3] = 0.1003+0.1000*math.copysign(1, (pow(-1, j)))\n par[j*5+3] = 0.0001\n# if j < 15:\n# par[j*5+3] = 0.00001\n# else:\n# par[j*5+3] = 0.0005\n par[j*5+4] = self.peaks[j1]\n# print(coords[0, j1], coords[1, j1], coords[2, j1])\n j1 += 1\n # popt, pcov = curve_fit(self.modelfun1, x[:,1:20000], F[1:20000],p0=par,maxfev=150000,xtol=1e-8,ftol=1e-8)\n popt, pcov = curve_fit(\n self.modelfun1, X, F, p0=par, maxfev=150000, xtol=1e-6,\n ftol=1e-8)\n # popt, pcov = curve_fit(self.modelfun1, XX, AA, p0=par)\n self._gf = popt\n# self.error=np.diagonal(pcov, offset=0)\n# print(pcov)\n else:\n # pass\n sys.exit(\"Wrong flag in do_fit\")", "def findFringesInScan(self, fsu='FSUB', plot=False, calibrated=True):\n fringes_pos = []\n weight = []\n\n if plot:\n plt.figure(0)\n plt.clf()\n plt.title(self.filename+' | '+fsu+' | '+self.insmode)\n plt.xlabel('OPD '+self.DLtrack+' (m)')\n\n for k in range(self.scan_nscans):\n x = self.getScan(k, isolate=False, calibrated=calibrated,\n FUOFFSET=1, fsu=fsu, resample=False)\n opd = np.linspace(x[1].min(), x[1].max(), len(x[1]))\n scan1 = np.interp(opd, x[1], x[2]-x[4])\n scan2 = np.interp(opd, x[1], x[3]-x[5])\n sigS = np.exp(-((opd-opd.mean())/10e-6)**2)*\\\n np.sin((opd-opd.mean())/2.2e-6*2*np.pi)\n sigC = np.exp(-((opd-opd.mean())/10e-6)**2)*\\\n np.cos((opd-opd.mean())/2.2e-6*2*np.pi)\n sigS = np.roll(sigS, len(sigS)/2)\n sigC = np.roll(sigC, len(sigC)/2)\n \n fft_sigS = np.fft.fft(sigS)\n fft_sigC = np.fft.fft(sigC)\n fft_scan1 = 
np.fft.fft(scan1)\n fft_scan2 = np.fft.fft(scan2)\n # correlate\n powerS1 = np.abs(np.fft.ifft(fft_scan1*fft_sigS))**2\n powerC1 = np.abs(np.fft.ifft(fft_scan1*fft_sigC))**2\n powerS2 = np.abs(np.fft.ifft(fft_scan2*fft_sigS))**2\n powerC2 = np.abs(np.fft.ifft(fft_scan2*fft_sigC))**2\n power1 = powerS1+powerC1\n power2 = powerS2+powerC2\n if power1.max()>(power1.mean()+8*power1.std()):\n fringes_pos.append(opd[power1.argmax()])\n weight.append(power1.max())\n if plot:\n plt.plot(opd, (power1-power1.mean())/power1.std(),\n linewidth=2)\n print x[0].min(), x[0].max()\n #plt.plot(opd, np.interp(x[0],\n # self.raw['IMAGING_DATA_'+fsu.upper()].data.field('TIME'),\n # self.raw['IMAGING_DATA_'+fsu.upper()].data.field(OPDSNR)),\n # 'k', alpha=0.5, linewidth=3)\n else:\n if plot:\n plt.plot(opd, (power1-power1.mean())/power1.std(),\n 'k', alpha='0.5')\n\n if power2.max()>(power2.mean()+8*power2.std()):\n fringes_pos.append(opd[power2.argmax()])\n weight.append(power2.max())\n if plot:\n plt.plot(opd, (power2-power2.mean())/power2.std(),\n linewidth=2)\n else:\n if plot:\n plt.plot(opd, (power2-power2.mean())/power2.std(),\n 'k', alpha='0.5')\n\n return (np.array(fringes_pos)*np.array(weight)).sum()/\\\n np.array(weight).sum()", "def visualize(z_in, azimuth=25., elevation=30.,\n thresholds=[0.95, .9, .75, .5, .25, .125], opacities=[1, .9, .7, .5, .2, .1],\n# thresholds=[0.94, .89, .75, .5, .25, .1], opacities=[.9, .8, .7, .5, .2, .1],\n# thresholds=[0.94, .89, .75], opacities=[.99, .7, .2],\n# thresholds=[0.7, .5, .2], opacities=[.95, .5, .2],\n fourier_label = {'f_x':'f_x', 'f_y':'f_y', 'f_t':'f_t'},\n filename=None, do_axis=True, do_grids=False, draw_projections=True,\n colorbar=False, f_N=2., f_tN=2., figsize=figsize, dpi=300, figpath=figpath, **kwargs):\n z = z_in.copy()\n N_X, N_Y, N_frame = z.shape\n fx, fy, ft = get_grids(N_X, N_Y, N_frame)\n\n # Normalize the amplitude.\n z /= z.max()\n\n from vispy import app, scene, use\n try:\n AffineTransform = scene.transforms.AffineTransform\n except:\n AffineTransform = scene.transforms.MatrixTransform\n\n use(app='pyglet', gl='pyopengl2')\n #from vispy.util.transforms import perspective, translate, rotate\n from vispy.color import Color\n transparent = Color(color='black', alpha=0.)\n import colorsys\n canvas = scene.SceneCanvas(size=figsize, bgcolor='white', dpi=dpi)\n view = canvas.central_widget.add_view()\n\n vol_data = np.rollaxis(np.rollaxis(z, 1), 2)\n# volume = scene.visuals.Volume(vol_data, parent=view.scene)#frame)\n center = scene.transforms.STTransform(translate=( -N_X/2, -N_Y/2, -N_frame/2))\n# volume.transform = center\n# volume.cmap = 'blues'\n\n if draw_projections:\n from vispy.color import Colormap\n cm = Colormap([(1.0, 1.0, 1.0, 1.0), 'k'])\n opts = {'parent':view.scene, 'cmap':cm, 'clim':(0., 1.)}\n\n energy_xy = np.rot90(np.max(z, axis=2)[:, ::-1], 3)#[:, ::-1]\n fourier_xy = scene.visuals.Image(np.rot90(energy_xy), **opts)\n tr_xy = AffineTransform()\n tr_xy.rotate(90, (0, 0, 1))\n tr_xy.translate((N_X/2, -N_Y/2, -N_frame/2))\n fourier_xy.transform = tr_xy\n\n energy_xt = np.rot90(np.max(z, axis=1)[:, ::-1], 3)[::-1, ::-1]\n fourier_xt = scene.visuals.Image(energy_xt, **opts)\n tr_xt = AffineTransform()\n tr_xt.rotate(90, (1, 0, 0))\n tr_xt.translate((-N_X/2, N_Y/2, -N_frame/2))\n fourier_xt.transform = tr_xt\n\n energy_yt = np.max(z, axis=0)[:, ::-1]\n fourier_yt = scene.visuals.Image(energy_yt, **opts)\n tr_yt = AffineTransform()\n tr_yt.rotate(90, (0, 1, 0))\n tr_yt.translate((-N_X/2, -N_Y/2, N_frame/2))\n 
fourier_yt.transform = tr_yt\n\n # Generate iso-surfaces at different energy levels\n surfaces = []\n for i_, (threshold, opacity) in enumerate(zip(thresholds, opacities)):\n surfaces.append(scene.visuals.Isosurface(z, level=threshold,\n# color=Color(np.array(colorsys.hsv_to_rgb(1.*i_/len(thresholds), 1., 1.)), alpha=opacity),\n color=Color(np.array(colorsys.hsv_to_rgb(.66, 1., 1.)), alpha=opacity),\n shading='smooth', parent=view.scene)\n )\n surfaces[-1].transform = center\n\n # Draw a sphere at the origin\n axis = scene.visuals.XYZAxis(parent=view.scene)\n for p in ([1, 1, 1, -1, 1, 1], [1, 1, -1, -1, 1, -1], [1, -1, 1, -1, -1, 1],[1, -1, -1, -1, -1, -1],\n [1, 1, 1, 1, -1, 1], [-1, 1, 1, -1, -1, 1], [1, 1, -1, 1, -1, -1], [-1, 1, -1, -1, -1, -1],\n [1, 1, 1, 1, 1, -1], [-1, 1, 1, -1, 1, -1], [1, -1, 1, 1, -1, -1], [-1, -1, 1, -1, -1, -1]):\n line = scene.visuals.Line(pos=np.array([[p[0]*N_X/2, p[1]*N_Y/2, p[2]*N_frame/2], [p[3]*N_X/2, p[4]*N_Y/2, p[5]*N_frame/2]]), color='black', parent=view.scene)\n\n axisX = scene.visuals.Line(pos=np.array([[0, -N_Y/2, 0], [0, N_Y/2, 0]]), color='red', parent=view.scene)\n axisY = scene.visuals.Line(pos=np.array([[-N_X/2, 0, 0], [N_X/2, 0, 0]]), color='green', parent=view.scene)\n axisZ = scene.visuals.Line(pos=np.array([[0, 0, -N_frame/2], [0, 0, N_frame/2]]), color='blue', parent=view.scene)\n\n if do_axis:\n t = {}\n for text in ['f_x', 'f_y', 'f_t']:\n t[text] = scene.visuals.Text(fourier_label[text], parent=canvas.scene, face='Helvetica', color='black')\n t[text].font_size = 8\n t['f_x'].pos = canvas.size[0] // 3, canvas.size[1] - canvas.size[1] // 8\n t['f_y'].pos = canvas.size[0] - canvas.size[0] // 8, canvas.size[1] - canvas.size[1] // 6\n t['f_t'].pos = canvas.size[0] // 8, canvas.size[1] // 2\n\n cam = scene.TurntableCamera(elevation=elevation, azimuth=azimuth, up='z')\n cam.fov = 48\n cam.scale_factor = N_X * 1.8\n if do_axis: margin = 1.35\n else: margin = 1\n cam.set_range((-N_X/2*margin, N_X/2/margin), (-N_Y/2*margin, N_Y/2/margin), (-N_frame/2*margin, N_frame/2/margin))\n view.camera = cam\n\n render_im = canvas.render()\n app.quit()\n if not(filename is None):\n import vispy.io as io\n io.write_png(filename, render_im)\n else:\n return render_im", "def test_DFT_rect(centering='FFTRECT', outdir=None, outname='DFT1R_', npix=None, sampling=10., nlamd=None):\n\n print \"Testing DFT, style = \"+centering\n\n\n npupil = 156\n pctr = int(npupil/2)\n s = (npupil,npupil)\n\n\n # make things rectangular:\n if nlamd is None and npix is None:\n nlamd = (10,20)\n npix = [val*sampling for val in nlamd] #(100, 200) \n elif npix is None:\n npix = [val*sampling for val in nlamd] #(100, 200) \n elif nlamd is None:\n nlamd = [val/sampling for val in npix]\n u = nlamd\n print u\n #(u, float(u)/npix[0]*npix[1])\n #npix = (npix, 2*npix)\n\n\n # FFT style\n mft1 = matrixDFT.MatrixFourierTransform(centering=centering)\n\n #ctr = (float(npupil)/2.0 + mft1.offset(), float(npupil)/2.0 + mft1.offset())\n ctr = (float(npupil)/2.0 , float(npupil)/2.0)\n #print ctr\n pupil = makedisk(s=s, c=ctr, r=float(npupil)/2.0001, t=np.float64, grey=0)\n\n pupil[0:60, 0:60] = 0\n pupil[0:10] = 0\n\n pupil /= np.sqrt(pupil.sum())\n\n plt.clf()\n plt.subplots_adjust(left=0.02, right=0.98)\n plt.subplot(141)\n\n pmx = pupil.max()\n plt.imshow(pupil, vmin=0, vmax=pmx*1.5)\n\n\n if outdir is not None:\n fits.PrimaryHDU(pupil.astype(np.float32)).writeto(outdir+os.sep+outname+\"pupil.fits\", clobber=True)\n\n a = mft1.perform(pupil, u, npix)\n\n pre = (abs(pupil)**2).sum() \n post = 
(abs(a)**2).sum() \n ratio = post / pre\n calcr = 1./(1.0*u[0]*u[1] *npix[0]*npix[1]) # multiply post by this to make them equal\n print \"Pre-FFT total: \"+str( pre)\n print \"Post-FFT total: \"+str( post )\n print \"Ratio: \"+str( ratio)\n #print \"Calc ratio : \"+str( calcr)\n #print \"uncorrected: \"+str( ratio/calcr)\n\n\n complexinfo(a, str=\",ft1 asf\")\n asf = a.real.copy()\n if outdir is not None:\n fits.PrimaryHDU(asf.astype(np.float32)).writeto(outdir+os.sep+outname+\"asf.fits\", clobber=True)\n cpsf = a * a.conjugate()\n psf = cpsf.real.copy()\n if outdir is not None:\n fits.PrimaryHDU(psf.astype(np.float32)).writeto(outdir+os.sep+outname+\"psf.fits\", clobber=True)\n\n ax=plt.subplot(142)\n plt.imshow(asf, norm=matplotlib.colors.LogNorm(1e-8, 1.0))\n ax.set_title='ASF'\n\n ax=plt.subplot(143)\n plt.imshow(psf, norm=matplotlib.colors.LogNorm(1e-8, 1.0))\n ax.set_title='PSF'\n\n plt.subplot(144)\n\n pupil2 = mft1.inverse(a, u, npupil)\n pupil2r = (pupil2 * pupil2.conjugate()).real\n plt.imshow( pupil2r, vmin=0,vmax=pmx*1.5*0.01) # FIXME flux normalization is not right?? I think this has to do with squaring the pupil here, that's all.\n plt.gca().set_title='back to pupil'\n plt.draw()\n print \"Post-inverse FFT total: \"+str( abs(pupil2r).sum() )\n print \"Post-inverse pupil max: \"+str(pupil2r.max())\n\n plt.suptitle('Matrix DFT with rectangular arrays using centering={0}'.format(centering))\n\n plt.savefig('test_DFT_rectangular_results_{0}.pdf'.format(centering))", "def show_filters(self):\n w_mat = np.transpose(self.sess.run(self.W_fc1))\n\n plt.figure(figsize=(10,10), facecolor='w', edgecolor='w')\n plot_positions = [(0,0),(0,1),(1,0),(1,1)]\n for ch in range(self.n_input_channels):\n grid,_ = ia.image_grid_RGB( w_mat,\n n_channels=self.n_input_channels,\n image_size=(self.y_res,self.x_res), n_x=6, n_y=6,\n channel_order=(ch,ch,ch), amplitude_scaling=(1,1,1),\n line_color=1, auto_scale=True, return_borders=False )\n colormax = np.abs(grid).max()\n with sns.axes_style(\"white\"):\n ax = plt.subplot2grid( (2,2), plot_positions[ch] )\n ax.imshow( grid[:,:,0], interpolation='nearest',\n cmap=plt.get_cmap('seismic'),\n clim=(-1*colormax,colormax) )\n ax.set_title(\"Hidden units, channel {}\".format(ch))\n plt.axis('tight')\n plt.axis('off')\n plt.tight_layout()", "def analysis_dFF_map(self):\r\n\r\n \r\n\r\n print ('Starting dF/F analysis:')\r\n\r\n self.print_image_info()\r\n\r\n # smoothwin = int(self.imageData.shape[1]/8.)\r\n\r\n # get the average image and the average of the whole image over time\r\n\r\n avgimg = np.mean(self.imageData, axis=0) # get mean image for reference later: average across all time\r\n\r\n \r\n\r\n mpl.figure(99)\r\n\r\n mpl.imshow(avgimg, vmin=0, vmax=np.max(np.max(avgimg, axis=0), axis=0))\r\n\r\n # self.meanimagevalue = np.mean(np.mean(avgimg, axis=1), axis=0)\r\n\r\n # self.stdimg = np.std(self.imageData, axis= 0) # and standard deviation\r\n\r\n imgdatasm = scipy.ndimage.filters.gaussian_filter(self.imageData,[0,2,2],order=0,output=None,mode='reflect',cval=0.0,truncate=4.0)\r\n # field correction: smooth the average image, subtract it from the imagedata, then add back the mean value\r\n avgimgsm = scipy.ndimage.filters.gaussian_filter(avgimg, 2, order=0, output=None, mode='reflect', cval=0.0, truncate=4.0)\r\n\r\n # avgimgsm = scipy.ndimage.filters.gaussian_filter(avgimg, smoothwin, order=0, output=None, mode='reflect', cval=0.0, truncate=4.0)\r\n\r\n #self.imageData = (self.imageData-avgimgsm)+ self.meanimagevalue\r\n\r\n mpl.figure(98)\r\n 
mpl.imshow(avgimgsm,vmin=0, vmax=np.max(np.max(avgimgsm, axis=0), axis=0))\r\n mpl.figure(97)\r\n mpl.imshow(np.mean(imgdatasm,axis=0))\r\n self.n_times = self.timebase\r\n\r\n periodsize = int(self.period*self.framerate)\r\n print('periodsize: ',periodsize)\r\n\r\n # windowsize = int(self.freqperiod*self.framerate) # window size for every response\r\n\r\n # r = range(0, self.imageData.shape[0], windowsize)\r\n\r\n sig = np.reshape(imgdatasm, (self.nrepetitions, periodsize, \r\n\r\n self.imageData.shape[1], self.imageData.shape[2]), order='C')\r\n\r\n delresp=np.zeros([19,256,256])\r\n repback = np.mean(sig[:,1:4,:,:],axis=1)\r\n resp = np.mean(sig[:,5:9,:,:],axis=1)\r\n for counter in range(19):\r\n delresp[counter,:,:]=(resp[counter,:,:]-repback[counter,:,:])/repback[counter,:,:]\r\n quot=np.mean(delresp,axis=0)\r\n quot=-quot\r\n print ('shape of quot: ', np.shape(quot))\r\n # quot=(resp-repback)/repback\r\n # quot[quot>0]=0\r\n # quot=-1000*quot\r\n\r\n mpl.figure(7)\r\n mpl.imshow(quot,cmap=mpl.cm.binary)\r\n mpl.colorbar()\r\n\r\n quotsm = scipy.ndimage.filters.gaussian_filter(quot, 3, order=0, output=None, mode='reflect', cval=0.0, truncate=4.0)\r\n mpl.figure(8)\r\n mpl.imshow(quotsm,cmap=mpl.cm.binary)\r\n mpl.colorbar()\r\n \r\n # bl = np.mean(sig[:, range(0, sig.shape[1], windowsize), :, :], axis=0)\r\n\r\n # bl = scipy.ndimage.filters.gaussian_filter(bl, smoothwin, order=0, output=None, mode='reflect', cval=0.0, truncate=4.0)\r\n\r\n\r\n\r\n # print (' windowsize: ', windowsize)\r\n\r\n # print (' periodsize: ', periodsize)\r\n # mc = matplotlib.cm\r\n\r\n # only use sequential maps here\r\n\r\n # clist = [mc.Reds, mc.YlOrBr, mc.Oranges, mc.Greens, mc.GnBu, mc.Blues, mc.RdPu, mc.Purples,mc.Reds,mc.Greens,mc.Blues,mc.Reds,mc.Reds,mc.Reds,mc.Reds]\r\n # clist2 = ['red', 'orange', 'yellow', 'green', 'blue', 'indigo', 'violet', 'black','red','purple','green','blue','red','red','red','red']\r\n\r\n cs = {}\r\n\r\n # sigd = np.zeros((bl.shape[0], sig.shape[2], sig.shape[3]))\r\n# \r\n # localmax = {}\r\n\r\n # sigmax = 0.\r\n# \r\n # kernel = np.ones((5, 5))\r\n\r\n # psf = kernel / np.sum(kernel)\r\n\r\n # compute dF/F, and get maximum over all frequencies\r\n\r\n print (' sig shape: ', sig.shape)\r\n\r\n # print (' bl shape: ', bl.shape)\r\n\r\n # smax = np.zeros(bl.shape[0])\r\n\r\n # for i in range(bl.shape[0]):\r\n\r\n # sigd[i] = (np.mean(np.max(sig[:,range(i*windowsize, i*windowsize+windowsize),:,:], axis=0), axis=0) - bl[i,:,:])/bl[i,:,:]\r\n\r\n # sigd[i] = sigd[i]**2.0\r\n\r\n # smooth\r\n\r\n #sigd[i] = scipy.ndimage.filters.gaussian_filter(sigd[i], 1., order=0, output=None, mode='reflect', cval=0.0, truncate=4.0)\r\n\r\n # deconvolve\r\n\r\n # sigd[i] = restoration.richardson_lucy(sigd[i], psf, 5)\r\n\r\n# sm = sigd[i].max().max()\r\n\r\n# if sm > sigmax:\r\n\r\n# sigmax = sm\r\n\r\n# smax[i] = sm\r\n\r\n# print( ' i, sm: ', i, sm)\r\n\r\n# # now process for display\r\n\r\n# print (' sigd shape: ', sigd.shape)\r\n\r\n# wdat = np.mean(sig, axis=0)\r\n\r\n# wds = wdat.shape\r\n\r\n# print('wdat shape: ', wds)\r\n\r\n# # print (range(int(wds[1]/2.), int(3.*wds[1]/4.)), range(int(wds[2]/2.), int(3.*wds[2]/4.)))\r\n\r\n# print( 'reduced shape: ', wdat[:,range(int(wds[1]/2.),int(3.*wds[1]/4.)),:][:,:,range(int(wds[2]/2.), int(3.*wds[2]/4.))].shape)\r\n\r\n# wp = wdat[:,range(int(wds[1]/2.),int(3.*wds[1]/4.)),:][:,:,range(int(wds[2]/2.), int(3.*wds[2]/4.))]\r\n\r\n# wp = np.mean(np.mean(wdat, axis=1), axis=1)\r\n\r\n# mpl.figure(1)\r\n\r\n# mpl.plot(np.linspace(0., 
len(wp)*1./self.framerate, num=len(wp)), wp)\r\n\r\n\r\n\r\n# mpl.figure(2)\r\n\r\n# for i in range(sigd.shape[0]):\r\n\r\n# sigd[i][sigd[i] < self.threshold*sigmax] = 0.\r\n\r\n# # find center of mass of areas above threshold\r\n\r\n# # mass = sigd[i].copy()\r\n\r\n# # mass[sigd[i] > 0.] = 1.\r\n\r\n# # structuring_element = [[0,1,0],[1,1,1],[0,1,0]]\r\n\r\n# # segmentation, segments = scipy.ndimage.label(mass, structuring_element)\r\n\r\n# # coords = scipy.ndimage.center_of_mass(sigd[i], segmentation, range(1,segments+1))\r\n\r\n# # xcoords = np.array([x[1] for x in coords])\r\n\r\n# # ycoords = np.array([x[0] for x in coords])\r\n\r\n# # cs[i] = (xcoords, ycoords)\r\n\r\n\r\n\r\n# # Calculating local maxima\r\n\r\n# lm = skif.peak_local_max(sigd[i], min_distance=2, threshold_rel=0.25, exclude_border=False, \r\n\r\n# indices=True, num_peaks=10, footprint=None, labels=None)\r\n\r\n# localmax[i] = [(m[0], m[1], sigd[i][(m[0], m[1])]) for m in lm]\r\n\r\n# # print ('i, local max: ',i, localmax)\r\n\r\n# mpl.subplot(5,5,i+1)\r\n# print ('shape of sigd: ',[np.shape(sigd),i])\r\n\r\n# imga1 = mpl.imshow(sigd[i], cmap=clist[i], vmin=0, origin='lower')\r\n\r\n# if len(localmax[i]) > 0:\r\n\r\n# max_fr = np.max([m[2] for m in localmax[i]])\r\n\r\n# else:\r\n\r\n# continue\r\n\r\n# scattersize = 30.\r\n\r\n# for k, lm in enumerate(localmax[i]):\r\n\r\n# mpl.scatter(lm[1], lm[0], marker='o', c=clist2[i], edgecolors='k',\r\n\r\n# s=scattersize*lm[2]/max_fr, linewidths=0.125, alpha=0.5)\r\n\r\n# mpl.subplot(6,5,i+15+1)\r\n\r\n# wr = range(i*windowsize, i*windowsize+windowsize)\r\n\r\n# # print (' wr: len, min max: ', len(wr), min(wr), max(wr))\r\n\r\n# wmax = 0.\r\n\r\n# for lmax in localmax[i]: # was xcoords\r\n\r\n# wave = wdat[wr, lmax[0],lmax[1]]\r\n\r\n# wdff = (wave-wave[0])/wave[0]\r\n\r\n# if np.max(wdff) > wmax:\r\n\r\n# wmax = np.max(wdff)\r\n\r\n# mpl.plot(np.linspace(0., len(wave)*1./self.framerate, num=len(wave)),\r\n\r\n# wdff, color=clist2[i])\r\n\r\n# mpl.ylim(-0.1*wmax, wmax)\r\n\r\n# fig = mpl.figure(3)\r\n\r\n# for i in range(sigd.shape[0]):\r\n\r\n# if len(localmax[i]) == 0:\r\n\r\n# continue\r\n\r\n# max_fr = np.max([m[2] for m in localmax[i]])\r\n\r\n# for lm in localmax[i]:\r\n\r\n# mpl.scatter(lm[1], lm[0], marker='o', c=clist2[i], \r\n\r\n# s=scattersize*lm[2]/max_fr, alpha=0.5, edgecolors='k')\r\n\r\n# mpl.ylim(0, sigd.shape[2])\r\n\r\n# mpl.xlim(0, sigd.shape[1])\r\n\r\n# mpl.axis('equal')\r\n\r\n mpl.show()\r\n\r\n print (' DF/F analysis finished.\\n')", "def plot_reconstruction_diagnostics(self, figsize=(20, 10)):\n figs = []\n fig_names = []\n\n # upsampled frequency\n fx_us = tools.get_fft_frqs(2 * self.nx, 0.5 * self.dx)\n fy_us = tools.get_fft_frqs(2 * self.ny, 0.5 * self.dx)\n\n # plot different stages of inversion\n extent = tools.get_extent(self.fy, self.fx)\n extent_upsampled = tools.get_extent(fy_us, fx_us)\n\n for ii in range(self.nangles):\n fig = plt.figure(figsize=figsize)\n grid = plt.GridSpec(3, 4)\n\n for jj in range(self.nphases):\n\n # ####################\n # separated components\n # ####################\n ax = plt.subplot(grid[jj, 0])\n\n to_plot = np.abs(self.separated_components_ft[ii, jj])\n to_plot[to_plot <= 0] = np.nan\n plt.imshow(to_plot, norm=LogNorm(), extent=extent)\n\n circ = matplotlib.patches.Circle((0, 0), radius=self.fmax, color='k', fill=0, ls='--')\n ax.add_artist(circ)\n\n if jj == 0:\n plt.title('O(f)otf(f)')\n plt.scatter(self.frqs[ii, 0], self.frqs[ii, 1], edgecolor='r', facecolor='none')\n plt.scatter(-self.frqs[ii, 0], 
-self.frqs[ii, 1], edgecolor='r', facecolor='none')\n elif jj == 1:\n plt.title('m*O(f-fo)otf(f)')\n plt.scatter(self.frqs[ii, 0], self.frqs[ii, 1], edgecolor='r', facecolor='none')\n elif jj == 2:\n plt.title('m*O(f+fo)otf(f)')\n plt.scatter(-self.frqs[ii, 0], -self.frqs[ii, 1], edgecolor='r', facecolor='none')\n\n plt.setp(ax.get_xticklabels(), visible=False)\n plt.setp(ax.get_yticklabels(), visible=False)\n\n plt.xlim([-2 * self.fmax, 2 * self.fmax])\n plt.ylim([2 * self.fmax, -2 * self.fmax])\n\n # ####################\n # deconvolved component\n # ####################\n ax = plt.subplot(grid[jj, 1])\n\n plt.imshow(np.abs(self.components_deconvolved_ft[ii, jj]), norm=LogNorm(), extent=extent)\n\n if jj == 0:\n plt.scatter(self.frqs[ii, 0], self.frqs[ii, 1], edgecolor='r', facecolor='none')\n plt.scatter(-self.frqs[ii, 0], -self.frqs[ii, 1], edgecolor='r', facecolor='none')\n elif jj == 1:\n plt.scatter(self.frqs[ii, 0], self.frqs[ii, 1], edgecolor='r', facecolor='none')\n elif jj == 2:\n plt.scatter(-self.frqs[ii, 0], -self.frqs[ii, 1], edgecolor='r', facecolor='none')\n\n circ = matplotlib.patches.Circle((0, 0), radius=self.fmax, color='k', fill=0, ls='--')\n ax.add_artist(circ)\n\n if jj == 0:\n plt.title('deconvolved component')\n plt.setp(ax.get_xticklabels(), visible=False)\n plt.setp(ax.get_yticklabels(), visible=False)\n\n plt.xlim([-2 * self.fmax, 2 * self.fmax])\n plt.ylim([2 * self.fmax, -2 * self.fmax])\n\n # ####################\n # shifted component\n # ####################\n ax = plt.subplot(grid[jj, 2])\n\n # avoid any zeros for LogNorm()\n cs_ft_toplot = np.abs(self.components_shifted_ft[ii, jj])\n cs_ft_toplot[cs_ft_toplot <= 0] = np.nan\n plt.imshow(cs_ft_toplot, norm=LogNorm(), extent=extent_upsampled)\n plt.scatter(0, 0, edgecolor='r', facecolor='none')\n\n circ = matplotlib.patches.Circle((0, 0), radius=self.fmax, color='k', fill=0, ls='--')\n ax.add_artist(circ)\n\n if jj == 1:\n circ2 = matplotlib.patches.Circle(-self.frqs[ii], radius=self.fmax, color='k', fill=0, ls='--')\n ax.add_artist(circ2)\n elif jj == 2:\n circ2 = matplotlib.patches.Circle(self.frqs[ii], radius=self.fmax, color='k', fill=0, ls='--')\n ax.add_artist(circ2)\n\n if jj == 0:\n plt.title('shifted component')\n plt.setp(ax.get_xticklabels(), visible=False)\n plt.setp(ax.get_yticklabels(), visible=False)\n\n plt.xlim([-2 * self.fmax, 2 * self.fmax])\n plt.ylim([2 * self.fmax, -2 * self.fmax])\n\n # ####################\n # normalized weights\n # ####################\n ax = plt.subplot(grid[jj, 3])\n\n to_plot = self.weights[ii, jj] / self.weight_norm\n to_plot[to_plot <= 0] = np.nan\n im2 = plt.imshow(to_plot, norm=LogNorm(), extent=extent_upsampled)\n im2.set_clim([1e-5, 1])\n fig.colorbar(im2)\n\n circ = matplotlib.patches.Circle((0, 0), radius=self.fmax, color='k', fill=0, ls='--')\n ax.add_artist(circ)\n\n if jj == 1:\n circ2 = matplotlib.patches.Circle(-self.frqs[ii], radius=self.fmax, color='k', fill=0, ls='--')\n ax.add_artist(circ2)\n elif jj == 2:\n circ2 = matplotlib.patches.Circle(self.frqs[ii], radius=self.fmax, color='k', fill=0, ls='--')\n ax.add_artist(circ2)\n\n if jj == 0:\n plt.title('normalized weight')\n plt.setp(ax.get_xticklabels(), visible=False)\n plt.setp(ax.get_yticklabels(), visible=False)\n\n plt.xlim([-2 * self.fmax, 2 * self.fmax])\n plt.ylim([2 * self.fmax, -2 * self.fmax])\n\n plt.suptitle('period=%0.3fnm at %0.3fdeg=%0.3frad, f=(%0.3f,%0.3f) 1/um\\n'\n 'mod=%0.3f, min mcnr=%0.3f, wiener param=%0.2f\\n'\n 'phases (deg) =%0.2f, %0.2f, %0.2f, phase diffs (deg) 
=%0.2f, %0.2f, %0.2f' %\n (self.periods[ii] * 1e3, self.angles[ii] * 180 / np.pi, self.angles[ii],\n self.frqs[ii, 0], self.frqs[ii, 1], self.mod_depths[ii, 1], np.min(self.mcnr[ii]), self.wiener_parameter,\n self.phases[ii, 0] * 180/np.pi, self.phases[ii, 1] * 180/np.pi, self.phases[ii, 2] * 180/np.pi,\n 0, np.mod(self.phases[ii, 1] - self.phases[ii, 0], 2*np.pi) * 180/np.pi,\n np.mod(self.phases[ii, 2] - self.phases[ii, 0], 2*np.pi) * 180/np.pi))\n\n figs.append(fig)\n fig_names.append('sim_combining_angle=%d' % (ii + 1))\n\n # #######################\n # net weight\n # #######################\n figh = plt.figure(figsize=figsize)\n grid = plt.GridSpec(1, 2)\n plt.suptitle('Net weight, Wiener param = %0.2f' % self.wiener_parameter)\n\n ax = plt.subplot(grid[0, 0])\n net_weight = np.sum(self.weights, axis=(0, 1)) / self.weight_norm\n im = ax.imshow(net_weight, extent=extent_upsampled, norm=PowerNorm(gamma=0.1))\n\n figh.colorbar(im, ticks=[1, 0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1, 1e-2, 1e-3, 1e-4, 1e-5])\n\n ax.set_title(\"non-linear scale\")\n circ = matplotlib.patches.Circle((0, 0), radius=self.fmax, color='k', fill=0, ls='--')\n ax.add_artist(circ)\n\n circ2 = matplotlib.patches.Circle((0, 0), radius=2*self.fmax, color='k', fill=0, ls='--')\n ax.add_artist(circ2)\n\n circ3 = matplotlib.patches.Circle(self.frqs[0], radius=self.fmax, color='k', fill=0, ls='--')\n ax.add_artist(circ3)\n\n circ4 = matplotlib.patches.Circle(-self.frqs[0], radius=self.fmax, color='k', fill=0, ls='--')\n ax.add_artist(circ4)\n\n circ5 = matplotlib.patches.Circle(self.frqs[1], radius=self.fmax, color='k', fill=0, ls='--')\n ax.add_artist(circ5)\n\n circ6 = matplotlib.patches.Circle(-self.frqs[1], radius=self.fmax, color='k', fill=0, ls='--')\n ax.add_artist(circ6)\n\n circ7 = matplotlib.patches.Circle(self.frqs[2], radius=self.fmax, color='k', fill=0, ls='--')\n ax.add_artist(circ7)\n\n circ8 = matplotlib.patches.Circle(-self.frqs[2], radius=self.fmax, color='k', fill=0, ls='--')\n ax.add_artist(circ8)\n\n ax.set_xlim([-2 * self.fmax, 2 * self.fmax])\n ax.set_ylim([2 * self.fmax, -2 * self.fmax])\n\n ax = plt.subplot(grid[0, 1])\n ax.set_title(\"linear scale\")\n im = ax.imshow(net_weight, extent=extent_upsampled)\n\n figh.colorbar(im)\n circ = matplotlib.patches.Circle((0, 0), radius=self.fmax, color='k', fill=0, ls='--')\n ax.add_artist(circ)\n\n circ2 = matplotlib.patches.Circle((0, 0), radius=2 * self.fmax, color='k', fill=0, ls='--')\n ax.add_artist(circ2)\n\n circ3 = matplotlib.patches.Circle(self.frqs[0], radius=self.fmax, color='k', fill=0, ls='--')\n ax.add_artist(circ3)\n\n circ4 = matplotlib.patches.Circle(-self.frqs[0], radius=self.fmax, color='k', fill=0, ls='--')\n ax.add_artist(circ4)\n\n circ5 = matplotlib.patches.Circle(self.frqs[1], radius=self.fmax, color='k', fill=0, ls='--')\n ax.add_artist(circ5)\n\n circ6 = matplotlib.patches.Circle(-self.frqs[1], radius=self.fmax, color='k', fill=0, ls='--')\n ax.add_artist(circ6)\n\n circ7 = matplotlib.patches.Circle(self.frqs[2], radius=self.fmax, color='k', fill=0, ls='--')\n ax.add_artist(circ7)\n\n circ8 = matplotlib.patches.Circle(-self.frqs[2], radius=self.fmax, color='k', fill=0, ls='--')\n ax.add_artist(circ8)\n\n ax.set_xlim([-2 * self.fmax, 2 * self.fmax])\n ax.set_ylim([2 * self.fmax, -2 * self.fmax])\n\n figs.append(figh)\n fig_names.append('net_weight')\n\n return figs, fig_names", "def setupconfig():\n from Manager import Studio\n studio = Studio.Instance\n cfgeff = studio.configEffect_st\n cfgeff.bloomToggle.isOn = False\n 
cfgeff.vignetteToggle.isOn = False\n cfgeff.sunShaftsToggle.isOn = False\n cfgeff.fogToggle.isOn = False\n cfgeff.depthOfFieldToggle.isOn = False\n #cfgeff.ssaoToggle.isOn = True\n #cfgeff.selfShadowToggle.isOn = True\n \n # Turn off backgrounds\n studio.uiBGChanger.onOffToggle.isOn = False", "def plot5(self):\n\n cond = ((self.ds.freq<32.6) & (self.ds.freq>17))\n freq = self.ds.freq[cond]\n power = self.ds.power[cond]\n\n # the modes for KIC 9205705\n m = pd.read_csv('/home/mxs191/Desktop/MathewSchofield/TRG/GetData/Modes/modes_9205705.csv')\n m1 = [17.65, 20.6, 23.7, 26.9, 30.1] # l=1\n\n plt.rc('font', size=18)\n fig, ax = plt.subplots()\n plt.plot(freq, power, zorder=1, alpha=0.4)\n\n # NOTE: annotate mode angular degrees\n plt.scatter(m['f0'].as_matrix(), np.full(len(m), 150000), c='k', zorder=2, s=80)\n plt.scatter(m['f2'].as_matrix(), np.full(len(m), 130000), c='mediumseagreen', zorder=2, s=80, marker='^')\n plt.scatter(m1, np.full(len(m1), 140000), c='grey', zorder=2, s=80, marker='v')\n\n # NOTE: plot envelope\n numax = info['numax'].as_matrix() # mu Hz\n env_width = 0.66 * numax**0.88\n plt.plot(freq, 40004*np.exp( -( (freq-24.7)**2 / (2*7.**2) ) ), c='k', linestyle='--')\n\n # NOTE: annotate envelope\n style = dict(size=16, color='k')\n ax.text(24.1, 49167, r\"$\\nu_{\\rm max}$\", color='k', size=18)\n ax.text(24.1, 20994, r\"$\\Gamma_{\\rm env}$\", color='k', size=18)\n ax.text(23, 162944, r\"$\\Delta \\nu$\", **style)\n plt.annotate(s='', xy=(25.3, 158610), xytext=(21.91, 158610),\n arrowprops=dict(arrowstyle='<->')) # dnu\n plt.annotate(s='', xy=((24.7-env_width/2.), 15861), xytext=((24.7+env_width/2.), 15861),\n arrowprops=dict(arrowstyle='<->')) # env width\n\n ax.set_xlabel(r'Frequency ($\\rm \\mu Hz$)')\n ax.set_ylabel(r'PSD ($\\rm ppm^{2} \\, \\mu Hz^{-1}$)')\n plt.xlim(17, 32.6)\n plt.ylim(17, 195181)\n plt.tight_layout()\n plt.show()\n fig.savefig(os.getcwd() + '/DetTest1_plots/Plot5_ps_' + str(self.ds.epic) + '.pdf')", "def wulff_plot(self, show=False, n_angles=120):\n try:\n from matplotlib import pyplot as plt\n fig_xy = plt.figure()\n ax_xy = fig_xy.add_subplot(1, 1, 1)\n pos = self.cluster.get_positions()\n com = np.mean(pos, axis=0)\n pos -= com\n\n # Project atomic positions into the xy plane\n proj_xy = pos[:, :2]\n ax_xy.plot(proj_xy[:, 0], proj_xy[:, 1], 'x')\n theta = np.zeros(n_angles) + np.pi/2.0\n theta = theta.tolist()\n phi = np.linspace(0.0, 2.0*np.pi, n_angles).tolist()\n gamma = np.array([self.eval(t, p) for t, p in zip(theta, phi)])\n x = gamma * np.cos(phi)\n y = gamma * np.sin(phi)\n ax_xy.plot(x, y)\n except Exception as exc:\n print(\"Could not plot because of \"\n \"{}: {}\".format(type(exc).__name__, str(exc)))\n\n # Plot the full surface in 3D\n try:\n from itertools import product\n from mayavi import mlab\n theta = np.linspace(0.0, np.pi, n_angles)\n phi = np.linspace(0.0, 2.0*np.pi, n_angles)\n theta = theta.tolist()\n T, P = np.meshgrid(theta, phi)\n Gamma = np.zeros(T.shape)\n print(\"Evaluating gamma at all angles...\")\n for indx in product(range(n_angles), range(n_angles)):\n Gamma[indx] = self.eval(T[indx], P[indx])\n\n X = Gamma*np.cos(P)*np.sin(T)\n Y = Gamma*np.sin(P)*np.sin(T)\n Z = Gamma*np.cos(T)\n mlab.figure(bgcolor=(1, 1, 1), fgcolor=(0, 0, 0))\n mlab.mesh(X, Y, Z, scalars=Gamma/np.min(Gamma))\n mlab.colorbar()\n if show:\n mlab.show()\n except ImportError as exc:\n print(\"{}: {}\".format(type(exc).__name__, str(exc)))\n print(\"To visualize in 3D mayavi is required!\")\n\n if show:\n plt.show()\n return fig_xy", "def 
__init__(self, g_impath, f_impath):\n self.image_g = cv2.imread(g_impath)\n assert self.image_g is not None\n if f_impath is None:\n self.image_f = self.image_g\n else:\n self.image_f = cv2.imread(f_impath)\n assert self.image_f is not None\n self.f_path = f_impath\n self.g_path = g_impath\n self.mask = np.zeros_like(self.image_g)\n self.draw = False\n self.size = 5\n self.image_g_reset = self.image_g.copy()\n self.image_f_reset = self.image_f.copy()\n self.mask_reset = self.mask.copy()\n self.original_mask_copy = np.zeros(self.image_f.shape)\n self.window_name = \"Draw mask: s-save; r:reset; q:quit; l:larger painter; m:smaller painter\"\n self.window_name_move = \"Move mask: s-save; r:reset; q:quit;\"\n self.to_move = False\n self.move=False\n self.x0 = 0\n self.y0 = 0\n self.is_first = True\n self.xi = 0\n self.yi = 0", "def vis_mechanically_coupled_regions(img_dir,output_dir,data,dbscn_length,dbscn_min_size,display_not_save=False):\n #Read in the image that is segmented/labelled for nuclei\n img=imread(img_dir)\n\n #save plots to show clusters\n fig = plt.figure(figsize=(6, 2))\n ax0 = fig.add_subplot(131)\n ax1 = fig.add_subplot(132)\n ax3 = fig.add_subplot(133)\n #show segmented image labels\n ax0.imshow(img,aspect='auto') \n ax0.axis('off')\n #nuclear centroid color-coded by their orientation\n img1=ax1.scatter(data[\"Y\"], data[\"X\"], c=data[\"angles\"],s=1)\n ax1.set_xlim(0,img.shape[0])\n ax1.set_ylim(img.shape[1],0)\n plt.colorbar(img1)\n ax1.axis('off')\n\n # plot the cluster assignments\n img3=ax3.scatter(data[data[\"clusters\"]> -1][\"Y\"], data[data[\"clusters\"]> -1][\"X\"], \n c=data[data[\"clusters\"]> -1][\"clusters\"],cmap=\"plasma\",s=1)\n ax3.set_xlim(0,img.shape[0])\n ax3.set_ylim(img.shape[1],0)\n ax3.axis('off')\n\n #add titles\n ax0.title.set_text('Segmented Image')\n ax1.title.set_text('Filtered Orientation')\n ax3.title.set_text('Clusters')\n\n if display_not_save:\n plt.show()\n else: \n plt.savefig((output_dir+\"/\"+img_dir.rsplit('/', 1)[-1][:-4]+\"_\"+str(dbscn_length)+\"_\"+ str(dbscn_min_size)+\".png\"),dpi=600, bbox_inches = 'tight',pad_inches = 0)\n fig.clf()\n plt.close(fig)\n plt.close('all')\n \n \n del fig,ax0,ax1,ax3,img1,img3", "def startface(self):\n self.fan = (self.position.x,self.position.y,self.position.z)", "def setContourFilling(mode='cell'):\n mdict = {'cell':'CELL','polygon':'POLY'}\n dislin.shdmod(mdict[mode],'CONTUR')", "def setSurfaceMeshing(state='off',shading=1):\n sdict = {'off':'OFF','on':'ON'}\n val = sdict[state]\n if not shading:\n val = 'ONLY'\n dislin.surmsh(val)", "def __init__(self, MRIObj, pRFModelObj = None, FAModelObj = None,\n pRF_data = [], FA_data = [],\n prf_dm = [], max_ecc_ext = 5.5,\n pysub = 'hcp_999999', flatmap_height = 2048, full_figsize = (12, 8)):\n\n # set data object to use later on\n self.MRIObj = MRIObj\n\n # Load pRF and model object\n self.pRFModelObj = pRFModelObj\n self.FAModelObj = FAModelObj\n\n ## data to be plotted \n self.pRF_data = pRF_data\n self.FA_data = FA_data\n\n ## figure settings\n self.flatmap_height = flatmap_height\n self.full_figsize = full_figsize\n self.images = {}\n \n ## create pycortex vars\n self.mask, extents = cortex.quickflat.utils.get_flatmask(pysub, height = self.flatmap_height)\n self.vc = cortex.quickflat.utils._make_vertex_cache(pysub, height = self.flatmap_height)\n\n self.mask_index = np.zeros(self.mask.shape)\n self.mask_index[self.mask] = np.arange(self.mask.sum())\n\n # set prf dm\n self.prf_dm = prf_dm\n\n ## set grid of possible points in downsampled space\n 
self.point_grid_2D = np.array(np.meshgrid(np.linspace(-1, 1, prf_dm.shape[0]) * max_ecc_ext,\n np.linspace(1, -1, prf_dm.shape[0]) * max_ecc_ext))", "def set_cartesian_velo_continuous(self, on_off):\r\n return self._arm.set_cartesian_velo_continuous(on_off)", "def main():\n save = False\n show = True\n\n #hd_parameter_plots = HDparameterPlots(save=save)\n #hd_parameter_plots.flow_parameter_distribution_for_non_lake_cells_for_current_HD_model()\n #hd_parameter_plots.flow_parameter_distribution_current_HD_model_for_current_HD_model_reprocessed_without_lakes_and_wetlands()\n #hd_parameter_plots.flow_parameter_distribution_ten_minute_data_from_virna_0k_ALG4_sinkless_no_true_sinks_oceans_lsmask_plus_upscale_rdirs()\n #hd_parameter_plots.flow_parameter_distribution_ten_minute_data_from_virna_0k_ALG4_sinkless_no_true_sinks_oceans_lsmask_plus_upscale_rdirs_no_tuning()\n #ice5g_comparison_plots = Ice5GComparisonPlots(save=save)\n #ice5g_comparison_plots.plotLine()\n #ice5g_comparison_plots.plotFilled()\n #ice5g_comparison_plots.plotCombined()\n #ice5g_comparison_plots.plotCombinedIncludingOceanFloors()\n #flowmapplot = FlowMapPlots(save)\n #flowmapplot.FourFlowMapSectionsFromDeglaciation()\n #flowmapplot.Etopo1FlowMap()\n #flowmapplot.ICE5G_data_all_points_0k()\n #flowmapplot.ICE5G_data_all_points_0k_no_sink_filling()\n #flowmapplot.ICE5G_data_all_points_0k_alg4_two_color()\n #flowmapplot.ICE5G_data_all_points_21k_alg4_two_color()\n #flowmapplot.Etopo1FlowMap_two_color()\n #flowmapplot.Etopo1FlowMap_two_color_directly_upscaled_fields()\n #flowmapplot.Corrected_HD_Rdirs_FlowMap_two_color()\n #flowmapplot.ICE5G_data_ALG4_true_sinks_21k_And_ICE5G_data_ALG4_true_sinks_0k_FlowMap_comparison()\n #flowmapplot.Corrected_HD_Rdirs_And_Etopo1_ALG4_sinkless_directly_upscaled_fields_FlowMap_comparison()\n #flowmapplot.Corrected_HD_Rdirs_And_Etopo1_ALG4_true_sinks_directly_upscaled_fields_FlowMap_comparison()\n #flowmapplot.Corrected_HD_Rdirs_And_ICE5G_data_ALG4_sinkless_0k_directly_upscaled_fields_FlowMap_comparison()\n #flowmapplot.Corrected_HD_Rdirs_And_ICE5G_data_ALG4_true_sinks_0k_directly_upscaled_fields_FlowMap_comparison()\n #flowmapplot.Corrected_HD_Rdirs_And_ICE5G_data_ALG4_corr_orog_0k_directly_upscaled_fields_FlowMap_comparison()\n #flowmapplot.Corrected_HD_Rdirs_And_ICE5G_data_ALG4_corr_orog_downscaled_ls_mask_0k_directly_upscaled_fields_FlowMap_comparison()\n #flowmapplot.Corrected_HD_Rdirs_And_ICE5G_data_ALG4_no_true_sinks_corr_orog_0k_directly_upscaled_fields_FlowMap_comparison()\n #flowmapplot.Corrected_HD_Rdirs_And_ICE5G_HD_as_data_ALG4_true_sinks_0k_directly_upscaled_fields_FlowMap_comparison()\n #flowmapplot.Upscaled_Rdirs_vs_Directly_Upscaled_fields_ICE5G_data_ALG4_corr_orog_downscaled_ls_mask_0k_FlowMap_comparison()\n #flowmapplot.Ten_Minute_Data_from_Virna_data_ALG4_corr_orog_downscaled_lsmask_no_sinks_21k_vs_0k_FlowMap_comparison()\n #flowmapplot.Upscaled_Rdirs_vs_Corrected_HD_Rdirs_ICE5G_data_ALG4_corr_orog_downscaled_ls_mask_0k_FlowMap_comparison()\n flowmapplotwithcatchment = FlowMapPlotsWithCatchments(save)\n #flowmapplotwithcatchment.Upscaled_Rdirs_vs_Corrected_HD_Rdirs_ICE5G_data_ALG4_corr_orog_downscaled_ls_mask_0k_FlowMap_comparison()\n #flowmapplotwithcatchment.compare_present_day_and_lgm_river_directions_with_catchments_virna_data_plus_tarasov_style_orog_corrs_for_both()\n #flowmapplotwithcatchment.compare_present_day_river_directions_with_catchments_virna_data_with_vs_without_tarasov_style_orog_corrs()\n 
#flowmapplotwithcatchment.compare_lgm_river_directions_with_catchments_virna_data_with_vs_without_tarasov_style_orog_corrs()\n #flowmapplotwithcatchment.Upscaled_Rdirs_vs_Corrected_HD_Rdirs_tarasov_upscaled_data_ALG4_corr_orog_downscaled_ls_mask_0k_FlowMap_comparison()\n #flowmapplotwithcatchment.upscaled_rdirs_with_and_without_tarasov_upscaled_data_ALG4_corr_orog_downscaled_ls_mask_0k_FlowMap_comparison()\n #flowmapplotwithcatchment.\\\n #upscaled_rdirs_with_and_without_tarasov_upscaled_north_america_only_data_ALG4_corr_orog_downscaled_ls_mask_0k_FlowMap_comparison()\n #flowmapplotwithcatchment.\\\n #Upscaled_Rdirs_vs_Corrected_HD_Rdirs_tarasov_upscaled_north_america_only_data_ALG4_corr_orog_downscaled_ls_mask_0k_FlowMap_comparison()\n #flowmapplotwithcatchment.\\\n #Upscaled_Rdirs_vs_Corrected_HD_Rdirs_tarasov_upscaled_north_america_only_data_ALG4_corr_orog_glcc_olson_lsmask_0k_FlowMap_comparison()\n #flowmapplotwithcatchment.compare_present_day_and_lgm_river_directions_with_catchments_ICE5G_plus_tarasov_style_orog_corrs_for_both()\n #flowmapplotwithcatchment.compare_present_day_and_lgm_river_directions_with_catchments_ICE6G_plus_tarasov_style_orog_corrs_for_both()\n #flowmapplotwithcatchment.compare_ICE5G_and_ICE6G_with_catchments_tarasov_style_orog_corrs_for_both()\n #flowmapplotwithcatchment.compare_river_directions_with_dynriver_corrs_and_MERIThydro_derived_corrs()\n #flowmapplotwithcatchment.compare_river_directions_with_dynriver_corrs_and_MERIThydro_derived_corrs_original_ts()\n flowmapplotwithcatchment.compare_river_directions_with_dynriver_corrs_and_MERIThydro_derived_corrs_new_ts_10min()\n outflowplots = OutflowPlots(save)\n #outflowplots.Compare_Upscaled_Rdirs_vs_Directly_Upscaled_fields_ICE5G_data_ALG4_corr_orog_downscaled_ls_mask_0k()\n #outflowplots.Compare_Corrected_HD_Rdirs_And_ICE5G_as_HD_data_ALG4_sinkless_all_points_0k()\n #outflowplots.Compare_Corrected_HD_Rdirs_And_ICE5G_as_HD_data_ALG4_true_sinks_all_points_0k()\n #outflowplots.Compare_Corrected_HD_Rdirs_And_ICE5G_ALG4_sinkless_all_points_0k_directly_upscaled_fields()\n #outflowplots.Compare_Corrected_HD_Rdirs_And_ICE5G_ALG4_true_sinks_all_points_0k_directly_upscaled_fields()\n #outflowplots.Compare_Corrected_HD_Rdirs_And_ICE5G_ALG4_corr_orog_all_points_0k_directly_upscaled_fields()\n #outflowplots.Compare_Corrected_HD_Rdirs_And_ICE5G_ALG4_corr_orog_downscaled_ls_mask_all_points_0k_directly_upscaled_fields()\n #outflowplots.Compare_Corrected_HD_Rdirs_And_Etopo1_ALG4_sinkless_directly_upscaled_fields()\n #outflowplots.Compare_Corrected_HD_Rdirs_And_Etopo1_ALG4_true_sinks_directly_upscaled_fields()\n #outflowplots.Compare_Corrected_HD_Rdirs_And_ICE5G_plus_tarasov_upscaled_srtm30_ALG4_corr_orog_0k_directly_upscaled_fields()\n #outflowplots.Compare_Original_Corrections_vs_Upscaled_MERIT_DEM_0k()\n outflowplots.Compare_Original_Corrections_vs_Upscaled_MERIT_DEM_0k_new_truesinks()\n #outflowplots.Compare_Original_Corrections_vs_Upscaled_MERIT_DEM_0k_new_truesinks_individual_rivers()\n #outflowplots.Compare_ICE5G_with_and_without_tarasov_upscaled_srtm30_ALG4_corr_orog_0k_directly_upscaled_fields()\n #hd_output_plots = HDOutputPlots()\n #hd_output_plots.check_water_balance_of_1978_for_constant_forcing_of_0_01()\n #hd_output_plots.plot_comparison_using_1990_rainfall_data()\n #hd_output_plots.plot_comparison_using_1990_rainfall_data_adding_back_to_discharge()\n #coupledrunoutputplots = CoupledRunOutputPlots(save=save)\n #coupledrunoutputplots.ice6g_rdirs_lgm_run_discharge_plot()\n 
#coupledrunoutputplots.extended_present_day_rdirs_lgm_run_discharge_plot()\n #coupledrunoutputplots.ocean_grid_extended_present_day_rdirs_vs_ice6g_rdirs_lgm_run_discharge_plot()\n #coupledrunoutputplots.extended_present_day_rdirs_vs_ice6g_rdirs_lgm_echam()\n #coupledrunoutputplots.extended_present_day_rdirs_vs_ice6g_rdirs_lgm_mpiom_pem()\n #lake_plots = LakePlots()\n #lake_plots.plotLakeDepths()\n #lake_plots.LakeAndRiverMap()\n #lake_plots.LakeAndRiverMaps()\n if show:\n plt.show()", "def setF(self):\n if self.parent: self.f = self.setG(self.parent.g) + self.setH()\n else: self.f = self.setG() + self.setH()\n return self.f", "def __set_FIP(self,FP_num):\n\t\tsize = len(self.matrix)\n\t\tif FP_num == 1:\n\t\t\t[row,col] = [0,0]\n\t\telif FP_num == 2:\n\t\t\t[row,col] = [0,size-7]\n\t\telif FP_num == 3:\n\t\t\t[row,col] = [size-7,0]\n\n\t\tfor r in range(7):\n\t\t\tfor c in range(7):\n\t\t\t\tif (0 <= r and r <= 6 and (c ==0 or c == 6) or (0 <= c and c <= 6 and (r == 0 or r == 6))\n\t\t\t\t\tor (2 <= r and r <= 4 and 2 <= c and c <= 4)):\n\t\t\t\t\tself.matrix[row+r][col+c] = 1\n\t\t\t\telse:\n\t\t\t\t\tself.matrix[row+r][col+c] = 0\n\n\t\t\n\t\tif FP_num == 1:\n\t\t\tself.matrix[7][0:8] = [0] * 8\n\t\t\tfor i in range(0,8):\n\t\t\t\tself.matrix[i][7] = 0\n\t\telif FP_num == 2:\n\t\t\tself.matrix[7][size-8:size] = [0] * 8\n\t\t\tfor i in range(0,8):\n\t\t\t\tself.matrix[i][size-8] = 0\n\t\telif FP_num == 3:\n\t\t\tself.matrix[size-8][0:8] = [0] * 8\n\t\t\tfor i in range(size-8,size):\n\t\t\t\tself.matrix[i][7] = 0", "def mainFunction(f):\n\n #############################################################################\n \n \n # biomass hexagon\n predF = '/vol/v3/lt_stem_v3.1/models/biomassfiaald_20180708_0859/2000/biomassfiaald_20180708_0859_2000_mean.tif'\n trainF = '/vol/v2/datasets/biomass/nbcd/fia_ald/nbcd_fia_ald_biomass_clipped_to_conus.tif'\n shpF = '/vol/v1/general_files/datasets/spatial_data/hexagons/hexagons_conus_albers_30km_with_id.shp'\n trainND = -32768\n predND = -9999\n trgField = 'id'\n descrField = 'id'\n outDir = '/vol/v3/lt_stem_v3.1/evaluation/biomassfiaald_20180708_0859/hexagon_correlation'\n xyLim = (500, 500)\n xLab = 'Reference (tons/ha)'\n yLab = 'Prediction (tons/ha)'\n annoXY = (15,420)\n \n \n \"\"\"\n # cc\n predF = '/vol/v3/lt_stem_v3.1/models/canopy_20180915_1631/2001/canopy_20180915_1631_2001_mean.tif'\n trainF = '/vol/v2/stem/conus/reference_rasters/nlcd_2001_canopy_clipped_to_conus_train.tif'\n #shpF = '/vol/v2/datasets/Eco_Level_III_US/us_eco_l3_no_states_multipart.shp'\n shpF = '/vol/v1/general_files/datasets/spatial_data/hexagons/hexagons_conus_albers_30km_with_id.shp'\n trainND = 255\n predND = 255\n trgField = 'id'\n descrField = 'id'\n #trgField = 'US_L3CODE'\n #descrField = 'US_L3NAME'\n #outDir = '/vol/v3/lt_stem_v3.1/evaluation/canopy_20180915_1631/ecoregion_correlation'\n outDir = '/vol/v3/lt_stem_v3.1/evaluation/canopy_20180915_1631/hexagon_correlation'\n xyLim = (100, 100)\n xLab = 'Reference (%)'\n yLab = 'Prediction (%)'\n annoXY = (5,82)\n \"\"\"\n #############################################################################\n\n\n # get color setup\n norm = colors.Normalize(vmin=0, vmax=1)\n f2rgb = cm.ScalarMappable(norm=norm, cmap=cm.get_cmap('YlGnBu_r'))\n \n # open the shapefile\t\n vDriver = ogr.GetDriverByName(\"ESRI Shapefile\")\n vSrc = vDriver.Open(shpF, 0)\n vLayer = vSrc.GetLayer()\n \n commonBox = get_intersec([predF, trainF])\n\n#for f in range(vLayer.GetFeatureCount()):\n feature = vLayer[f]\n name = 
feature.GetField(trgField)\n print('f: '+str(f))\n outFig = os.path.join(outDir, (trgField.replace(' ','_').lower()+'_'+str(name)+'.png'))\n if os.path.exists(outFig):\n #break\n return\n \n descr = feature.GetField(descrField)\n \n predP, coords = get_zone_pixels(feature, shpF, predF, 1, [commonBox[0], commonBox[2], commonBox[3], commonBox[1]])#.compressed() [commonBox[0], commonBox[2], commonBox[3], commonBox[1]]\n trainP, coords = get_zone_pixels(feature, shpF, trainF, 1, [coords[0], coords[1], coords[2], coords[3]])#.compressed()\n \n predP = ma.masked_equal(predP, predND)\n trainP = ma.masked_equal(trainP, trainND)\n trainP = ma.masked_equal(trainP, 0)\n\n combMask = np.logical_not(np.logical_not(predP.mask) * np.logical_not(trainP.mask))\n predP[combMask] = ma.masked\n trainP[combMask] = ma.masked\n predP = predP.compressed()\n trainP = trainP.compressed()\n if (predP.shape[0] == 0) | (trainP.shape[0] == 0) | (predP==0).all() | (trainP==0).all():\n predP = np.array([0,0,1,1], dtype='float64')\n trainP = np.array([0,0,1,1], dtype='float64')\n mae = round(np.mean(np.absolute(np.subtract(predP, trainP))),1)\n rmse = round(np.sqrt(np.mean((predP-trainP)**2)),1)\n \n\n totPixs = trainP.shape[0]\n sampSize = round(totPixs*1)\n pickFrom = range(sampSize)\n #sampIndex = np.random.choice(pickFrom, size=sampSize)\n sampIndex = pickFrom\n\n r = round(np.corrcoef(trainP[sampIndex], predP[sampIndex])[0][1], 2)\n if (mae == 0) & (r == 1):\n r = 0.0\n rColor = f2hex(f2rgb, r)\n p = sns.jointplot(trainP[sampIndex], predP[sampIndex], kind=\"hex\", color='blue', xlim=(0,xyLim[0]), ylim=(0,xyLim[1]), size=5)\n p.ax_joint.set_xlabel(xLab)\n p.ax_joint.set_ylabel(yLab)\n p.ax_joint.annotate('r: '+str(r)+'\\nrmse: '+str(rmse)+'\\nmae: '+str(mae), annoXY)\n plt.tight_layout()\n outFig = os.path.join(outDir, (trgField.replace(' ','_').lower()+'_'+str(name)+'.png'))\n p.savefig(outFig)\n \n df = pd.DataFrame({'id':name, 'descr':descr, 'r':r, 'rmse':rmse, 'mae':mae, 'color':rColor, 'img':os.path.basename(outFig)}, index=[0])\n outCSV = outFig.replace('.png','.csv')\n df.to_csv(outCSV, ',', index=False)", "def plot_rectangular_FOV(ax, exp='cm'):\n params = {'lon': {'cm': [50, 200], 'argo': [25, 100], '40': [285, 320]},\n 'color': {'cm':'magenta', 'argo': 'green', '40':'green'},\n #'x': {'cm': 150, 'argo': 75, '40':-50},\n 'x': {'cm': 165, 'argo': 90, '40':-50},\n 'y': {'cm': 7, 'argo': 7, '40':-55.5},\n 'label': {'cm': 'CASA-MIA', 'argo': 'ARGO-YBJ', '40':'IC-40'}}\n\n lon1 = move_gc_to_center(params['lon'][exp][0]*np.pi/180.)*180./np.pi\n lon2 = move_gc_to_center(params['lon'][exp][1]*np.pi/180.)*180./np.pi\n ax.plot([lon1, lon1], [-5, 5], lw=2,\n c=params['color'][exp], ls='--')\n ax.plot([lon2, lon2], [-5, 5], lw=2,\n c=params['color'][exp], ls='--')\n\n if exp == 'cm':\n ax.plot([lon2, -180], [-5, -5], lw=2,\n c=params['color'][exp], ls='--')\n ax.plot([lon2, -180], [5, 5], lw=2,\n c=params['color'][exp], ls='--')\n ax.plot([lon1, 180], [5, 5], lw=2,\n c=params['color'][exp], ls='--')\n ax.plot([lon1, 180], [-5, -5], lw=2,\n c=params['color'][exp], ls='--')\n else:\n lon1 += 1.9\n ax.plot([lon1, lon2], [-5, -5], lw=2,\n c=params['color'][exp], ls='--')\n ax.plot([lon1, lon2], [5, 5], lw=2,\n c=params['color'][exp], ls='--')\n ax.text(x=params['x'][exp], y=params['y'][exp],\n s=params['label'][exp], color=params['color'][exp],\n fontsize=40, fontweight='bold')", "def complete_paper_plot(plot_dir,\n index,\n image1,\n image2,\n flow_uv,\n ground_truth_flow_uv,\n flow_valid_occ,\n predicted_occlusion,\n 
ground_truth_occlusion,\n frame_skip=None):\n\n def save_fig(name, plot_dir):\n plt.xticks([])\n plt.yticks([])\n if frame_skip is not None:\n filename = str(index) + '_' + str(frame_skip) + '_' + name\n plt.savefig(os.path.join(plot_dir, filename), bbox_inches='tight')\n else:\n filepath = str(index) + '_' + name\n plt.savefig(os.path.join(plot_dir, filepath), bbox_inches='tight')\n plt.clf()\n\n#############here#######################\n # def robust_l1(x):\n # \"\"\"Robust L1 metric.\"\"\"\n # return (x ** 2 + 0.001 ** 2) ** 0.5\n #\n #\n # error = robust_l1(ground_truth_flow_uv - flow_uv)\n #\n # mask_non_zero = ground_truth_flow_uv != 0\n # mask_zero = ground_truth_flow_uv == 0\n #\n # loss_gt = (tf.reduce_sum(error[mask_non_zero]) / (tf.reduce_sum(tf.cast(mask_non_zero, tf.float32)) + 1e-16))\n # loss_zero = (tf.reduce_sum(error[mask_zero]) / (tf.reduce_sum(tf.cast(mask_zero, tf.float32)) + 1e-16))\n #\n # # flowpy.flow_write(plot_dir + '/flow_gt'+ str(index)+\".flo\",ground_truth_flow_uv)\n # flowpy.flow_write(plot_dir + '/flow_pred_bkwd' + str(index) + \".flo\", flow_uv)\n #\n # # print(flow_uv.shape)\n # fig, axis = plt.subplots(3,2)\n # fig.set_figheight(14)\n # fig.set_figwidth(14)\n # axis[0,0].imshow(image1)\n # axis[0,0].set_title(\"Image1\")\n # axis[0, 1].imshow(image2)\n # axis[0, 1].set_title(\"Image2\")\n # max_radius_f = flowpy.get_flow_max_radius(ground_truth_flow_uv)\n # axis[1, 0].imshow(flowpy.flow_to_rgb(ground_truth_flow_uv, flow_max_radius= max_radius_f))\n # axis[1, 0].set_title(\"Ground-truth Flow\")\n # flowpy.attach_calibration_pattern(axis[1,1], flow_max_radius=max_radius_f)\n # max_radius_p = flowpy.get_flow_max_radius(flow_uv)\n # axis[2, 0].imshow(flowpy.flow_to_rgb(flow_uv, flow_max_radius=max_radius_p))\n # axis[2, 0].set_title(\"Predicted Flow\")\n # axis[2,0].set_xlabel('l1 loss for gt pixels: {} \\n l1 loss for zero pixels: {}'.format(loss_gt,loss_zero))\n # flowpy.attach_calibration_pattern(axis[2,1], flow_max_radius=max_radius_p)\n # # print(np.mean(ground_truth_flow_uv), np.mean(flow_uv))\n #\n # axis[2,1].imshow((1-predicted_occlusion[:, :, 0]) * 255, cmap='Greys')\n # axis[2,1].set_title(\"Predicted Occlusion\")\n #\n # # plt.imshow(flowpy.flow_to_rgb(flow_uv))\n # # plt.savefig( plot_dir+'/pred_flow'+str(index))\n # # plt.imshow(flowpy.flow_to_rgb(ground_truth_flow_uv))\n # # plt.savefig( plot_dir+'/gt_flow'+ str(index))\n # # print(ground_truth_flow_uv.shape)\n # plt.imshow(image1)\n # plt.savefig(plot_dir + '/plots'+ str(index ))\n\n\n#############till_here##########################\n flow_uv = -flow_uv[::-1,:,:]\n ground_truth_flow_uv = -ground_truth_flow_uv[::-1,:, :]\n plt.figure()\n plt.clf()\n\n plt.imshow(np.moveaxis(((image1 + image2) / 2.),0,-1))\n\n save_fig('image_rgb', plot_dir)\n # np.save(\"flow_pred\"+plot_dir,flow_uv)\n plt.imshow(flow_to_rgb(flow_uv))\n save_fig('predicted_flow', plot_dir)\n # np.save(\"flow_gt\" + plot_dir, ground_truth_flow_uv * flow_valid_occ)\n plt.imshow(flow_to_rgb(ground_truth_flow_uv * flow_valid_occ))\n save_fig('ground_truth_flow', plot_dir)\n\n endpoint_error = np.sum(\n (ground_truth_flow_uv - flow_uv)**2, axis= 0 , keepdims=True)**0.5\n\n plt.imshow(\n (endpoint_error * flow_valid_occ)[0],\n cmap='viridis',\n vmin=0,\n vmax=40)\n save_fig('flow_error', plot_dir)\n\n plt.imshow((predicted_occlusion[0]) * 255, cmap='Greys')\n save_fig('predicted_occlusion', plot_dir)\n\n plt.imshow((ground_truth_occlusion[0]) * 255, cmap='Greys')\n save_fig('ground_truth_occlusion', plot_dir)\n\n plt.close('all')", 
"def toggle_fore_mod(self, checked):\n for tile in self.tiles:\n tile.toggle_fore_mod(checked)", "def float_to_front(qtile):\n for window in qtile.current_group.windows:\n if window.floating:\n window.cmd_bring_to_front()", "def FlowMapTwoColourPlotHelper(self,filename,lsmask_filename=None,grid_type='HD',\n minflowcutoff=100,flip_data=False,flip_mask=False,\n **kwargs):\n flowmap_object = iodriver.load_field(filename,\n file_type=iodriver.get_file_extension(filename),\n field_type='Generic',\n grid_type=grid_type,**kwargs)\n lsmask_field = iodriver.load_field(lsmask_filename,\n file_type=iodriver.get_file_extension(lsmask_filename),\n field_type='Generic', grid_type=grid_type,**kwargs)\n if flip_data:\n flowmap_object.flip_data_ud()\n if flip_mask:\n lsmask_field.flip_data_ud()\n lsmask = lsmask_field.get_data()\n flowmap = flowmap_object.get_data()\n plt.figure()\n plt.subplot(111)\n flowmap[flowmap < minflowcutoff] = 1\n flowmap[flowmap >= minflowcutoff] = 2\n if lsmask is not None:\n flowmap[lsmask == 1] = 0\n cmap = mpl.colors.ListedColormap(['blue','peru','black'])\n bounds = list(range(4))\n norm = mpl.colors.BoundaryNorm(bounds,cmap.N)\n plt.imshow(flowmap,cmap=cmap,norm=norm,interpolation=\"none\")\n plt.title('Cells with cumulative flow greater than or equal to {0}'.format(minflowcutoff))", "def _add_force(Fg: np.ndarray, dof_map: Dict[Tuple[int, int], int], model: BDF,\n load, offset: int, ndof_per_grid: int, cid: int=0, show_warning: bool=True):\n #cid = load.cid\n nid = load.node\n node_ref = load.node_ref\n ndofi = ndof_per_grid if node_ref.type == 'GRID' else 1\n assert ndofi == 6, f'GRID must have 6 DOF for structural analysis\\n{node_ref}'\n\n if node_ref.cd == cid:\n fglobal = load.mag * load.xyz\n elif node_ref.cd != cid:\n fbasic = load.to_global()\n if show_warning:\n model.log.warning(f'differing cid & cd is not supported; cid={cid} cd={node_ref.cd}')\n show_warning = False\n cd_ref = node_ref.cd_ref\n Tbg = cd_ref.beta()\n fglobal = _force_to_local(cd_ref, fbasic)\n\n if 0: # pragma: no cover\n if cd_ref.type[-1] in ['C', 'S']:\n ex = Tbg[0, :]\n ey = Tbg[1, :]\n #ez = Tbg[2, :]\n xyz_local = node_ref.get_position_wrt(model, node_ref.cd)\n if cd_ref.type[-1] == 'C':\n theta = radians(xyz_local[1])\n ct = cos(theta)\n st = sin(theta)\n T = np.array([\n [ct, -st, 0.],\n [st, ct, 0.],\n [0., 0., 1.],\n ])\n Tbg = Tbg @ T\n else:\n from pyNastran.bdf.cards.coordinate_systems import CORD2S\n rho, thetad, phid = xyz_local\n coord = CORD2S.add_ijk(-1, origin=cd_ref.origin, i=ex, j=ey, k=None, rid=0, comment='')\n beta = coord.beta()\n Tbg = Tbg @ beta\n coord.transform_vector_to_local([rho, thetad, phid])\n #theta = radians(xyz_local[1])\n #phi = radians(xyz_local[2])\n #ct = cos(theta)\n #st = sin(theta)\n\n #cp = cos(phi)\n #sp = sin(phi)\n\n str(xyz_local)\n else:\n # rectangular\n pass\n Tgb = Tbg.T\n fglobal = Tgb @ fbasic\n else:\n raise NotImplementedError(f'node_ref.cd={node_ref.cd} cid={cid} load:\\n{str(load)}')\n\n for dof in range(3):\n irow = dof_map[(nid, dof+offset)]\n Fg[irow] += fglobal[dof]\n return show_warning", "def segmentNeedle(self):\n #productive #event\n profprint()\n if self.fiducialButton.isEnabled():\n print \"new checked state: \",not self.fiducialButton.checked\n self.onStartStopGivingNeedleTipsToggled(not self.fiducialButton.checked)", "def ToggleAllGizmoLocalMode( self ):\n\n value = self.gizmoMgr.GetGizmoLocal( 'pos' )\n self.gizmoMgr.SetGizmoLocal( 'pos', not value )\n self.gizmoMgr.SetGizmoLocal( 'rot', not value )\n 
self.gizmoMgr.SetGizmoLocal( 'scl', not value )", "def main():\n\n robot = S2Fluke2( \"/dev/rfcomm2\" )\n\n print( \"getVersion :\", robot.getVersion() )\n print( \"identifyRobot :\", robot.identifyRobot() )\n print( \"getBattery :\", robot.getBattery() )\n print( \"setForwardness: SCRIBBLER_FORWARD\" )\n robot.setForwardness( robot.SCRIBBLER_FORWARD )\n print( \"setForwardness: FLUKE_FORWARD\" )\n robot.setForwardness( robot.FLUKE_FORWARD )\n print( \"setForwardness: SCRIBBLER_FORWARD\" )\n robot.setForwardness( robot.SCRIBBLER_FORWARD )\n print( \"getErrors : \" )\n print( robot.getErrors() )\n robot.resetScribbler()\n\n robot.close()", "def cb_reset(event):\n axDirichlet.cla()\n # Reset Sliders\n sAlpha0.reset() # resetが駄目!一番最初に戻ってしまう\n sAlpha1.reset()\n sAlpha2.reset()\n alpha_update = [sAlpha0.val, sAlpha1.val, sAlpha2.val]\n print('alpha_update=', alpha_update)\n\n # ML\n lambda_ML = CatML.MLinfer(x_cat)\n\n axML.cla()\n drawBarGraph( axML, \"ML\", lambda_ML, bar_y_max, col_ML ) # Draw Bar graph\n\n\n # MAP\n dirichlet.set_param(alpha_update)\n lambda_MAP = CatMAP.MAPinfer(x_cat, dirichlet)\n\n axMAP.cla()\n drawBarGraph( axMAP, \"MAP\", lambda_MAP, bar_y_max, col_MAP ) # Draw Bar Graph\n\n # Bayes\n posteriorDirichlet.set_param(alpha_update)\n posteriorDirichlet.calcPosterior(x_cat)\n lambda_Bayes = np.zeros(3)\n for k in range(3):\n lambda_Bayes[k] = posteriorDirichlet.BayesInfer(k)\n\n axBayes.cla()\n drawBarGraph( axBayes, \"Bayes\", lambda_Bayes, bar_y_max, col_Bayes ) # Draw Bar Graph\n\n draw_pdf_contours(axDirichlet, dirichlet, True) # Draw Dirichlet\n\n print('Reset')\n print('lambda_ML =', lambda_ML)\n print('lambda_MAP =', lambda_MAP)\n print('lambda_Bayes=', lambda_Bayes)\n draw_point(axDirichlet, lambda_ML, col_ML)\n draw_point(axDirichlet, lambda_MAP, col_MAP)\n draw_point(axDirichlet, lambda_Bayes, col_Bayes)\n draw_point(axLikelihood, lambda_ML, col_ML)\n draw_point(axPosteriorDirichlet, lambda_MAP, col_MAP)\n draw_point(axPosteriorDirichlet, lambda_Bayes, col_Bayes)\n\n fig.canvas.draw_idle()", "def mollview(map=None,fig=None,plot=False,filenme=None,\n\t\t\t rot=None,coord=None,unit='',\n\t\t\t xsize=800,title='Mollweide view',nest=False,\n\t\t\t min=None,max=None,flip='astro',\n\t\t\t remove_dip=False,remove_mono=False,\n\t\t\t gal_cut=0,\n\t\t\t format='%g',format2='%g',\n\t\t\t cbar=True,cmap=None, notext=False,\n\t\t\t norm=None,hold=False,margins=None,sub=None,\n\t\t\t return_projected_map=False):\n\ttry:\n\t\tfrom healpy import pixelfunc, projaxes as PA\n\texcept ImportError:\n\t\twarnings.warn(\n\t\t\t\"Could not load healpy package. 
If you want to use this feature, \"\n\t\t\t\"plaese install the healpy package from here: http://healpy.readthedocs.io/en/latest/\"\n\t\t\t\"or via pip or conda.\", RuntimeWarning)\n\t\treturn\n\n\t# Create the figure\n\n\tif not (hold or sub):\n\t\tif fig == None:\n\t\t\tf=plt.figure(figsize=(8.5,5.4))\n\t\t\textent = (0.02,0.05,0.96,0.9)\n\t\telse:\n\t\t\tf=fig\n\t\t\textent = (0.02,0.05,0.96,0.9)\n\telif hold:\n\t\tf=plt.gcf()\n\t\tleft,bottom,right,top = np.array(f.gca().get_position()).ravel()\n\t\textent = (left,bottom,right-left,top-bottom)\n\t\tf.delaxes(f.gca())\n\telse: # using subplot syntax\n\t\tf=plt.gcf()\n\t\tif hasattr(sub,'__len__'):\n\t\t\tnrows, ncols, idx = sub\n\t\telse:\n\t\t\tnrows, ncols, idx = sub//100, (sub%100)//10, (sub%10)\n\t\tif idx < 1 or idx > ncols*nrows:\n\t\t\traise ValueError('Wrong values for sub: %d, %d, %d'%(nrows,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t ncols,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t idx))\n\t\tc,r = (idx-1)%ncols,(idx-1)//ncols\n\t\tif not margins:\n\t\t\tmargins = (0.01,0.0,0.0,0.02)\n\t\textent = (c*1./ncols+margins[0],\n\t\t\t 1.-(r+1)*1./nrows+margins[1],\n\t\t\t 1./ncols-margins[2]-margins[0],\n\t\t\t 1./nrows-margins[3]-margins[1])\n\t\textent = (extent[0]+margins[0],\n\t\t\t extent[1]+margins[1],\n\t\t\t extent[2]-margins[2]-margins[0],\n\t\t\t extent[3]-margins[3]-margins[1])\n\n\t# Starting to draw : turn interactive off\n\twasinteractive = plt.isinteractive()\n\tplt.ioff()\n\ttry:\n\t\tif map is None:\n\t\t\tmap = np.zeros(12)+np.inf\n\t\t\tcbar=False\n\t\tmap = pixelfunc.ma_to_array(map)\n\t\tax=PA.HpxMollweideAxes(f,extent,coord=coord,rot=rot,\n\t\t\t\t\t\t format=format2,flipconv=flip)\n\t\tf.add_axes(ax)\n\t\tif remove_dip:\n\t\t\tmap=pixelfunc.remove_dipole(map,gal_cut=gal_cut,\n\t\t\t\t\t\t\t\t\tnest=nest,copy=True,\n\t\t\t\t\t\t\t\t\tverbose=True)\n\t\telif remove_mono:\n\t\t\tmap=pixelfunc.remove_monopole(map,gal_cut=gal_cut,nest=nest,\n\t\t\t\t\t\t\t\t\t copy=True,verbose=True)\n\t\timg = ax.projmap(map,nest=nest,xsize=xsize,coord=coord,vmin=min,vmax=max,\n\t\t\t cmap=cmap,norm=norm)\n\t\tif cbar:\n\t\t\tim = ax.get_images()[0]\n\t\t\tb = im.norm.inverse(np.linspace(0,1,im.cmap.N+1))\n\t\t\tv = np.linspace(im.norm.vmin,im.norm.vmax,im.cmap.N)\n\t\t\tif matplotlib.__version__ >= '0.91.0':\n\t\t\t\tcb=f.colorbar(im,ax=ax,\n\t\t\t\t\t\t orientation='horizontal',\n\t\t\t\t\t\t shrink=0.5,aspect=25,ticks=PA.BoundaryLocator(),\n\t\t\t\t\t\t pad=0.05,fraction=0.1,boundaries=b,values=v,\n\t\t\t\t\t\t format=format)\n\t\t\telse:\n\t\t\t\t# for older matplotlib versions, no ax kwarg\n\t\t\t\tcb=f.colorbar(im,orientation='horizontal',\n\t\t\t\t\t\t shrink=0.5,aspect=25,ticks=PA.BoundaryLocator(),\n\t\t\t\t\t\t pad=0.05,fraction=0.1,boundaries=b,values=v,\n\t\t\t\t\t\t format=format)\n\t\t\tcb.solids.set_rasterized(True)\n\t\tax.set_title(title)\n\t\tif not notext:\n\t\t\tax.text(0.86,0.05,ax.proj.coordsysstr,fontsize=14,\n\t\t\t\tfontweight='bold',transform=ax.transAxes)\n\t\tif cbar:\n\t\t\tcb.ax.text(0.5,-1.0,unit,fontsize=14,\n\t\t\t\t transform=cb.ax.transAxes,ha='center',va='center')\n\t\tf.sca(ax)\n\tfinally:\n\t\tif plot:\n\t\t\tplt.draw()\n\t\tif wasinteractive:\n\t\t\tplt.ion()\n\t\t\t#plt.show()\n\tif return_projected_map:\n\t\treturn img", "def imdespeckle(imagefile, threshold):\n\n\n data = plt.imread(imagefile); # image is read into the array \"data\"\n data = np.mean(data,2); # convert to greyscale\n \n # Perform the 2D numerical fourier transform and scale it correctly. 
The result is a\n # picture of the image in \"frequency space\" (spatial frequency, that is).\n N1 = np.shape(data)[0] # number of rows\n N2 = np.shape(data)[1] # number of columns\n F=np.fft.fftshift(np.fft.fft2(data)/np.sqrt(N1*N2)) # 2D FT with zero freq's in center\n\n # Threshold the fourier transformed image\n pixels_below_threshold = np.log10(np.abs(F))<threshold # logical mask for pixels -> 0\n Fthresh = F # start unthresholded\n Fthresh[pixels_below_threshold] = 0 # set pixels below threshold to 0 \n \n # Finally, perform the inverse transform on the thresholded data to get back\n # to position space. (I.e. to get back our image.).\n despekld_image = np.abs(np.fft.ifft2(Fthresh)*np.sqrt(N1*N2))\n\n # Now display the results\n plt.figure(1) # open figure 1\n plt.clf() # clear it in case previously used\n ax1 = plt.axes() # define a set of axes\n ax1.pcolormesh(despekld_image, cmap='bone') # plot the despeckled image\n ax1.set_aspect('equal', 'box') # set aspect ratio to be correct\n ax1.set_title('Despeckled Image') # add a title\n plt.show() # display the plot\n\n fig2 = plt.figure(2)\n plt.clf()\n ax2 = plt.axes()\n with np.errstate(divide='ignore'): # suppresses warning for \"log10(0)\" \n c2 = ax2.pcolormesh(np.log10(np.abs(Fthresh)), cmap='viridis') # plot the FT\n fig2.colorbar(c2)\n ax2.set_aspect('equal', 'box')\n ax2.set_title('Log10 of the 2D FFT, Thresholded')\n plt.show()\n \n return despekld_image", "def _checkForSixaxis():\n return sixaxis.init(\"/dev/input/js1\")", "def segmentNeedle(self):\r\n # productive #event\r\n profprint()\r\n if self.fiducialButton.isEnabled():\r\n print \"new checked state: \", not self.fiducialButton.checked\r\n self.onStartStopGivingNeedleTipsToggled(not self.fiducialButton.checked)", "def __init__(self, options, imgs, frq_sim_guess, otf=None,\n wiener_parameter=1, fbounds=(0.01, 1), fbounds_shift=(0.01, 1),\n use_wicker=True, normalize_histograms=True, background_counts=100,\n do_global_phase_correction=True, determine_amplitudes=False, find_frq_first=True,\n default_to_guess_on_bad_phase_fit=True, max_phase_err=20*np.pi/180,\n default_to_guess_on_low_mcnr=True, min_mcnr=1,\n size_near_fo_to_remove=0,\n phases_guess=None, mod_depths_guess=None, pspec_params_guess=None,\n use_fixed_phase=False, use_fixed_frq=False, use_fixed_mod_depths=False,\n plot_diagnostics=True, interactive_plotting=False, save_dir=None, figsize=(20, 10)):\n # #############################################\n # saving information\n # #############################################\n self.save_dir = save_dir\n self.hold_figs_open = False\n self.figsize = figsize\n\n if self.save_dir is not None:\n self.log_file = open(os.path.join(self.save_dir, \"sim_log.txt\"), 'w')\n else:\n self.log_file = None\n\n # #############################################\n # setup plotting\n # #############################################\n if not interactive_plotting:\n plt.ioff()\n plt.switch_backend(\"agg\")\n\n # #############################################\n # analysis settings\n # #############################################\n self.wiener_parameter = wiener_parameter\n self.use_wicker = use_wicker\n self.global_phase_correction = do_global_phase_correction\n self.normalize_histograms = normalize_histograms\n self.size_near_fo_to_remove = size_near_fo_to_remove\n self.default_to_guess_on_bad_phase_fit = default_to_guess_on_bad_phase_fit\n self.max_phase_error = max_phase_err\n self.default_to_guess_on_low_mcnr = default_to_guess_on_low_mcnr\n self.min_mcnr = min_mcnr\n 
self.determine_amplitudes = determine_amplitudes\n self.use_fixed_phase = use_fixed_phase\n self.use_fixed_frq = use_fixed_frq\n self.use_fixed_mod_depths = use_fixed_mod_depths\n self.find_frq_first = find_frq_first\n self.plot_diagnostics = plot_diagnostics\n\n # #############################################\n # images\n # #############################################\n self.background_counts = background_counts\n self.imgs = imgs.astype(np.float64)\n self.nangles, self.nphases, self.ny, self.nx = imgs.shape\n \n # #############################################\n # get basic parameters\n # #############################################\n self.dx = options['pixel_size']\n self.dy = options['pixel_size']\n self.na = options['na']\n self.wavelength = options['wavelength']\n\n self.fmax = 1 / (0.5 * self.wavelength / self.na)\n self.fbounds = fbounds\n self.fbounds_shift = fbounds_shift\n\n self.frqs_guess = frq_sim_guess\n self.phases_guess = phases_guess\n self.mod_depths_guess = mod_depths_guess\n self.power_spectrum_params_guess = pspec_params_guess\n\n # #############################################\n # get frequency data and OTF\n # #############################################\n self.fx = tools.get_fft_frqs(self.nx, self.dx)\n self.fy = tools.get_fft_frqs(self.ny, self.dy)\n\n if otf is None:\n otf = psf.circ_aperture_otf(self.fx[None, :], self.fy[:, None], self.na, self.wavelength)\n self.otf = otf\n\n # #############################################\n # print current time\n # #############################################\n now = datetime.datetime.now()\n\n self.print_tee(\"####################################################################################\", self.log_file)\n self.print_tee(\"%d/%02d/%02d %02d:%02d:%02d\" % (now.year, now.month, now.day, now.hour, now.minute, now.second), self.log_file)\n self.print_tee(\"####################################################################################\", self.log_file)\n\n # #############################################\n # normalize histograms for input images\n # #############################################\n if self.normalize_histograms:\n tstart = time.process_time()\n\n for ii in range(self.nangles):\n for jj in range(1, self.nphases):\n self.imgs[ii, jj] = match_histograms(self.imgs[ii, jj], self.imgs[ii, 0])\n\n tend = time.process_time()\n self.print_tee(\"Normalizing histograms took %0.2fs\" % (tend - tstart), self.log_file)\n\n # #############################################\n # remove background\n # #############################################\n self.imgs = self.imgs - self.background_counts\n self.imgs[self.imgs <= 0] = 1e-12\n\n # #############################################\n # Fourier transform SIM images\n # #############################################\n tstart = time.process_time()\n\n self.imgs_ft = np.zeros((self.nangles, self.nphases, self.ny, self.nx), dtype=np.complex)\n for jj in range(self.nangles):\n for kk in range(self.nphases):\n # use periodic/smooth decomposition instead of traditional apodization\n img_to_xform, _ = psd.periodic_smooth_decomp(self.imgs[jj, kk])\n self.imgs_ft[jj, kk] = fft.fftshift(fft.fft2(fft.ifftshift(img_to_xform)))\n\n tend = time.process_time()\n\n self.print_tee(\"FT images took %0.2fs\" % (tend - tstart), self.log_file)\n\n # #############################################\n # get widefield image\n # #############################################\n tstart = time.process_time()\n\n self.widefield = get_widefield(self.imgs)\n wf_to_xform, _ = 
psd.periodic_smooth_decomp(self.widefield)\n self.widefield_ft = fft.fftshift(fft.fft2(fft.ifftshift(wf_to_xform)))\n\n tend = time.process_time()\n self.print_tee(\"Computing widefield image took %0.2fs\" % (tend - tstart), self.log_file)\n\n # #############################################\n # get optically sectioned image\n # #############################################\n tstart = time.process_time()\n\n sim_os = np.zeros((self.nangles, self.imgs.shape[-2], self.imgs.shape[-1]))\n for ii in range(self.nangles):\n sim_os[ii] = sim_optical_section(self.imgs[ii])\n # todo: maybe want to weight by power/mod depth?\n self.imgs_os = np.mean(sim_os, axis=0)\n\n tend = time.process_time()\n self.print_tee(\"Computing OS image took %0.2fs\" % (tend - tstart), self.log_file)", "def plot_IC_FOV(ax, config='86'):\n params = {'max_dec': {'86':np.arccos(0.8), '40': np.radians(30)},\n 'color': {'86':colors[0], '40': colors[1]},\n 'y': {'86':11, '40': -55.5}}\n\n dec, ra = np.meshgrid(np.linspace(0., params['max_dec'][config], 100) - np.pi/2.,\n np.linspace(0., 2.0*np.pi, 1000))\n dec = dec.flatten()\n ra = ra.flatten()\n c = SkyCoord(ra=ra*u.radian, dec=dec*u.radian, frame='fk5')\n latFOV = c.galactic.b.degree\n lonFOV = np.degrees(move_gc_to_center(c.galactic.l.radian))\n points = np.array([ [lonFOV[i], latFOV[i]] for i in range(len(lonFOV))])\n hull = ConvexHull(points)\n ax.plot(points[hull.vertices,0], points[hull.vertices,1],\n c=params['color'][config], lw=2, ls='--')\n #ax.text(x=-50, y=params['y'][config],\n ax.text(x=-45, y=params['y'][config],\n s='IC-{}'.format(config), color=params['color'][config],\n fontsize=40, fontweight='bold')", "def switch_frequency_plot_channel_four(self):\n if self.plot_channel_key_booleans[3]:\n self.plot_channel_key_booleans[3] = False\n self.parent_widget.graph_channel_four_button.setStyleSheet(\n \"background-color:rgb(%d,%d,%d)\" % (255, 255, 255))\n else:\n self.plot_channel_key_booleans[3] = True\n self.parent_widget.graph_channel_four_button.setStyleSheet(\n \"background-color:rgb(%d,%d,%d)\" % (LINE_COLORS[3]))", "def generate_siaf_pre_flight_reference_files_fgs(verbose=False, mode='siaf'):\n instrument = 'FGS'\n\n center_offset_x = 1023.5\n center_offset_y = 1023.5\n\n # hardcoded pixelscale, reference?\n scale = 0.06738281367 # arcsec/pixel\n\n if mode == 'siaf':\n # write focal plane alignment reference file\n outfile = os.path.join(JWST_SOURCE_DATA_ROOT, instrument, '{}_siaf_alignment.txt'.format(instrument.lower()))\n oss_flags = [False, True]\n elif mode == 'fsw':\n outfile = os.path.join(JWST_SOURCE_DATA_ROOT, instrument, '{}_fsw_coefficients.txt'.format(instrument.lower()))\n oss_flags = [True]\n\n if os.path.isfile(outfile):\n os.remove(outfile)\n\n siaf_alignment = None\n counter = 0\n\n for aperture_id in 'FGS1 FGS2'.split():\n\n if aperture_id == 'FGS1':\n V2Ref = 207.1900\n V3Ref = -697.5000\n\n # coefficients copied from Cox' makeSIAF.py to reproduce PRDOPSSOC-H-015\n # February 2015 FGS delivery\n # these numbers match the `To be Updated for CV3` column in the Tables on page 6ff\n # of an unpublished word document entitled `FGS Transformation for CV3.docx` by\n # Julia Zhou, e.g. 
C = IDEALPTOREALPXCOE_N\n\n # Initialize the parameters\n A = np.array(\n [-2.33369320E+01, 9.98690490E-01, 1.05024970E-02, 2.69889020E-06, 6.74362640E-06,\n 9.91415010E-07, 1.21090320E-09, -2.84802930E-11, 1.27892930E-09, -1.91322470E-11,\n 5.34567520E-14, 9.29791010E-14, 8.27060020E-14, 9.70576590E-14, 1.94203870E-14])\n\n B = np.array(\n [-2.70337440E+01, -2.54596080E-03, 1.01166810E+00, 2.46371870E-06, 2.08880620E-06,\n 9.32489680E-06, -4.11885660E-11, 1.26383770E-09, -7.60173360E-11, 1.36525900E-09,\n 2.70499280E-14, 5.70198270E-14, 1.43943080E-13, 7.02321790E-14, 1.21579450E-13])\n\n C = np.array(\n [2.31013520E+01, 1.00091800E+00, -1.06389620E-02, -2.65680980E-06, -6.51704610E-06,\n -7.45631440E-07, -1.29600400E-09, -4.27453220E-12, -1.27808870E-09, 5.01165140E-12,\n 2.72622090E-15, 5.42715750E-15, 3.46979980E-15, 2.49124350E-15, 1.22848570E-15])\n\n D = np.array(\n [2.67853100E+01, 2.26545910E-03, 9.87816850E-01, -2.35598140E-06, -1.91455620E-06,\n -8.92779540E-06, -3.24201520E-11, -1.30056630E-09, -1.73730700E-11,\n -1.27341590E-09, 1.84205730E-15, 3.13647160E-15, -2.99705840E-16, 1.98589690E-15,\n -1.26523200E-15])\n\n elif aperture_id == 'FGS2':\n V2Ref = 24.4300\n V3Ref = -697.5000\n\n A = np.array(\n [-3.28410900E+01, 1.03455010E+00, 2.11920160E-02, -9.08746430E-06, -1.43516480E-05,\n -3.93814140E-06, 1.60956450E-09, 5.82814640E-10, 2.02870570E-09, 2.08582470E-10,\n -2.79748590E-14, -8.11622820E-14, -4.76943000E-14, -9.01937740E-14,\n -8.76203780E-15])\n\n B = np.array(\n [-7.76806220E+01, 2.92234710E-02, 1.07790000E+00, -6.31144890E-06, -7.87266390E-06,\n -2.14170580E-05, 2.13293560E-10, 2.03376270E-09, 6.74607790E-10, 2.41463060E-09,\n -2.30267730E-14, -3.63681270E-14, -1.35117660E-13, -4.22207660E-14,\n -1.16201020E-13])\n\n C = np.array(\n [3.03390890E+01, 9.68539030E-01, -1.82288450E-02, 7.72758330E-06, 1.17536430E-05,\n 2.71516870E-06, -1.28167820E-09, -6.34376120E-12, -1.24563160E-09, -9.26192040E-12,\n 8.14604260E-16, -5.93798790E-16, -2.69247540E-15, -4.05196100E-15, 2.14529600E-15])\n\n D = np.array(\n [7.13783150E+01, -2.55191710E-02, 9.30941560E-01, 5.01322910E-06, 5.10548510E-06,\n 1.68083960E-05, 9.41565630E-12, -1.29749490E-09, -1.89194230E-11, -1.29425530E-09,\n -2.81501600E-15, -1.73025000E-15, 2.57732600E-15, 1.75268080E-15, 2.95238320E-15])\n\n number_of_coefficients = len(A)\n polynomial_degree = int((np.sqrt(8 * number_of_coefficients + 1) - 3) / 2)\n\n # generate distortion coefficient files\n siaf_index = []\n exponent_x = []\n exponent_y = []\n for i in range(polynomial_degree + 1):\n for j in np.arange(i + 1):\n siaf_index.append('{:d}{:d}'.format(i, j))\n exponent_x.append(i-j)\n exponent_y.append(j)\n\n\n print('*'*100)\n aperture_name = '{}_FULL'.format(aperture_id)\n for oss in oss_flags:\n\n if oss:\n aperture_name = aperture_name + '_OSS'\n oss_factor = 1.\n else:\n oss_factor = -1.\n\n print('{}'.format(aperture_name))\n\n if mode == 'fsw':\n (AX, BX, CX, DX) = (A, B, C, D)\n\n AS = polynomial.shift_coefficients(AX, center_offset_x, center_offset_y)\n BS = polynomial.shift_coefficients(BX, center_offset_x, center_offset_y)\n\n AS0 = copy.deepcopy(AS[0])\n BS0 = copy.deepcopy(BS[0])\n AS[0] = 0.0\n BS[0] = 0.0\n\n betaY = np.arctan2(AS[2], BS[2])\n print('Beta Y', np.degrees(betaY))\n print('Shift zeros', AS0, BS0)\n\n AR = AS * np.cos(betaY) - BS * np.sin(betaY)\n BR = AS * np.sin(betaY) + BS * np.cos(betaY)\n\n\n AR[0] = center_offset_x\n BR[0] = center_offset_y\n\n AF = polynomial.shift_coefficients(AR, -center_offset_x, -center_offset_y)\n BF 
= polynomial.shift_coefficients(BR, -center_offset_x, -center_offset_y)\n\n # Inverse matrices\n xc = polynomial.poly(AX, center_offset_x, center_offset_y)\n yc = polynomial.poly(BX, center_offset_x, center_offset_y)\n # CS1 = 1.0*C1 # Force a real copy\n CS = polynomial.shift_coefficients(CX, xc, yc)\n DS = polynomial.shift_coefficients(DX, xc, yc)\n CS0 = copy.deepcopy(CS[0])\n DS0 = copy.deepcopy(DS[0])\n\n CS[0] = 0.0\n DS[0] = 0.0\n CR = polynomial.prepend_rotation_to_polynomial(CS, np.degrees(betaY))\n DR = polynomial.prepend_rotation_to_polynomial(DS, np.degrees(betaY))\n CR[0] = CS0\n DR[0] = DS0\n CF = polynomial.shift_coefficients(CR, -center_offset_x, -center_offset_y)\n DF = polynomial.shift_coefficients(DR, -center_offset_x, -center_offset_y)\n distortion_reference_table = Table((siaf_index, exponent_x, exponent_y, AF, BF, CF, DF),\n names=('siaf_index', 'exponent_x', 'exponent_y', 'Sci2IdlX', 'Sci2IdlY', 'Idl2SciX','Idl2SciY'))\n\n V3angle = 0\n betaX = 0\n\n\n else:\n # Scale to arcsec\n (AX, BX, CX, DX) = polynomial.rescale(A, B, C, D, scale)\n\n\n V2c = polynomial.poly(AX, center_offset_x, center_offset_y)\n V3c = polynomial.poly(BX, center_offset_x, center_offset_y)\n\n AS = polynomial.shift_coefficients(AX, center_offset_x, center_offset_y)\n AS[0] = 0.0\n BS = polynomial.shift_coefficients(BX, center_offset_x, center_offset_y)\n BS[0] = 0.0\n CS = polynomial.shift_coefficients(CX, V2c, V3c)\n CS[0] = 0.0\n DS = polynomial.shift_coefficients(DX, V2c, V3c)\n DS[0] = 0.0\n\n if aperture_id == 'FGS1':\n if oss is False:\n AF = -polynomial.flip_x(polynomial.flip_y(AS))\n BF = -polynomial.flip_x(polynomial.flip_y(BS))\n CF = -polynomial.flip_x(polynomial.flip_y(CS))\n DF = -polynomial.flip_x(polynomial.flip_y(DS))\n else:\n AF = AS # For OSS detector and science are identical\n BF = -BS\n CF = polynomial.flip_y(CS)\n DF = polynomial.flip_y(DS)\n elif aperture_id == 'FGS2':\n if oss is False:\n AF = -polynomial.flip_x(AS)\n BF = polynomial.flip_x(BS)\n CF = -polynomial.flip_x(CS)\n DF = polynomial.flip_x(DS)\n else:\n AF = AS # For OSS detector and science are identical\n BF = BS\n CF = CS\n DF = DS\n\n betaX = np.arctan2(oss_factor * AF[1], BF[1])\n betaY = np.arctan2(oss_factor * AF[2], BF[2])\n\n V3angle = copy.deepcopy(betaY)\n if (abs(V3angle) > np.pi/2):\n V3angle = V3angle - np.copysign(np.pi, V3angle)\n\n (AR,BR) = polynomial.add_rotation(AF, BF, -1 * oss_factor * np.rad2deg(V3angle))\n\n # take out the rotation, carried separately in V3IdlYangle\n CR = polynomial.prepend_rotation_to_polynomial(CF, oss_factor * np.rad2deg(V3angle))\n DR = polynomial.prepend_rotation_to_polynomial(DF, oss_factor * np.rad2deg(V3angle))\n distortion_reference_table = Table((siaf_index, exponent_x, exponent_y, AR, BR, CR, DR),\n names=('siaf_index', 'exponent_x', 'exponent_y', 'Sci2IdlX', 'Sci2IdlY', 'Idl2SciX','Idl2SciY'))\n\n print('{} {}'.format(aperture_name, np.rad2deg(betaY)))\n # if aperture_name == 'FGS1_FULL': # first in loop\n if counter == 0: # first in loop\n siaf_alignment = Table()\n siaf_alignment['AperName'] = ['{:>30}'.format(aperture_name)]\n siaf_alignment['V3IdlYAngle'] = [np.rad2deg(V3angle)]\n siaf_alignment['V3SciXAngle'] = [np.rad2deg(betaX)]\n siaf_alignment['V3SciYAngle'] = [np.rad2deg(betaY)]\n siaf_alignment['V2Ref'] = [V2Ref]\n siaf_alignment['V3Ref'] = [V3Ref]\n else:\n siaf_alignment.add_row(['{:>30}'.format(aperture_name), np.rad2deg(V3angle), np.rad2deg(betaX), np.rad2deg(betaY), V2Ref, V3Ref])\n\n counter += 1\n\n\n 
distortion_reference_table.add_column(Column([aperture_name] * len(distortion_reference_table), name='AperName'), index=0)\n if mode == 'fsw':\n distortion_reference_file_name = os.path.join(JWST_SOURCE_DATA_ROOT, instrument, 'fgs_fsw_distortion_{}.txt'.format(aperture_name.lower()))\n else:\n distortion_reference_file_name = os.path.join(JWST_SOURCE_DATA_ROOT, instrument, 'fgs_siaf_distortion_{}.txt'.format(aperture_name.lower()))\n\n comments = []\n comments.append('FGS distortion reference file for SIAF\\n')\n comments.append('')\n comments.append('Based on coefficients delivered to STScI in February 2015.')\n comments.append('These parameters are stored in PRDOPSSOC-H-014.')\n comments.append('')\n comments.append('Generated {} {}'.format(timestamp.isot, timestamp.scale))\n comments.append('by {}'.format(username))\n comments.append('')\n distortion_reference_table.meta['comments'] = comments\n distortion_reference_table.write(distortion_reference_file_name, format='ascii.fixed_width', delimiter=',', delimiter_pad=' ', bookend=False, overwrite=True)\n\n comments = []\n comments.append('{} alignment parameter reference file for SIAF'.format(instrument))\n comments.append('')\n comments.append('This file contains the focal plane alignment parameters calibrated during FGS-SI alignment.')\n comments.append('')\n comments.append('Generated {} {}'.format(timestamp.isot, timestamp.scale))\n comments.append('by {}'.format(username))\n comments.append('')\n siaf_alignment.meta['comments'] = comments\n siaf_alignment.write(outfile, format='ascii.fixed_width', delimiter=',',\n delimiter_pad=' ', bookend=False, overwrite=True)", "def setDrawingMode(self):\n pass", "def oop(poster=False):\n fig = plt.figure(constrained_layout=True, figsize=(7.8, 5.5))\n gs = gridspec.GridSpec(nrows=4, ncols=10, hspace=0.5, wspace=0.0,\n left=0.075, bottom=0.07, top=0.91, right=0.95,\n width_ratios=(1, 0.25, 0.15, 0.05, 0.03, 0.17, 0.35, 0.05, 0.03, 0.12),\n height_ratios=(0.5, 0.5, 0.9, 0.2))\n ax1 = fig.add_subplot(gs[0:2, 0:3])\n ax2 = fig.add_subplot(gs[2, 0])\n\n axb1 = fig.add_subplot(gs[0, 6], projection=fabricplotlib.PRJ)\n axb2 = fig.add_subplot(gs[1, 6], projection=fabricplotlib.PRJ)\n axb_cb = fig.add_subplot(gs[0:2, 8])\n\n ax8 = fig.add_subplot(gs[2, 2:10], sharex=ax2)\n ax_leg = fig.add_subplot(gs[3, :])\n ax_leg.axis('off')\n dist = 75.0\n x, y = np.ones(3) * 1000.0 * dist, np.array([1000., 1500.])\n\n ftws = ['ftw', 'uc', 'hiacc']\n oops = ['1.0e-3', '1.0e-2', '1.0e-1']\n bms = ['0.0', '1.0e-2', '2.0e-2']\n rcs = ['2.0e-3', '2.0e-4', '2.0e-2']\n\n oop_names = [name_fmt.format(ftws[0], rcs[0], oop, bms[0]) for oop in oops]\n bck_names = [ob_name_fmt.format(ftws[0], rcs[0], oop, bms[0]) for oop in oops]\n\n fmts = oop_names + bck_names\n pretty_fmts = {}\n pretty_fmts_oop = {}\n for fmt in fmts:\n fm_spl = fmt.split('_')\n pretty_fmts[fmt] = r'%s $\\lambda$=%1.4g $\\dot b$=%1.4g' % (short_name_dict[fm_spl[1]], float(fm_spl[2][2:]), float(fm_spl[4][2:]))\n if fmt[0] == 's':\n pretty_fmts_oop[fmt] = r'$\\dot\\varepsilon_{xy}^{(max)}$=%1.4g Fwd' % (float(fm_spl[3][3:]))\n else:\n pretty_fmts_oop[fmt] = r'$\\dot\\varepsilon_{xy}^{(max)}$=%1.4g Bck' % (float(fm_spl[3][3:]))\n\n\n if not debug:\n files = [glob('../stream/stream_ftw/' + fmt + '_????.vtu') for fmt in fmts]\n inds = [np.argsort(np.array([float(fn[-8:-4]) for fn in filel])) for filel in files]\n fns = [[file_list[ind] for ind in inds_i] for inds_i, file_list in zip(inds, files)]\n\n times = np.hstack(([0.0, 0.1], np.arange(10.0, 100010.0, 
10.0)))\n\n a_s = {}\n for i, fmt in enumerate(fmts):\n a_s[fmt] = get_vars_or_cache(x, y, fmt, fns[i], folder='../stream/stream_ftw')\n timess = {fmt: times[:min(a_s[fmt]['fabric 1'].shape[1], len(times))] for fmt, fn in zip(fmts, fns)}\n taus = {name: val / (dist * 1000.0 / 50.0) for name, val in timess.items()}\n\n else:\n taus = {name: np.linspace(0, 3, 100) for name in fmts}\n vtu = {name: np.ones((3, 100)) for name in ['fabric 1', 'fabric 2', 'fabric 3', 'eigenv 1', 'eigenv 2', 'eigenv 3', 'eigenv 4']}\n a_s = {name: vtu for name in fmts}\n\n def do_plot(fmt, color, lw, ls):\n if fmt[0] == 's':\n label = pretty_fmts_oop[fmt]\n else:\n label = None\n a_s[fmt]['eigenv 3'][1, :][a_s[fmt]['eigenv 3'][1, :] > 1.0] = 1.0\n ax2.plot(timess[fmt] / 1000.0, a_s[fmt]['eigenv 3'][1, :], color=color, linewidth=lw, linestyle=ls, label=label)\n\n if fmt[0] == 'a':\n label = pretty_fmts_oop[fmt]\n else:\n label = None\n\n ax8.plot(timess[fmt][1:] / 1000.0, fabricplotlib.fabric_to_hor_rot(a_s[fmt]['fabric 1'][1, 1:],\n a_s[fmt]['fabric 2'][1, 1:],\n a_s[fmt]['fabric 5'][1, 1:]),\n color=color, linewidth=lw, linestyle=ls, label=pretty_fmts_oop[fmt])\n\n for fmt, color in zip(oop_names, oop_colors):\n do_plot(fmt, color, 1, 'solid')\n\n for fmt, color in zip(bck_names, oop_colors):\n do_plot(fmt, color, 1, 'dashed')\n\n ax1.set_ylabel('Depth (m)', fontsize=fs)\n ax1.set_xlabel('Distance (km)', fontsize=fs)\n ax2.set_ylabel(r'$a^{(2)}_{1}$', fontsize=fs)\n ax8.set_ylabel(r'$\\theta$', fontsize=fs)\n ax2.set_xlabel(r'Time (kyr)', fontsize=fs)\n ax8.set_xlabel(r'Time (kyr)', fontsize=fs)\n ax2.set_ylim(0.66666, 1.0)\n ax2.set_yticks([0.66666, 5. / 6., 1.])\n ax2.set_yticklabels([r'$\\frac{2}{3}$', r'$\\frac{5}{6}$', '1'])\n ax2.set_xlim(0, 3)\n ax8.set_ylim(0, 45)\n h, l = ax8.get_legend_handles_labels()\n ax_leg.legend([h[0], h[3], h[1], h[4], h[2], h[5]], [l[0], l[3], l[1], l[4], l[2], l[5]], loc='upper left', frameon=False, ncol=3, fontsize=fs)\n # ax2.legend(loc='upper right', frameon=False, fontsize=fs)\n # ax5.legend(loc='upper right', frameon=False, fontsize=fs)\n\n vtus = [fastvtulib.get_structured_vtu(fns[1][-1])]\n tris = [Triangulation(np.array([rc[0] + 100000. for rc in vtu.raw_coords[:, 0]]) / 1000., np.array([rc[1] for rc in vtu.raw_coords[:, 0]]), vtu.simptt) for vtu in vtus]\n\n a12_axes = [ax1]\n ax_c_a12 = fig.add_subplot(gs[0:2, 4])\n\n for axa, tri, vtu in zip(a12_axes, tris, vtus):\n axa.set_xlim(0, 175)\n axa.set_xticks([0., 50., 100., 150.])\n axa.set_xticklabels(['' for tick in axa.get_xticklabels()])\n\n cm3 = axa.tricontourf(tri, vtu.rawdata_dict['eigenv 3'], cmap='summer', levels=np.linspace(0.3333, 1, 101), extend='neither')\n for c in cm3.collections:\n c.set_edgecolor(\"face\")\n axa.set_xticklabels(['0', '50', '100', '150'])\n fabricplotlib.quiver(axa, vtu, scale=25, width=0.003)\n\n a12_axes[0].scatter(-1000, -1000, marker=r'$\\uparrow$', label='Single max. in x-z', color='k')\n a12_axes[0].legend(loc='lower left', bbox_to_anchor=(0.1, 1.0), ncol=2, fontsize=fs, framealpha=1.0)\n a12_axes[0].set_xlim(0, 175)\n a12_axes[0].set_ylim(0, 2200)\n\n cbr = plt.colorbar(cm3, cax=ax_c_a12, orientation='vertical', ticks=(1. / 3., 2. / 3., 1.))\n cbr.set_label(label=r'$a^{(2)}_{1}$', size=fs)\n cbr.ax.set_yticklabels([r'$\\frac{1}{3}$', r'$\\frac{2}{3}$', '1'])\n cbr.ax.tick_params(axis='both', which='major', labelsize=fs)\n\n # Cartoons\n x, y = np.array([40000, 40000]), np.array([1100, 150])\n ax1.text(x[0] / 1000. 
+ 100, y[0], 'b', fontsize=fs, ha='center', va='center', bbox=dict(boxstyle='square,pad=0.1', facecolor='white', alpha=0.75))\n ax1.text(x[1] / 1000. + 100, y[1], 'c', fontsize=fs, ha='center', va='center', bbox=dict(boxstyle='square,pad=0.1', facecolor='white', alpha=0.75))\n fab_at_pts = vtu.get_pts_2d(anisolib.fabs, x, y)\n a2 = anisolib.fabric_dict_to_a2(fab_at_pts)\n\n for letter, ax in zip('bcde', [axb1, axb2]):\n ax.text(0.00, 0.9, letter, transform=ax.transAxes, fontsize=bfs)\n\n fabricplotlib.a2plot(a2[:, :, 0], ax=axb1, cbr=False, show=False, levels=13)\n cm = fabricplotlib.a2plot(a2[:, :, 1], ax=axb2, cbr=False, show=False, levels=13)\n cbr = plt.colorbar(cm, cax=axb_cb, orientation='vertical')\n cbr.set_label(label=r'ODF($\\theta,\\phi$)', size=fs)\n # cbr.ax.set_xticklabels(['0', '1', '2'])\n cbr.ax.tick_params(axis='both', which='major', labelsize=fs)\n\n for letter, ax in zip('adefghijklmnop', (ax1, ax2, ax8)):\n ax.text(0.05, 0.85, letter, transform=ax.transAxes, fontsize=bfs)\n ax.tick_params(axis='both', which='major', labelsize=fs)\n\n fig.savefig('../plots/idealized_core_oop_td.png', dpi=300)\n fig.savefig('../plots/poster_idealized_core_oop_td.png', dpi=300, transparent=True)", "def set_ground_channel_fourier_modes(self, nxmax=None, nymax=None, auto=True, mode='analytic'):\n\n if mode == \"symbolic\":\n if nxmax is not None and nymax is not None:\n basis = contiguous_channel_basis(nxmax, nymax, self.scale_params.n)\n else:\n basis = None\n self.set_ground_modes(basis, auto)\n else:\n self._set_ground_analytic_fourier_modes(nxmax, nymax, auto)", "def set_udfs(self):\n\n flowcell_type = self.process.all_inputs()[0].udf.get('Flowcell Type')\n\n for key, val in self.process_settings[flowcell_type].items():\n self.process.udf[key] = val\n self.process.put()\n\n for art in self.artifacts:\n for key, val in self.artifact_settings[flowcell_type].items():\n art.udf[key] = val\n art.put()", "def outputFO(self, outfile):\n\n#fof(axiom_0,axiom,\n# ( ! [V2] : ? [V1]:\n# ( p(V2) | p(V1) ) & (~p(V2) | ~p(V1) ) & ( p(V2) | ~p(V1) ) )).\n# outfile.write(\"\"\"cnf(rule_true,axiom, p(1)).\n#cnf(rule_false,axiom, ~p(0)).\n#\"\"\")\n outfile.write(\"fof(quant,axiom,(\\n\\t\")\n for q in self.__quantifierList:\n\n if q.qtype == \"a\":\n outfile.write(\" ! \")\n elif q.qtype == \"e\":\n outfile.write(\" ? 
\")\n variables = [\"V%d\" % x for x in q.getVariableNames()]\n \n outfile.write(\"[ %s ] : \\n\\t\" % \",\".join(variables))\n clauselist = [] \n outfile.write(\" ( \\n\\t p(true) & ~p(false) & \\n \") \n for c in self.__clauseList:\n clause = []\n clause.append(\"( \")\n vlist = []\n for var in c.varlist:\n if var.inv:\n vlist.append(\"~p(V%s)\" % var.name)\n else:\n vlist.append(\" p(V%s)\" % var.name)\n clause.append(\" | \".join(vlist))\n clause.append(\") \")\n clauselist.append(\"\".join(clause))\n outfile.write(\"\\n\\t & \".join(clauselist))\n outfile.write(\" ) \");\n outfile.write(\"\\n)).\")", "def flip(self):\n \n if self.faceup:\n self.faceup = False\n else:\n self.faceup = True", "def ParallelFace(movablePlane: str, fixedPlane: str, flip: Boolean) -> \"Feature\":\n return Feature()", "def init_plot_force(nb_mus):\n # --- Curve graph --- #\n # app = pg.mkQApp(\"force\")\n # remote = []\n # layout = pg.LayoutWidget()\n # layout.resize(800, 800)\n # label = QtGui.QLabel()\n # box = []\n # rplt = []\n # row_count = 0\n # col_span = 4 if nb_mus > 8 else 8\n # for mus in range(nb_mus):\n # remote.append(rgv.RemoteGraphicsView())\n # remote[mus].pg.setConfigOptions(antialias=True)\n # app.aboutToQuit.connect(remote[mus].close)\n # box.append(QtGui.QCheckBox(f\"muscle_{mus}\"))\n # if mus >= 8:\n # layout.addWidget(box[mus], row=1, col=mus-8)\n # layout.addWidget(remote[mus], row=mus - 8 + 2, col=4, colspan=col_span)\n # else:\n # layout.addWidget(box[mus], row=0, col=mus)\n # layout.addWidget(remote[mus], row=mus + 2, col=0, colspan=col_span)\n # rplt.append(remote[mus].pg.PlotItem())\n # rplt[mus]._setProxyOptions(deferGetattr=True) ## speeds up access to rplt.plot\n # remote[mus].setCentralItem(rplt[mus])\n # layout.addWidget(label)\n # layout.show()\n # row_count += 1\n # return rplt, layout, app , box\n\n # --- Progress bar graph --- #\n # app = pg.mkQApp(\"force\")\n # layout = pg.LayoutWidget()\n # layout.resize(400, 800)\n # layout.move(0, 0)\n # box = []\n # rplt = []\n # row_count = 0\n # for mus in range(nb_mus):\n # rplt.append(QProgressBar())\n # rplt[mus].setMaximum(1000)\n # layout.addWidget(rplt[mus], row=mus, col=0)\n # layout.show()\n # row_count += 1\n # return rplt, layout, app\n\n # --- Bar graph --- #\n app = pg.mkQApp()\n layout = pg.plot()\n layout.resize(800, 800)\n rplt = pg.BarGraphItem(x=range(nb_mus), height=np.zeros((nb_mus)), width=0.3, brush=\"r\")\n layout.addItem(rplt)\n return rplt, layout, app", "def M20_graph(self,img,active_90,active_20):\n fake = num.zeros(shape=img.shape,type='Float32')\n fake[active_90] = 1. 
; fake[active_20] = 2.\n id = self._getGraphId()\n figname = 'G_%s.eps' % id\n root = 'M20_%s' % id\n pngname = root + '.png' ; jpgname = root + '.jpg'\n epsname = root + '.eps'\n doStamp(fake,pngname,format='PNG',minhisto=0,maxhisto=100)\n Convert(pngname,jpgname)\n Painted = Paint(jpgname)\n Painted.load()\n text = 'M20=%5.2f' % self['M20']\n #Painted.Graffiti(text,commtextpos)\n Painted.save(jpgname)\n Painted.release()\n \n Convert(jpgname,epsname)\n os.system('rm %s %s' % (pngname,jpgname))\n self['figures']['M20'] = epsname\n self['figcomms']['M20'] = text", "def main():\n print_banner()\n params = read_steering()\n s, x, y, cur, theta = build_kinoshita()\n s, x, y, cur, theta = read_centerline(s, x, y, cur, theta)\n s, x, y, cur, theta = extend_centerline(s, x, y, cur, theta)\n for t in range(TSTEPS+1):\n cur, theta = tan2curv(s, x, y)\n cur_ori = np.copy(cur)\n cur = filter_curvature(cur, t)\n cur_flt = np.copy(cur)\n cur = lag(s, cur, t)\n cur_lag = np.copy(cur)\n beck_bed = build_beck(cur, s, t)\n allxyz = offset_all(x, y, beck_bed, t)\n if t == 0:\n write_xyz_file(allxyz)\n write_mesh_file(allxyz, beck_bed)\n oxbowxList, oxbowyList = [], []\n centerlinexList, centerlineyList = [], []\n if np.mod(t, GPRINT) == 0:\n centerlinexList.append(x)\n centerlineyList.append(y)\n mf.make_figure(x, y, allxyz, cur_ori, cur_flt, cur_lag, s, beck_bed,\n params, t, oxbowxList, oxbowyList, centerlinexList, centerlineyList)\n if t == TSTEPS:\n break\n s, x, y = migration(s, x, y, cur_flt, cur_lag, theta, t)\n s, x, y, oxbowx, oxbowy, found_cutoff = cutoff(s, x, y)\n s, x, y = smooth_centerline(x, y)\n s, x, y, cur, theta = resample_centerline(s, x, y)\n if found_cutoff:\n oxbowxList.append(oxbowx)\n oxbowyList.append(oxbowy)\n make_gif()\n job_done()", "def analysePhotometry(self, fsu='FSUA', channel=0, noFTK=False,\n plot=False, label='', normalized=True,\n xlims=None):\n\n t = self.raw['IMAGING_DATA_'+fsu].data.field('TIME')\n\n if noFTK: # check fluxes outside fringes\n if (fsu=='FSUB' and self.insmode=='NORMAL') or \\\n (fsu=='FSUA' and self.insmode=='SWAPPED'):\n w = np.where((np.interp(t, self.raw['OPDC'].data.field('TIME'),\n self.raw['OPDC'].data.field('STATE'))<5))\n else:\n w = np.where(np.interp(t, self.raw['DOPDC'].data.field('TIME'),\n self.raw['DOPDC'].data.field('STATE'))<5)\n print 'noFTK:', round(100*len(w[0])/float(len(t)), 3), '%'\n else:\n w = (range(len(t)),[])\n\n photA = self.raw['IMAGING_DATA_'+fsu].data.field('DATA1')[w[0],channel]-\\\n self.fsu_calib[(fsu, 'DARK')][channel,0]\n photB = self.raw['IMAGING_DATA_'+fsu].data.field('DATA2')[w[0],channel]-\\\n self.fsu_calib[(fsu, 'DARK')][channel,1]\n photC = self.raw['IMAGING_DATA_'+fsu].data.field('DATA3')[w[0],channel]-\\\n self.fsu_calib[(fsu, 'DARK')][channel,2]\n photD = self.raw['IMAGING_DATA_'+fsu].data.field('DATA4')[w[0],channel]-\\\n self.fsu_calib[(fsu, 'DARK')][channel,3]\n\n phot0 = (photA+photB+photC+photD)/4.0\n s0 = np.argsort(phot0)\n sA = np.argsort(photA)\n sB = np.argsort(photB)\n sC = np.argsort(photC)\n sD = np.argsort(photD)\n\n if plot:\n fig = plt.figure(0)\n fig.clf()\n if normalized:\n plt.hist(phot0/phot0.mean(), bins=50, normed=True,\n alpha=0.8, color='y')\n h = np.histogram(phot0/phot0.mean(), bins=50, normed=True)\n plt.xlabel('flux / mean(flux)')\n else:\n plt.hist(phot0, bins=50, normed=True, alpha=0.8, color='y')\n h = np.histogram(phot0, bins=50, normed=True)\n plt.xlabel('flux (ADU)')\n if not xlims is None:\n plt.xlim(xlims[0], xlims[1])\n plt.title(label)\n poissonDist = lambda x,p:\\\n 
poisson(p['m']*p['p']).pmf(np.int_(np.floor(x*p['p'])))*p['p'] +\\\n (x*p['p']-np.floor(x*p['p']))/\\\n (np.ceil(x*p['p'])-np.floor(x*p['p']))*\\\n (poisson(p['m']*p['p']).pmf(np.int_(np.ceil(x*p['p'])))*p['p'] -\n poisson(p['m']*p['p']).pmf(np.int_(np.floor(x*p['p'])))*p['p'])\n if not normalized:\n guess = {'m':phot0.mean(), 'p':1/10.}\n X = 0.5*(h[1][:-1]+h[1][1:])\n fit = dpfit.leastsqFit(poissonDist, X, guess, h[0])\n guess = fit['best']\n uncer = fit['uncer']\n chi2 = fit['chi2']\n model = fit['model']\n print 'POISSON: LAMBDA', guess['p']*guess['m']\n print 'POISSON: STD/MEAN', 1/np.sqrt(guess['p']*guess['m'])\n plt.plot(X, poissonDist(X, guess), '-r', linewidth=3,\n alpha=0.8, linestyle='dashed')\n return\n res = {'MEAN':[phot0.mean(), photA.mean(), photB.mean(),\n photC.mean(), photD.mean()],\n 'STD':[phot0.std(), photA.std(), photB.std(), photC.std(), photD.std()],\n '90-10':[phot0[s0[9*len(s0)/10]]-phot0[s0[len(s0)/10]],\n photA[sA[9*len(sA)/10]]-photA[sA[len(sA)/10]],\n photB[sB[9*len(sB)/10]]-photA[sB[len(sB)/10]],\n photC[sC[9*len(sC)/10]]-photA[sC[len(sC)/10]],\n photD[sD[9*len(sD)/10]]-photA[sD[len(sD)/10]]]}\n\n res['STD/MEAN'] = [res['STD'][k]/res['MEAN'][k] for k in range(5)]\n res['(90-10)/MEAN'] = [res['90-10'][k]/res['MEAN'][k] for k in range(5)]\n res['(90-10)/STD'] = [res['90-10'][k]/res['STD'][k] for k in range(5)]\n res['BEAMS']=['(A+B+C+D)/4', 'A', 'B', 'C', 'D']\n return res", "def calculateFFBox(qOfFlights):\n\t# if qOfFlights == 2: rows=2; columns=1\n\t# else:\n\tpass" ]
[ "0.6021151", "0.5678602", "0.556794", "0.5545283", "0.54808503", "0.54380274", "0.5351072", "0.53036", "0.5298335", "0.529772", "0.5294927", "0.52931446", "0.52901006", "0.52764875", "0.5263691", "0.5190619", "0.51681143", "0.512835", "0.5121012", "0.511816", "0.51176304", "0.5094344", "0.5074707", "0.50642455", "0.50568306", "0.50513107", "0.50437254", "0.50415325", "0.5037268", "0.5007386", "0.49609506", "0.4960482", "0.49598107", "0.49505663", "0.49492386", "0.49435523", "0.4932687", "0.49313653", "0.4929717", "0.49258718", "0.492363", "0.49170426", "0.4914391", "0.4908069", "0.49025708", "0.49005684", "0.48960692", "0.48872903", "0.48863626", "0.48834792", "0.4877197", "0.48684236", "0.48594153", "0.48591438", "0.4858252", "0.4856824", "0.48540312", "0.48531848", "0.48518002", "0.48457852", "0.4844704", "0.4844522", "0.48425364", "0.48345104", "0.48274767", "0.48244867", "0.48237404", "0.48210442", "0.48197678", "0.48191682", "0.48184925", "0.4815997", "0.48157093", "0.4813862", "0.48074025", "0.4806924", "0.480591", "0.48053625", "0.48046654", "0.47992635", "0.479821", "0.47956407", "0.47903422", "0.47813594", "0.47811824", "0.47775546", "0.4774301", "0.4767148", "0.47665587", "0.47648865", "0.47644547", "0.47637525", "0.4761063", "0.47574154", "0.47570783", "0.4756897", "0.47562245", "0.47556755", "0.47528082", "0.47492707", "0.47483274" ]
0.0
-1
Testing for a potential problem on SJD 56993, with gotoField not slewing when gang bypass had been set.
def test_goto_field_apogee_bypass_gangToCart(self):
        self._prep_bypass('gangToCart', clear=True)
        cmdState = self.actorState.gotoField
        cmdState.reinitialize(self.cmd)
        self._goto_feld_apogee(13, 44, 4, 0, cmdState)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_goto_field_apogee_no_guider(self):\n cmdState = self.actorState.gotoField\n cmdState.reinitialize(self.cmd)\n cmdState.doGuider = False\n self._goto_feld_apogee(3, 11, 0, 0, cmdState)", "def test_goto_field_apogee_no_slew(self):\n cmdState = self.actorState.gotoField\n cmdState.reinitialize(self.cmd)\n cmdState.doSlew = False\n self._goto_feld_apogee(9, 37, 0, 0, cmdState)", "def test_goto_field_cartridge_mismatch(self):\n\n sopTester.updateModel('guider', TestHelper.guiderState['bossLoaded'])\n\n mcpState = TestHelper.mcpState['boss_science']\n mcpState.update({'instrumentNum': [15]})\n sopTester.updateModel('mcp', mcpState)\n\n cmdState = self.actorState.gotoField\n cmdState.reinitialize(self.cmd)\n\n masterThread.goto_field(self.cmd, cmdState, myGlobals.actorState)\n self._check_cmd(0, 14, 0, 0, finish=True, didFail=True)", "def test_goto_field_apogee(self):\n cmdState = self.actorState.gotoField\n cmdState.reinitialize(self.cmd)\n self._goto_feld_apogee(13, 46, 0, 0, cmdState)", "def test_goto_field_apogee_no_slew_decenter_off(self):\n sopTester.updateModel('mcp', TestHelper.mcpState['all_off'])\n sopTester.updateModel('guider', TestHelper.guiderState['guiderOnDecenter'])\n cmdState = self.actorState.gotoField\n cmdState.reinitialize(self.cmd)\n cmdState.doSlew = False\n self._goto_feld_apogee(9, 37, 0, 0, cmdState)", "def test_goto_field_boss_ffs_open_fails(self):\n sopTester.updateModel('mcp', TestHelper.mcpState['all_off'])\n cmdState = self.actorState.gotoField\n cmdState.reinitialize(self.cmd)\n self.cmd.failOn = 'mcp ffs.open'\n self._goto_field_boss(21, 102, 1, 1, cmdState, didFail=True, finish=True)", "def test_goto_field_boss_flat_on_fails(self):\n sopTester.updateModel('mcp', TestHelper.mcpState['all_off'])\n cmdState = self.actorState.gotoField\n cmdState.reinitialize(self.cmd)\n self.cmd.failOn = 'mcp ff.on'\n self._goto_field_boss(16, 71, 0, 1, cmdState, didFail=True, finish=True)", "def test_goto_field_apogee_no_slew_shutter_open(self):\n sopTester.updateModel('apogee', TestHelper.apogeeState['B_open'])\n cmdState = self.actorState.gotoField\n cmdState.reinitialize(self.cmd)\n cmdState.doSlew = False\n self._goto_feld_apogee(10, 37, 0, 0, cmdState)", "def test_goto_field_boss_hartmann_blue_fails(self):\n\n sopTester.updateModel('mcp', TestHelper.mcpState['all_off'])\n sopTester.updateModel('hartmann', TestHelper.hartmannState['blue_fails'])\n\n cmdState = self.actorState.gotoField\n cmdState.reinitialize(self.cmd)\n\n self._goto_field_boss(12, 37, 0, 0, cmdState, didFail=True, finish=True)", "def fix_jump(self):\n pass", "def problem_298():\n pass", "def test_goto_field_boss_guider(self):\n sopTester.updateModel('mcp', TestHelper.mcpState['all_off'])\n sopTester.updateModel('guider', TestHelper.guiderState['guiderOnDecenter'])\n cmdState = self.actorState.gotoField\n cmdState.reinitialize(self.cmd)\n cmdState.doSlew = False\n cmdState.doHartmann = False\n cmdState.doCalibs = False\n cmdState.arcTime = 0\n cmdState.flatTime = 0\n self._goto_field_boss(9, 37, 0, 0, cmdState)", "def fix_bug(self):\n self.bugged = False\n self.error_prob = 0.0", "def test_goto_field_boss_slew(self):\n sopTester.updateModel('mcp', TestHelper.mcpState['all_off'])\n cmdState = self.actorState.gotoField\n cmdState.reinitialize(self.cmd)\n cmdState.doGuider = False\n cmdState.doHartmann = False\n cmdState.doCalibs = False\n cmdState.arcTime = 0\n cmdState.flatTime = 0\n self._goto_field_boss(3, 26, 0, 0, cmdState)", "def test_one_trick_pony(self):\n 
self.validate_goal_for('game-20120625-114828-af02f875.html',\n u'WanderingWinder',\n 'OneTrickPony')", "def test_invalid_field_access(self):\r\n out = self.c.post(self.url, {'op': 'delete hints', 'field': 'all your private data'})\r\n print out\r\n self.assertTrue('an invalid field was accessed' in out.content)", "def experiment3():\n raise FAKE_ERROR", "def op_jump_preconditions(self,piece):\n\n # Flag must be FREE or JUMP\n if(self.next_move == self.CAPT or self.next_move == self.ADDPIECE_1 or self.next_move == self.ADDPIECE_2):\n return False\n\n # Check if the piece is in the next pieces (deals with obligatory jumps)\n if(self.next_move == self.JUMP and piece not in self.next_pieces):\n return False\n\n return True", "def _test_out_of_range(self):\n self.cdbconf.setup('KKG')\n self.cdbconf.setConfiguration('CUSTOM_OPT')\n az, el, latitude = [radians(50)] * 3\n site_info = {'latitude': latitude}\n self.p.setup(site_info, self.source, self.device)\n self.p.setRewindingMode('AUTO')\n offset = 20\n max_limit = self.device.getMaxLimit() \n min_limit = self.device.getMinLimit()\n Pis = max_limit - offset/2\n time.sleep(0.2) if self.using_mock else time.sleep(3)\n self.p.setPosition(Pis)\n time.sleep(0.2) # Wait a bit for the setup\n max_rewinding_steps = (max_limit - min_limit) // self.device.getStep()\n expected = Pis - max_rewinding_steps*self.device.getStep() + offset\n self.source.setAzimuth(az)\n self.source.setElevation(el)\n self.p.startUpdating('MNG_TRACK', 'ANT_NORTH', az, el, None, None)\n time.sleep(0.2) if self.using_mock else time.sleep(3)\n self.p.setOffset(offset)\n time.sleep(0.2) if self.using_mock else time.sleep(3)\n self.assertEqual(self.device.getActPosition(), expected)", "def test_goto_field_boss_hartmann(self):\n sopTester.updateModel('mcp', TestHelper.mcpState['all_off'])\n cmdState = self.actorState.gotoField\n cmdState.reinitialize(self.cmd)\n cmdState.doSlew = False\n cmdState.doCalibs = False\n cmdState.arcTime = 0\n cmdState.flatTime = 0\n cmdState.doGuider = False\n self._goto_field_boss(5, 29, 0, 0, cmdState)", "def violated(self) -> bool:\n ...", "def test_02_visit_again(self):", "def test_73_break(self):\n\t\tinput = \"\"\"procedure main(); var x:integer;\n\t\tbegin while(true)do begin\n\t\twith x:integer; do with x:real; do begin\n\t\t\tif (x>0) then break;\n\t\t\twith x:integer; do if (x=0) then break; else return;\n\t\tend{with} end{while} foo(); end\"\"\"\n\t\texpect = \"Undeclared Procedure: foo\"\n\t\tself.assertTrue(TestChecker.test(input,expect,473))", "def test_check_problem_other_errors(self):\r\n # Create the module\r\n module = CapaFactory.create(attempts=1)\r\n\r\n # Ensure that the user is NOT staff\r\n module.system.user_is_staff = False\r\n\r\n # Ensure that DEBUG is on\r\n module.system.DEBUG = True\r\n\r\n # Simulate answering a problem that raises the exception\r\n with patch('capa.capa_problem.LoncapaProblem.grade_answers') as mock_grade:\r\n error_msg = u\"Superterrible error happened: ☠\"\r\n mock_grade.side_effect = Exception(error_msg)\r\n\r\n get_request_dict = {CapaFactory.input_key(): '3.14'}\r\n result = module.check_problem(get_request_dict)\r\n\r\n # Expect an AJAX alert message in 'success'\r\n self.assertTrue(error_msg in result['success'])", "def condition_forward_checking(csp, var) :\n return False", "def condition_forward_checking(csp, var) :\n return False", "def test_gethint_0hint(self):\r\n mock_module = CHModuleFactory.create()\r\n json_in = {'problem_name': '26.0'}\r\n out = mock_module.get_hint(json_in)\r\n print 
mock_module.previous_answers\r\n self.assertTrue(out is None)\r\n self.assertTrue('26.0' in mock_module.user_submissions)", "def test_handle_flag_error(self):\n pass", "def test_2_bogus_input(self) :\n self.banner(\"Testing what your program does with bogus input.\")\n filename = self.find_file('project9.py')\n self.assertIsNotNone(filename, \"I can't find your project file (project9.py)\")\n\n with open('logs/test_2_bogus_input.out', 'a') as log :\n test = pexpect.spawnu('python \"' + filename.as_posix() + '\"', logfile=log, encoding='utf-8')\n turns = random.randrange(200, 100000)\n test.sendline('this')\n test.sendline('is')\n test.sendline('bogus')\n test.sendline('-10')\n test.sendline('100001')\n test.sendline(str(turns))\n try : \n got = test.expect([pexpect.EOF, '(\\d+\\.\\d+)\\s*%'], timeout=5)\n switch_percent = float(test.match.group(1))\n got = test.expect([pexpect.EOF, '(\\d+\\.\\d+)\\s*%'], timeout=5)\n stay_percent = float(test.match.group(1))\n except :\n self.fail(\"The grader program failed to parse the output of your project.\")\n\n if not (64 < switch_percent < 68) :\n self.fail('Your switch percentage ({}) is out of range.'.format(switch_percent))\n if not (31 < stay_percent < 35) :\n self.fail('Your stay percentage ({}) is out of range.'.format(stay_percent))\n test.close()", "def test_goto_field_boss_calibs(self):\n sopTester.updateModel('mcp', TestHelper.mcpState['all_off'])\n cmdState = self.actorState.gotoField\n cmdState.reinitialize(self.cmd)\n cmdState.doSlew = False\n cmdState.doHartmann = False\n cmdState.doGuider = False\n self._goto_field_boss(10, 57, 0, 0, cmdState)", "def problem_statement():\n pass", "def event_m20_11_x79():\n \"\"\"State 0,1: Key guide access standby\"\"\"\n IsObjSearched(0, 20111200)\n assert ConditionGroup(0)\n \"\"\"State 2: Are the conditions met?\"\"\"\n IsChrDead(0, 20)\n IsChrDead(0, 25)\n ComparePlayerSinLevel(0, 2, 0)\n IsPlayerHollow(0, 0, 1)\n # goods:60151000:Human Effigy\n DoesPlayerHaveItem(0, 60151000, 1, 3, 1, 1, 1)\n # goods:60151000:Human Effigy\n IsItemLeft(0, 60151000, 1)\n if ConditionGroup(0):\n \"\"\"State 4: Nothing happens\"\"\"\n return 1\n else:\n \"\"\"State 3: End state\"\"\"\n return 0", "def checkGlitches(self):\n cP = self.getKeyword('ISS PRI MET C') # in m/s\n dnuP = self.getKeyword('ISS PRI MET F_SHIFT')*1e6 # in Hz\n nuP = self.getKeyword('ISS PRI MET LASER_F')\n #self.jump = (cP*dnuP/2/(nuP**2))*(2**24-1) # PRIMET jump in m, COMM14\n self.metJumpSize = (cP*dnuP/2/(nuP**2))*(2**31-1) # PRIMET jump in m\n\n relevant_keywords = filter(lambda x: 'NGLIT' in x and\n 'START' in x,\n self.raw[0].header.keys())\n relevant_keywords = [k.split()[4] for k in relevant_keywords]\n glitches = {}\n glitchesStartEnd = {}\n for k in relevant_keywords:\n glitches[k] = self.getKeyword('ISS PRI MET '+k+' END')-\\\n self.getKeyword('ISS PRI MET '+k+' START')\n glitchesStartEnd[k] = (self.getKeyword('ISS PRI MET '+k+' START'),\n self.getKeyword('ISS PRI MET '+k+' END'))\n self.glitches = glitches\n self.glitchesStartEnd = glitchesStartEnd\n if 'NGLITAB' in glitches.keys():\n if glitches['NGLITAB'] !=0:\n print '*SERIOUS WARNING*', glitches['NGLITAB'],\\\n 'glitches in PRIMET A-B in this file'\n else:\n print '*WARNING*: could not assess glitches in A-B'\n\n if 'NGLITB' in glitches.keys():\n if glitches['NGLITB'] !=0:\n print '*SERIOUS WARNING*', glitches['NGLITB'],\\\n 'glitches in PRIMET -B in this file'\n else:\n print 'WARNING: could not assess glitches in -B'\n\n if glitches['NGLITABFCO'] !=0:\n print 'WARNING: AB 
overflow!', glitches['NGLITABFCO']\n if glitches['NGLITBFCO'] !=0:\n print 'WARNING: -B overflow!', glitches['NGLITBFCO']\n self.glitches = glitches\n return", "def test_debugger_api_boundary_condition(self):\n self.dbg.HandleCommand(None)\n self.dbg.SetDefaultArchitecture(None)\n self.dbg.GetScriptingLanguage(None)\n self.dbg.CreateTarget(None)\n self.dbg.CreateTarget(None, None, None, True, lldb.SBError())\n self.dbg.CreateTargetWithFileAndTargetTriple(None, None)\n self.dbg.CreateTargetWithFileAndArch(None, None)\n self.dbg.FindTargetWithFileAndArch(None, None)\n self.dbg.SetInternalVariable(None, None, None)\n self.dbg.GetInternalVariableValue(None, None)\n # FIXME (filcab): We must first allow for the swig bindings to know if\n # a Python callback is set. (Check python-typemaps.swig)\n # self.dbg.SetLoggingCallback(None)\n self.dbg.SetPrompt(None)\n self.dbg.SetCurrentPlatform(None)\n self.dbg.SetCurrentPlatformSDKRoot(None)\n \n fresh_dbg = lldb.SBDebugger()\n self.assertEquals(len(fresh_dbg), 0)", "def breakCheck(self, procname, name):\n\n pat = 'VkExternalFenceFeatureFlagBits'\n if name[0:len(pat)] == pat:\n print('{}(name = {}) matches {}'.format(procname, name, pat))\n import pdb\n pdb.set_trace()", "def com_google_fonts_check_038(font, fontforge_check_results):\n\n filtered_err_msgs = \"\"\n for line in fontforge_check_results[\"ff_err_messages\"].split('\\n'):\n if ('The following table(s) in the font'\n ' have been ignored by FontForge') in line:\n continue\n if \"Ignoring 'DSIG' digital signature table\" in line:\n continue\n filtered_err_msgs += line + '\\n'\n\n if len(filtered_err_msgs.strip()) > 0:\n yield WARN, (\"FontForge seems to dislike certain aspects of this font file.\"\n \" The actual meaning of the log messages below is not always\"\n \" clear and may require further investigation.\\n\\n\"\n \"{}\").format(filtered_err_msgs)\n else:\n yield PASS, \"FontForge validation did not output any error message.\"", "def check_vulnerability(self):\n\t\tpass", "def test_is_solved_when_puzzle_is_not_solved(self):\n sudoku = sudolver.Sudoku()\n self.assertFalse(sudoku.is_solved())", "def jumped_on(self):\r\n pass", "def handle_crash(self):\n if self.crash_test[0]:\n # assert self.alive, 'Something is wrong, dead bird is dying again'\n self.alive = False\n # assign the fitness\n self.genome.fitness = self.get_fitness()\n self.crash_test = False, False", "def check_potentially_fake(self):\n # Check if the second group of numbers is different than 0000\n if(self.code[5:9] != \"0000\"):\n self.filters |= Filters.PotentiallyFake", "def test_74_break(self):\n\t\tinput = \"\"\"procedure main(); var x:integer;\n\t\tbegin while(true)do begin\n\t\twith x:integer; do with x:real; do begin\n\t\t\tif (x>0) then break;\n\t\t\twith x:integer; do if (x=0) then return;\n\t\tend{with} break; end{while} break; end\"\"\"\n\t\texpect = \"Break Not In Loop\"\n\t\tself.assertTrue(TestChecker.test(input,expect,474))", "def test_match_right_none():\r\n runmatch(lcode_right_none)", "def test_kyc_post_legal(self):\n pass", "def check_angry(self):\n if self.die_a.value == 3 and self.die_b.value == 3:\n print(\"WOW, you're ANGRY!\\nTime to go back to Stage 1!\")\n self.current_stage = 1", "def test_check_solved():\n game = Game()\n game.word = 'word'\n game.pattern = 'word'\n game.check_solved()\n assert game.solved is True", "def introduce_bug(self, error_prob):\n self.bugged = True\n self.error_prob = error_prob", "def test_nonlocal_pauli_error_gate_25percent(self):\n qr = QuantumRegister(3, 'qr')\n cr 
= ClassicalRegister(3, 'cr')\n circuit = QuantumCircuit(qr, cr)\n circuit.cx(qr[0], qr[1])\n circuit.barrier(qr)\n circuit.cx(qr[1], qr[0])\n circuit.barrier(qr)\n circuit.measure(qr, cr)\n backend = QasmSimulator()\n shots = 2000\n # test noise model\n error = pauli_error([('XII', 0.25), ('III', 0.75)])\n noise_model = NoiseModel()\n noise_model.add_nonlocal_quantum_error(error, 'cx', [0, 1], [0, 1, 2])\n # Execute\n target = {'0x0': 3 * shots / 4, '0x4': shots / 4}\n circuit = transpile(circuit, basis_gates=noise_model.basis_gates)\n qobj = assemble([circuit], backend, shots=shots)\n result = backend.run(qobj, noise_model=noise_model).result()\n self.is_completed(result)\n self.compare_counts(result, [circuit], [target], delta=0.05 * shots)", "def test_step_gol():\n test_field = torch.tensor([[0, 1, 1, 1], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]])\n\n # Checking that the game works properly\n assert torch.all(torch.eq(step(test_field), torch.tensor([[0, 1, 1, 1],\n [0, 0, 1, 0],\n [0, 0, 0, 0],\n [0, 0, 1, 0]])))\n return", "def test_match_left_none():\r\n runmatch(lcode_left_none)", "def check_scroll_to_problem(self, step):\r\n annotation_input_selector = self.active_problem_selector('.annotation-input')\r\n assert_true(world.css_visible(annotation_input_selector))", "def solveOneStep(self):\n ### Student code goes here\n return True", "def checkReissues(self):\n return None", "def verify_hack_key(self):\r\n\t\tself.percent_english = Dict_Control(self.my_code).check_key()\r\n\t\t#If more than half the words are english, the key will pass. \r\n\t\tif self.percent_english > 50:\r\n\t\t\tself.hack_plausible = True", "def test_cadastros_de_registros_no_site_rpa_challenge():", "def _is_goto(self, words):\n if words[0] == 'goto':\n if len(words) != 2:\n raise SyntaxError(\"File line {}: Invalid number of arguments for C_GOTO command.\".format(self._file_line))\n return True\n else:\n return False", "def test_invalid_link(self):\r\n\r\n # Setup the peer grading module with no linked locations.\r\n peer_grading = self._create_peer_grading_with_linked_problem(self.coe_location, valid_linked_descriptor=False)\r\n\r\n self.assertFalse(peer_grading.use_for_single_location_local)\r\n self.assertTrue(peer_grading.use_for_single_location)", "def test_double_corrupt(pid: int, otId: int) -> bool:\n box_mon = BoxMon()\n box_mon.personality = pid\n box_mon.otId = otId\n box_mon.sub(0).type0.species = 308\n box_mon.sub(0).type0.experience = 2195\n box_mon.sub(0).type0.friendship = 70\n sub1 = box_mon.sub(1).type1\n sub1.moves[0] = 33\n sub1.moves[1] = 253\n sub1.moves[2] = 185\n sub1.pp[0] = 35\n sub1.pp[1] = 10\n sub1.pp[2] = 20\n sub2 = box_mon.sub(2).type2\n sub2.attackEV = 22\n sub2.hpEV = 8\n sub3 = box_mon.sub(3).type3\n sub3.metLocation = 28\n sub3.metLevel = 14\n sub3.metGame = 3\n sub3.pokeBall = 2\n sub3.otGender = 1\n sub3.unk = 977594907\n box_mon.checksum = box_mon.calc_checksum()\n sum1 = box_mon.checksum\n box_mon.encrypt()\n box_mon.personality |= 0x40000000\n box_mon.decrypt()\n sum2 = box_mon.calc_checksum()\n box_mon.encrypt()\n box_mon.otId |= 0x40000000\n box_mon.decrypt()\n sum3 = box_mon.calc_checksum()\n if sum1 == sum2 == sum3 and box_mon.sub(3).type3.isEgg == 0:\n box_mon.encrypt()\n return True\n return False", "def test_is_solved_when_puzzle_is_solved(self):\n self.assertTrue(self.sudoku.is_solved())", "def test_staff_inputs_expressions_legacy(self):\r\n problem = self.build_problem(answer=\"1+1j\", tolerance=1e-3)\r\n self.assert_grade(problem, '1+j', 'correct')", "def 
broken_refers(one_experiment, storage):\n ensure_deterministic_id(\n \"test_single_exp\", storage, update=dict(refers={\"oups\": \"broken\"})\n )", "def test(self):\n # 0x13 is nop\n self.gdb.command(\"p *((int*) 0x%x)=0x13\" % self.target.ram)\n self.gdb.command(\"p *((int*) 0x%x)=0x13\" % (self.target.ram + 4))\n self.gdb.command(\"p *((int*) 0x%x)=0x13\" % (self.target.ram + 8))\n self.gdb.p(\"$pc=0x%x\" % self.target.ram)\n self.gdb.stepi()\n assertEqual((self.target.ram + 4), self.gdb.p(\"$pc\"))\n self.gdb.stepi()\n assertEqual((self.target.ram + 8), self.gdb.p(\"$pc\"))", "def test(did_pass):\n linenum = sys._getframe(1).f_lineno\n if did_pass:\n msg = 'Test at line {0} ok.'.format(linenum)\n else:\n msg = 'Test at line {0} FAILED.'.format(linenum)\n print(msg)", "def test_72_break(self):\n\t\tinput = \"\"\"procedure main(); var x:integer;\n\t\tbegin while(true)do begin\n\t\twith x:integer; do with x:real; do begin\n\t\t\tif (x>0) then break;\n\t\t\twith x:integer; do break;\n\t\tend{with} end{with} foo(); end\"\"\"\n\t\texpect = \"Undeclared Procedure: foo\"\n\t\tself.assertTrue(TestChecker.test(input,expect,472))", "def skip_test(n):\n return k > 0 and magic * n * k**0.5 >= t4_ref", "def check():", "def test(did_pass):\r\n linenum = sys._getframe(1).f_lineno # Get the caller's line number\r\n if did_pass:\r\n msg = \"Test at line {0} is ok\".format(linenum)\r\n else:\r\n msg = \"Test at line {0} is FAILED\".format(linenum)\r\n print(msg)", "def test(did_pass):\n linenum = sys._getframe(1).f_lineno # Get the caller's line number.\n if did_pass:\n msg = \"Test at line {0} ok.\".format(linenum)\n else:\n msg = (\"Test at line {0} FAILIED.\".format(linenum))\n print(msg)", "def test(did_pass):\n linenum = sys._getframe(1).f_lineno # Get the caller's line number.\n if did_pass:\n msg = \"Test at line {0} ok.\".format(linenum)\n else:\n msg = (\"Test at line {0} FAILIED.\".format(linenum))\n print(msg)", "def testoptdone(self):\r\n assert self.data.optdone\r\n assert numpy.all(numpy.abs(self.data.geovalues[-1]) <= self.data.geotargets)", "def event_m20_11_15040():\n \"\"\"State 0,2: [Preset] Photoworm_Frog_SubState reacts to enemy and PC approach\"\"\"\n assert (event_m20_11_x117(z13=5, z14=4220, z15=20114610, z16=211000016, z17=16010, z18=211000081,\n z19=802))\n \"\"\"State 1: Finish\"\"\"\n EndMachine()", "def TestPercentWrong(times, percent_wrong):\n print \"Testing %d%% wrong.\" % (percent_wrong)\n cycle_at = 100. * LIMIT/(100 - percent_wrong)\n if cycle_at != round(cycle_at, 0):\n raise ValueError, (\n \"Inputs will not produce a fair comparison.\")\n cycle_at = int(cycle_at)\n # pychecker complains that i is unused below! 
\n for i, j in enumerate(range(times)): \n j %= cycle_at\n TryWay(j)\n TestWay(j)\n TestWay2(j)", "def test_block_bad_state(self):\n pass", "def errorCheck(self):\n\t\twhile 1:\n #check for bad state\n\t\t\tif epics.caget(self.error_bypass) == 1:\n\t\t\t\tout_msg=\"Bypass flag is TRUE\"\n elif epics.caget(self.error_bcs) != 1:\n out_msg=\"BCS tripped\"\n elif epics.caget(self.error_mps) != 0:\n out_msg=\"MPS tripped\"\n elif epics.caget(self.error_gaurdian) != 0:\n out_msg=\"Gaurdian tripped\"\n\t\t\n #elif epics.caget(self.error_und_tmit) < 5.0e7:\n # out_msg=\"UND Tmit Low\"\n else:\n out_msg='Everything Okay'\n\n #exit if the stop button is set\n #if not self.mi.getter.caget(\"SIOC:SYS0:ML03:AO702\"):\n\t\t\tif not epics.caget(\"SIOC:SYS0:ML03:AO702\"):\n break\n\n #set the error check message\n epics.caput (\"SIOC:SYS0:ML00:CA000\",out_msg)\n print out_msg\n\n #break out if error check is bypassed\n if (out_msg==\"Bypass flag is TRUE\"):\n break\n\n #break out if everything is okay\n if (out_msg==\"Everything Okay\"):\n epics.caput(self.error_tripped,0)\n break\n\t\t\t\t#return\n else:\n epics.caput(self.error_tripped,1)\n time.sleep(0.1)", "def com_google_fonts_check_038(font, fontforge_check_results):\n if \"skip\" in fontforge_check_results:\n yield SKIP, fontforge_check_results[\"skip\"]\n return\n\n filtered_err_msgs = \"\"\n for line in fontforge_check_results[\"ff_err_messages\"].split('\\n'):\n if ('The following table(s) in the font'\n ' have been ignored by FontForge') in line:\n continue\n if \"Ignoring 'DSIG' digital signature table\" in line:\n continue\n filtered_err_msgs += line + '\\n'\n\n if len(filtered_err_msgs.strip()) > 0:\n yield WARN, (\"FontForge seems to dislike certain aspects of this font file.\"\n \" The actual meaning of the log messages below is not always\"\n \" clear and may require further investigation.\\n\\n\"\n \"{}\").format(filtered_err_msgs)\n else:\n yield PASS, \"FontForge validation did not output any error message.\"", "def test_case_01(self):\n if True:\n self.fail()", "def com_google_fonts_check_037(font):\n\n # In some cases we want to override the severity level of\n # certain checks in FontValidator:\n downgrade_to_warn = [\n # There are reports that this fontval check has an out-of-date\n # understanding of valid bits in fsSelection.\n # More info at:\n # https://github.com/googlei18n/fontmake/issues/414#issuecomment-379408127\n \"There are undefined bits set in fsSelection field\",\n\n # FIX-ME: Why did we downgrade this one to WARN?\n \"Misoriented contour\"\n ]\n\n # Some other checks we want to completely disable:\n disabled_fval_checks = [\n # FontVal E4012 thinks that\n # \"Versions 0x00010000 and 0x0001002 are currently\n # the only defined versions of the GDEF table.\"\n # but the GDEF chapter of the OpenType specification at\n # https://docs.microsoft.com/en-us/typography/opentype/spec/gdef\n # describes GDEF header version 1.3, which is not yet recognized\n # by FontVal, thus resulting in this spurious false-FAIL:\n \"The version number is neither 0x00010000 nor 0x0001002\",\n\n # These messages below are simply fontval given user feedback\n # on the progress of runnint it. 
It has nothing to do with\n # actual issues on the font files:\n \"Validating glyph with index\",\n \"Table Test:\",\n\n # No software is affected by Mac strings nowadays.\n # More info at: googlei18n/fontmake#414\n \"The table doesn't contain strings for Mac platform\",\n \"The PostScript string is not present for both required platforms\",\n\n # Font Bakery has got a native check for the xAvgCharWidth field\n # which is: com.google.fonts/check/034\n \"The xAvgCharWidth field does not equal the calculated value\",\n\n # The optimal ordering suggested by FVal check W0020 seems to only be\n # relevant to performance optimizations on old versions of Windows\n # running on old hardware. Since such performance considerations\n # are most likely negligible, we're not going to bother users with\n # this check's table ordering requirements.\n # More info at:\n # https://github.com/googlefonts/fontbakery/issues/2105\n \"Tables are not in optimal order\",\n\n # Font Bakery has its own check for required/optional tables:\n # com.google.fonts/check/052 - \"Font contains all required tables?\"\n \"Recommended table is missing\"\n ]\n\n # There are also some checks that do not make\n # sense when we're dealing with variable fonts:\n VARFONT_disabled_fval_checks = [\n # Variable fonts typically do have lots of self-intersecting\n # contours because they are used to draw each portion\n # of variable glyph features.\n \"Intersecting contours\",\n \"Intersecting components of composite glyph\",\n\n # DeltaFormat = 32768 (same as 0x8000) means VARIATION_INDEX,\n # according to https://docs.microsoft.com/en-us/typography/opentype/spec/chapter2\n # The FontVal problem description for this check (E5200) only mentions\n # the other values as possible valid ones. So apparently this means FontVal\n # implementation is not up-to-date with more recent versions of the OpenType spec\n # and that's why these spurious FAILs are being emitted.\n # That's good enough reason to mute it.\n # More info at:\n # https://github.com/googlefonts/fontbakery/issues/2109\n \"The device table's DeltaFormat value is invalid\"\n ]\n\n from fontTools.ttLib import TTFont\n if is_variable_font(TTFont(font)):\n disabled_fval_checks.extend(VARFONT_disabled_fval_checks)\n\n try:\n import subprocess\n fval_cmd = [\n \"FontValidator\", \"-file\", font, \"-all-tables\",\n \"-report-in-font-dir\", \"-no-raster-tests\"\n ]\n subprocess.check_output(fval_cmd, stderr=subprocess.STDOUT)\n except subprocess.CalledProcessError as e:\n filtered_msgs = \"\"\n for line in e.output.decode().split(\"\\n\"):\n disable_it = False\n for substring in disabled_fval_checks:\n if substring in line:\n disable_it = True\n if not disable_it:\n filtered_msgs += line + \"\\n\"\n yield INFO, (\"Microsoft Font Validator returned an error code.\"\n \" Output follows :\\n\\n{}\\n\").format(filtered_msgs)\n except (OSError, IOError) as error:\n yield ERROR, (\"Mono runtime and/or \"\n \"Microsoft Font Validator are not available!\")\n raise error\n\n def report_message(msg, details):\n if details:\n if isinstance(details, list) and len(details) > 1:\n # We'll print lists with one item per line for\n # improved readability.\n if None in details:\n details.remove(None)\n\n # A designer will likely not need the full list\n # in order to fix a problem.\n # Showing only the 10 first ones is more than enough\n # and helps avoid flooding the report.\n if len(details) > 25:\n num_similar = len(details) - 10\n details = details[:10]\n details.append(f\"NOTE: {num_similar} other 
similar\"\n \" results were hidden!\")\n details = '\\n\\t- ' + '\\n\\t- '.join(details)\n return f\"MS-FonVal: {msg} DETAILS: {details}\"\n else:\n return f\"MS-FonVal: {msg}\"\n\n xml_report_file = f\"{font}.report.xml\"\n html_report_file = f\"{font}.report.html\"\n fval_file = os.path.join(os.path.dirname(font), 'fval.xsl')\n\n grouped_msgs = {}\n with open(xml_report_file, \"rb\") as xml_report:\n import defusedxml.lxml\n doc = defusedxml.lxml.parse(xml_report)\n\n for report in doc.iter('Report'):\n msg = report.get(\"Message\")\n details = report.get(\"Details\")\n\n disable_it = False\n for substring in disabled_fval_checks:\n if substring in msg:\n disable_it = True\n if disable_it:\n continue\n\n if msg not in grouped_msgs:\n grouped_msgs[msg] = {\"errortype\": report.get(\"ErrorType\"),\n \"details\": [details]}\n else:\n if details not in grouped_msgs[msg][\"details\"]:\n # avoid cluttering the output with tons of identical reports\n # yield INFO, 'grouped_msgs[msg][\"details\"]: {}'.format(grouped_msgs[msg][\"details\"])\n grouped_msgs[msg][\"details\"].append(details)\n\n # ---------------------------\n # Clean-up generated files...\n os.remove(xml_report_file)\n # FontVal internal detail: HTML report generated only on non-Windows due to\n # Mono or the used HTML renderer not being able to render XML with a\n # stylesheet directly. https://github.com/googlefonts/fontbakery/issues/1747\n if os.path.exists(html_report_file):\n os.remove(html_report_file)\n os.remove(fval_file)\n\n # ---------------------------\n # Here we start emitting the grouped log messages\n for msg, data in grouped_msgs.items():\n # But before printing we try to make the \"details\" more\n # readable. Otherwise the user would get the text terminal\n # flooded with messy data.\n\n # No need to print is as a list if wereally only\n # got one log message of this kind:\n if len(data[\"details\"]) == 1:\n data[\"details\"] = data[\"details\"][0]\n\n # Simplify the list of glyph indices by only displaying\n # their numerical values in a list:\n for glyph_index in [\"Glyph index \", \"glyph# \"]:\n if data[\"details\"] and \\\n data[\"details\"][0] and \\\n glyph_index in data[\"details\"][0]:\n try:\n data[\"details\"] = {'Glyph index': [int(x.split(glyph_index)[1])\n for x in data[\"details\"]]}\n break\n except ValueError:\n pass\n\n # And, finally, the log messages are emitted:\n if data[\"errortype\"] == \"P\":\n yield PASS, report_message(msg, data[\"details\"])\n\n elif data[\"errortype\"] == \"E\":\n status = FAIL\n for substring in downgrade_to_warn:\n if substring in msg:\n status = WARN\n yield status, report_message(msg, data[\"details\"])\n\n elif data[\"errortype\"] == \"W\":\n yield WARN, report_message(msg, data[\"details\"])\n\n else:\n yield INFO, report_message(msg, data[\"details\"])", "def isJump(self) -> bool:\n ...", "def event_m20_11_x32(z104=20111500):\n \"\"\"State 0,1: [Private] [Asynchronous] [Reproduction] Bug key _SubState\"\"\"\n call = event_m20_11_x26(z104=z104)\n if call.Get() == 1:\n \"\"\"State 7: End of reproduction\"\"\"\n return 0\n elif call.Get() == 2:\n \"\"\"State 5: [Private] [Asynchronous] [Condition] Guest Bug Key_SubState\"\"\"\n assert event_m20_11_x30(z104=z104)\n \"\"\"State 6: [Lib] [Execution] Dummy_SubState\"\"\"\n assert event_m20_11_x31()\n elif call.Done():\n \"\"\"State 2: [Private] [Asynchronous] [Condition] Bug key _SubState\"\"\"\n # goods:60536000:Pharros' Lockstone\n call = event_m20_11_x27(z104=z104, mode2=1, goods3=60536000)\n if call.Get() == 0:\n 
\"\"\"State 3: [Private] [Asynchronous] [Execution] Use bug key _SubState\"\"\"\n # goods:60536000:Pharros' Lockstone\n assert event_m20_11_x28(z104=z104, z106=38, z107=12, z108=1, goods2=60536000)\n elif call.Done():\n \"\"\"State 4: [Private] [Asynchronous] [Execution] Insect key unavailable dialog _SubState\"\"\"\n # goods:60536000:Pharros' Lockstone\n event_m20_11_x29(z104=z104, z105=1, goods1=60536000)\n Quit()\n \"\"\"State 8: End of execution\"\"\"\n return 1", "def exercise_b2_53():\r\n pass", "def WinCheck(field):\n for i in range(len(field)):\n if field[i] == 2048:\n return True\n return False", "def step_solution(self):\n import time, random\n time.sleep(1.0)\n print '(step_solution) Implement me!'\n return True if random.random() < 0.25 else False", "def op_jump_postconditions(self,oldPieceCoords,newPieceCoords):\n\n # Start of new state constrution\n next_gs_board = Board.from_binary_matrix(self.board)\n next_gs_board.set_element(newPieceCoords[0], newPieceCoords[1], self.curr_player)\n next_gs_board.remove_element(oldPieceCoords[0], oldPieceCoords[1])\n next_gs_next_player = self.curr_player\n next_gs_next_move = self.FREE\n next_gs_next_pieces = set()\n\n\n new_gs = Eximo(next_gs_next_player,next_gs_next_move,next_gs_next_pieces,next_gs_board)\n\n # Check if moved piece has reached opposite side\n if(new_gs.reach_otherside(newPieceCoords)):\n new_gs.board.remove_element(newPieceCoords[0], newPieceCoords[1])\n new_gs.next_move = self.ADDPIECE_2\n new_gs.next_pieces = new_gs.addition_viable_tiles()\n new_gs.perform_checkup()\n\n # Check if the next move must also be a jump by the same player\n elif(new_gs.can_jump(newPieceCoords)):\n new_gs.next_move = self.JUMP\n new_gs.next_pieces = {newPieceCoords}\n\n else:\n new_gs.curr_player = self.get_enemy(self.curr_player)\n\n # Check if the next_piece checkup needs to be made\n if new_gs.curr_player == self.get_enemy(self.curr_player):\n new_gs.perform_checkup()\n\n new_gs.last_piece = newPieceCoords\n\n return new_gs", "def test_77_continue(self):\n\t\tinput = \"\"\"procedure main(); var x:integer;\n\t\tbegin while(true)do begin\n\t\twith x:integer; do with x:real; do begin\n\t\t\tif (x>0) then continue;\n\t\t\twith x:integer; do if (x=0) then continue; else return;\n\t\tend{with} end{while} foo(); end\"\"\"\n\t\texpect = \"Undeclared Procedure: foo\"\n\t\tself.assertTrue(TestChecker.test(input,expect,477))", "def test_jump_to_location_shift(self, cpu):\n for shift in range(0x0, 0xFFF):\n cpu.opcode = 0xB000 | shift\n cpu.jump_to_location_shift()\n cpu.program_counter += 2\n assert(cpu.program_counter == cpu.V_register[0] + shift)", "def test_27(self):\n assert 'False' == Api.requestBlock('test-27')", "def test_78_continue(self):\n\t\tinput = \"\"\"procedure main(); var x:integer;\n\t\tbegin while(true)do begin\n\t\twith x:integer; do with x:real; do begin\n\t\t\tif (x>0) then continue;\n\t\t\twith x:integer; do if (x=0) then return;\n\t\tend{with} continue; end{while} continue; end\"\"\"\n\t\texpect = \"Continue Not In Loop\"\n\t\tself.assertTrue(TestChecker.test(input,expect,478))", "def check_offset(self):\n\n for d in range(self.n_dmps):\n if abs(self.y0[d] - self.goal[d]) < 1e-4:\n self.goal[d] += 1e-4", "def test_kyc_post_legal_board_member(self):\n pass", "def test_is_advancing_to_next_stage_no(self):\n\n # test_input_cases =\n # [(die_a_value, die_b_value, stage, ok_output),]\n test_input_cases = [\n (\"1\", \"2\", 2, False),\n (\"2\", \"1\", 3, False),\n (\"1\", \"1\", 1, False),\n (\"1\", \"1\", 2, False),\n (\"1\", 
\"1\", 3, False),\n (\"ANGRY\", \"1\", 1, False),\n (\"ANGRY\", \"1\", 2, False),\n ]\n\n for test_io in test_input_cases:\n self._test_is_game_over(*test_io)", "def check_go(*args):\n if np.sum(getGOState(args[0]))!= 3:\n args[0].Controls.IgnitorState.ignitor_state = 0\n return args[0].Controls.IgnitorState.ignitor_state", "def test_linked_problem(self):\r\n\r\n # Setup the peer grading module with the proper linked location.\r\n peer_grading = self._create_peer_grading_with_linked_problem(self.coe_location)\r\n\r\n # Ensure that it is properly setup.\r\n self.assertTrue(peer_grading.use_for_single_location)", "def test(did_pass):\r\n linenum = sys._getframe(1).f_lineno # Get the caller's line number.\r\n if did_pass:\r\n msg = \"Test at line {0} ok.\".format(linenum)\r\n else:\r\n msg = (\"Test at line {0} FAILED.\".format(linenum))\r\n print(msg)", "def test(did_pass):\r\n linenum = sys._getframe(1).f_lineno # Get the caller's line number.\r\n if did_pass:\r\n msg = \"Test at line {0} ok.\".format(linenum)\r\n else:\r\n msg = (\"Test at line {0} FAILED.\".format(linenum))\r\n print(msg)", "def test(did_pass):\r\n linenum = sys._getframe(1).f_lineno # Get the caller's line number.\r\n if did_pass:\r\n msg = \"Test at line {0} ok.\".format(linenum)\r\n else:\r\n msg = (\"Test at line {0} FAILED.\".format(linenum))\r\n print(msg)", "def test(did_pass):\r\n linenum = sys._getframe(1).f_lineno # Get the caller's line number.\r\n if did_pass:\r\n msg = \"Test at line {0} ok.\".format(linenum)\r\n else:\r\n msg = (\"Test at line {0} FAILED.\".format(linenum))\r\n print(msg)", "def test(did_pass):\r\n linenum = sys._getframe(1).f_lineno # Get the caller's line number.\r\n if did_pass:\r\n msg = \"Test at line {0} ok.\".format(linenum)\r\n else:\r\n msg = (\"Test at line {0} FAILED.\".format(linenum))\r\n print(msg)", "def _test(self, c):\n\treturn self.UNRESOLVED\t\t# Placeholder", "def noCheck():\n dislin.nochek()", "def _perturbInPlaceHard(self):\n die" ]
[ "0.6839827", "0.674124", "0.66378564", "0.62783855", "0.62351984", "0.6205175", "0.6149223", "0.60777533", "0.6027441", "0.5959538", "0.5939859", "0.59033215", "0.58415735", "0.5746709", "0.5571056", "0.55030173", "0.54555553", "0.5397869", "0.5396254", "0.5367164", "0.5297811", "0.5294832", "0.5288746", "0.52521676", "0.5241512", "0.5241512", "0.522016", "0.5187237", "0.51809484", "0.5179068", "0.5170127", "0.5143471", "0.512823", "0.51114684", "0.51043755", "0.50940484", "0.5093066", "0.5081337", "0.50790745", "0.5077202", "0.50700665", "0.50679237", "0.50657666", "0.5055504", "0.5043713", "0.5042598", "0.50421023", "0.50300694", "0.5024057", "0.50239164", "0.50231946", "0.50213915", "0.5015514", "0.50147414", "0.50105435", "0.500883", "0.5005456", "0.5001966", "0.49978676", "0.49923888", "0.49903807", "0.4989088", "0.49860746", "0.49856615", "0.4985381", "0.49845803", "0.497921", "0.49791113", "0.49791113", "0.4972376", "0.49719006", "0.4967938", "0.4963145", "0.49627617", "0.49626496", "0.49618196", "0.4960619", "0.49603522", "0.49584523", "0.49567518", "0.4956097", "0.49547124", "0.49533918", "0.49506682", "0.49502793", "0.49486414", "0.49471924", "0.4941498", "0.49383828", "0.4936054", "0.49341542", "0.493085", "0.49263868", "0.49263868", "0.49263868", "0.49263868", "0.49263868", "0.49186513", "0.49178258", "0.49173805" ]
0.59667176
9
shutter close, FF on, guider flat, FF off, open FFS 3xguider axes off, guider on
def test_goto_field_apogee_no_slew_shutter_open(self):
        sopTester.updateModel('apogee', TestHelper.apogeeState['B_open'])
        cmdState = self.actorState.gotoField
        cmdState.reinitialize(self.cmd)
        cmdState.doSlew = False
        self._goto_feld_apogee(10, 37, 0, 0, cmdState)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def zguider():\n gzero.gxoff = camera.status.guider[0] + gzero.gxoff\n gzero.gyoff = camera.status.guider[1] + gzero.gyoff\n guider(0,0)\n f = open('/data/guidezero','w')\n cPickle.dump(gzero,f)\n f.close()", "def f_fixed(self):\n self.fx_free = self.fy_free = self.fz_free = False\n return self", "def sink_floats(qtile):\n for window in qtile.current_group.windows:\n if window.floating:\n window.toggle_floating()", "def update_focal_axes(self):\n #self.update_sigma()\n self.updateGL()", "def SetStandbyFPMode(self):\n handler = self.get_command_object(\"SetStandbyFPMode\")\n handler()", "def main():\n\n robot = S2Fluke2( \"/dev/rfcomm2\" )\n\n print( \"getVersion :\", robot.getVersion() )\n print( \"identifyRobot :\", robot.identifyRobot() )\n print( \"getBattery :\", robot.getBattery() )\n print( \"setForwardness: SCRIBBLER_FORWARD\" )\n robot.setForwardness( robot.SCRIBBLER_FORWARD )\n print( \"setForwardness: FLUKE_FORWARD\" )\n robot.setForwardness( robot.FLUKE_FORWARD )\n print( \"setForwardness: SCRIBBLER_FORWARD\" )\n robot.setForwardness( robot.SCRIBBLER_FORWARD )\n print( \"getErrors : \" )\n print( robot.getErrors() )\n robot.resetScribbler()\n\n robot.close()", "def event_m20_11_x115(z7=_):\n \"\"\"State 0,1: Flying animation playback\"\"\"\n ChangeObjState(z7, 70)\n assert CompareObjStateId(z7, 20, 0)\n \"\"\"State 2: End state\"\"\"\n return 0", "def go_infFD(self):\n\n response = self.send_lens_cmd(['05', '00', '00', '00'], fast_mode=True)\n self.wait_focus_move()", "def addFluxcal():\n # Overall\n i = s.getScriptInt(odi.INDX_INT_NOBS_FLUX) - 1\n if i < 0: i = 0\n s.setScriptInt(odi.INDX_INT_NOBS_FLUX, i)\n\n # Primary\n i = s.getScriptInt(odi.INDX_INT_NOBS_FLUXPRI) - 1\n if i < 0: i = 0\n s.setScriptInt(odi.INDX_INT_NOBS_FLUXPRI, i)", "def _checkForSixaxis():\n return sixaxis.init(\"/dev/input/js1\")", "def setup(self, flags):\n self.figure = pylab.figure(1)\n self.axes = {}\n self.stream_data = {}\n self.flags = flags", "def OnFloated(self, event):\n self._floating = True\n wx.PostEvent(self, wxDockPaneFloatedEvent())", "def guider(x=0,y=0):\n if x==0 and y==0 and (gzero.gxoff<>0 or gzero.gyoff<>0):\n opticalcoupler.HomeXYStage()\n opticalcoupler.MoveXYStage( x=(x+gzero.gxoff), y=(y+gzero.gyoff) )\n camera.status.guider = (x,y)", "def main_multimodal_fusion(im_vis, im_ir, kernel, levels, window_size):\n\n im_vis = convert_image_to_floats(im_vis)\n im_ir = convert_image_to_floats(im_ir)\n\n im_vis_hsv = rgb2hsv(im_vis)\n value_channel = im_vis_hsv[:, :, 2]\n\n plt.subplot(1, 2, 1)\n plt.imshow(value_channel, cmap='gray')\n plt.subplot(1, 2, 2)\n plt.imshow(im_ir, cmap='gray')\n plt.show()\n\n # kernels to compute visibility\n kernel1 = classical_gaussian_kernel(5, 2)\n kernel2 = classical_gaussian_kernel(5, 2)\n\n # Computation of local entropy, local contrast and visibility for value channel\n local_entropy_value = normalized_local_entropy(value_channel, window_size)\n local_contrast_value = local_contrast(value_channel, window_size)\n visibility_value = visibility(value_channel, kernel1, kernel2)\n # Combination of local entropy, local contrast and visibility for value channel\n weight_value = weight_combination(local_entropy_value, local_contrast_value, visibility_value, 1, 1, 1)\n\n # Computation of local entropy, local contrast and visibility for IR image\n local_entropy_ir = normalized_local_entropy(im_ir, window_size)\n local_contrast_ir = local_contrast(im_ir, window_size)\n visibility_ir = visibility(im_ir, kernel1, kernel2)\n # Combination of local entropy, local 
contrast and visibility for IR image\n weight_ir = weight_combination(local_entropy_ir, local_contrast_ir, visibility_ir, 1, 1, 1)\n\n plt.subplot(2, 3, 1)\n plt.imshow(local_entropy_value, cmap='gray')\n plt.subplot(2, 3, 2)\n plt.imshow(local_contrast_value, cmap='gray')\n plt.subplot(2, 3, 3)\n plt.imshow(visibility_value, cmap='gray')\n plt.subplot(2, 3, 4)\n plt.imshow(local_entropy_ir, cmap='gray')\n plt.subplot(2, 3, 5)\n plt.imshow(local_contrast_ir, cmap='gray')\n plt.subplot(2, 3, 6)\n plt.imshow(visibility_ir, cmap='gray')\n plt.show()\n\n # Normalising weights of value channel and IR image\n weightN_value, weightN_ir = weight_normalization(weight_value, weight_ir)\n\n plt.subplot(1, 2, 1)\n plt.imshow(weightN_value, cmap='gray')\n plt.subplot(1, 2, 2)\n plt.imshow(weightN_ir, cmap='gray')\n plt.show()\n\n # Creating Gaussian pyramids of the weights maps of respectively the value channel and IR image\n gauss_pyr_value_weights = gaussian_pyramid(weightN_value, kernel, levels)\n gauss_pyr_ir_weights = gaussian_pyramid(weightN_ir, kernel, levels)\n\n # Creating Laplacian pyramids of respectively the value channel and IR image\n lap_pyr_value = laplacian_pyramid(value_channel, kernel, levels)\n lap_pyr_ir = laplacian_pyramid(im_ir, kernel, levels)\n\n # Creating the fused Laplacian of the two modalities\n lap_pyr_fusion = fused_laplacian_pyramid(gauss_pyr_value_weights, gauss_pyr_ir_weights, lap_pyr_value, lap_pyr_ir)\n\n i = 1\n for l in lap_pyr_fusion:\n plt.subplot(1, len(lap_pyr_fusion), i)\n plt.imshow(l, cmap='gray')\n i += 1\n plt.show()\n\n # Creating the Gaussian pyramid of value channel in order to collapse the fused Laplacian pyramid\n gauss_pyr_value = gaussian_pyramid(value_channel, kernel, levels)\n collapsed_image = collapse_pyramid(lap_pyr_fusion, gauss_pyr_value)\n\n # Replacing the value channel in HSV visible image by the collapsed image\n im_vis_hsv_fusion = im_vis_hsv.copy()\n im_vis_hsv_fusion[:, :, 2] = collapsed_image\n im_vis_rgb_fusion = hsv2rgb(im_vis_hsv_fusion)\n\n plt.subplot(1, 2, 1)\n plt.imshow(im_vis)\n plt.subplot(1, 2, 2)\n plt.imshow(im_vis_rgb_fusion)\n plt.show()", "def setup_fermi(self):\n eventclass=5 # 2 (Source) or 5 (UltracleanVeto)\n eventtype=0 # 0 (all), 3 (bestpsf) or 5 (top3 quartiles)\n mask_type='top300'\n force_mask_at_bin_number=10\n\n self.f1 = fp.fermi_plugin(maps_dir,fermi_data_dir=fermi_data_dir,work_dir=work_dir,CTB_en_min=0,CTB_en_max=40,nside=self.nside,eventclass=eventclass,eventtype=eventtype,newstyle=1,data_July16=True)\n\n if mask_type != 'False':\n self.f1.make_ps_mask(mask_type = mask_type,force_energy = True,energy_bin = force_mask_at_bin_number)\n self.f1.add_diffuse_newstyle(comp = 'p7', eventclass = eventclass, eventtype = eventtype)\n self.f1.add_bubbles(comp='bubs') #bubbles\n self.f1.add_iso(comp='iso') #iso\n self.f1.add_ps_model(comp='ps_model')\n\n # Exposure correct J_map_arr\n self.J_map_arr *= self.f1.CTB_exposure_maps\n\n # Add J-factor map with mean 1 in each energy bin\n self.f1.add_template_by_hand('J_map',np.array([self.J_map_arr[i]/np.mean(self.J_map_arr[i]) for i in range(40)]))", "def plot_fppy(self,LAXIS,xbl,xbr,ybu,ybd,ilg): \n\t\t\n # load x GRID\n grd1 = self.xzn0\n\t\n # load DATA to plot\n plt1 = self.fppy\n\t\t\t\t\n # create FIGURE\n plt.figure(figsize=(7,6))\n\t\t\n # format AXIS, make sure it is exponential\n plt.gca().yaxis.get_major_formatter().set_powerlimits((0,0))\t\t\n\n # set plot boundaries \n to_plot = [plt1]\t\t\n self.set_plt_axis(LAXIS,xbl,xbr,ybu,ybd,to_plot)\n\t\t\t\t\n # 
plot DATA \n plt.title(r'pressure flux y')\n plt.plot(grd1,plt1,color='brown',label = r'f$_{py}$')\n\n # define and show x/y LABELS\n setxlabel = r\"r (cm)\"\n setylabel = r\"$f_{py}$ (erg cm$^{-2}$ s$^{-1}$)\"\n plt.xlabel(setxlabel)\n plt.ylabel(setylabel)\n\t\t\n # show LEGEND\n plt.legend(loc=ilg,prop={'size':18})\n\n # display PLOT\n plt.show(block=False)\n\n # save PLOT\n plt.savefig('RESULTS/'+self.data_prefix+'mean_fppy.png')", "def fc_visual_save(fc, lowweight, savenamefile_prefix):\r\n\r\n\r\n ### text setup for brain areas ###\r\n pos_text_lefttop1 = [-80, 50, 30]\r\n pos_text_middletop1 = [120, 50, 30]\r\n pos_text_lefttop2 = [-80, 70, 10]\r\n pos_text_leftDown1 = [-80, 550, 30]\r\n pos_text_leftDown2 = [-80, 570, 10]\r\n pos_text_leftDown3 = [-80, 580, 10]\r\n \r\n texts_org = dict()\r\n\r\n lowweight = np.round(lowweight, decimals = 2) \r\n\r\n # plot\r\n df_chninf = assign_coord2chnArea(area_coord_file, fc['chnAreas'])\r\n for ci, cond in enumerate(fc['ciCOH'].keys()):\r\n ciCOH = fc['ciCOH'][cond]\r\n ntrials, ntemp = fc['setup']['ntrials_' + cond], fc['setup']['ntemp_' + cond]\r\n\r\n\r\n texts = texts_org.copy()\r\n \r\n text_thred = 'thred = ' + str(np.round(lowweight, decimals = 2))\r\n text_ntrials = 'ntrials = ' + str(ntrials)\r\n\r\n texts[cond] = pos_text_middletop1\r\n texts[text_task] = pos_text_leftDown1\r\n texts[text_ntrials] = pos_text_leftDown2\r\n texts[text_thred] = pos_text_leftDown3\r\n \r\n\r\n saveFCGraph = os.path.join(savefolder, savenamefile_prefix + '_lw' + str(np.round(lowweight, decimals = 2)) + '_' + cond + '.png')\r\n\r\n igplot = ciCOH_visual_save(ciCOH = ciCOH, chnInf = df_chninf, lowweight = lowweight, \r\n savefile = saveFCGraph, texts = texts, threds_edge = None)\r\n\r\n del texts[cond], texts[text_ntrials]\r\n\r\n img = cv2.imread(saveFCGraph)\r\n if ci == 0:\r\n imgs = img\r\n else:\r\n imgs = np.concatenate((imgs, np.zeros((img.shape[0], 5, 3)),img), axis = 1)\r\n\r\n os.remove(saveFCGraph)\r\n\r\n # combine all conditions\r\n print(imgs.shape)\r\n saveFCGraph_comb = os.path.join(savefolder, 'comb_' + savenamefile_prefix + '_lw' + str(np.round(lowweight, decimals = 2)) + '.png')\r\n cv2.imwrite(saveFCGraph_comb, imgs)", "def trigger_set_fetc(self):\n self.write(\"*DDT #15FETC?\")", "def plot_single_hfo(hfo, envelope = False, xlim =[-1,1], cutoff = None, v = True,\n axes = None, figure_size = (15,10),dpi=600,saveplot = None):\n if axes == None:\n # Creating the figure \n fig = plt.figure(figsize=figure_size,dpi=dpi)\n ax1 = fig.add_subplot(311)\n ax2 = fig.add_subplot(312)\n ax3 = fig.add_subplot(313)\n else:\n ax1 = axes[0]\n ax2 = axes[1]\n ax3 = axes[2]\n\n # number of points\n npoints = hfo.waveform.shape[0]\n time_v = np.linspace(-1,1,npoints,endpoint=True)\n # creating the axes\n \n ax1.plot(time_v,hfo.waveform[:,0],'b')\n ax1.plot(time_v[hfo.start_idx:hfo.end_idx],hfo.waveform[hfo.start_idx:hfo.end_idx,0],'k')\n \n adjust_spines(ax1, ['left'])\n ax1.set_xlim(xlim)\n \n \n \n filt = hfo.waveform[:,1]\n ax2.plot(time_v,filt) \n ax2.plot(time_v[hfo.start_idx:hfo.end_idx],filt[hfo.start_idx:hfo.end_idx],'k')\n if envelope:\n env = hfo.waveform[:,2]\n ax4 = ax2.twinx()\n ax4.plot(time_v,env,'g')\n \n\n \n adjust_spines(ax2, ['left', 'bottom'])\n ax2.set_xlim(xlim)\n \n \n hfo.spectrum.plot(cutoff = cutoff, v = v, ax = ax3)\n ax3.set_title('peak freq = ' + str(hfo.spectrum.peak_freq))\n adjust_spines(ax3, ['left', 'bottom'])\n \n if saveplot != None:\n if type(saveplot) == str: \n plt.savefig(saveplot, bbox_inches='tight')\n else:\n 
raise Exception('saveplot should be a string')\n plt.draw()", "def cehs():\n\tcloseEHShutter()", "def friewallOn():\n pass", "def isFim(self):\r\n return", "def plot_fr_and_spikes(self, t):\n plt.figure(figsize=(10, 8))\n\n plt.subplot(2, 2, 1)\n self.plot_base_image()\n\n plt.subplot(2, 2, 2)\n self.plot_firing_rates(t, mode='ON')\n plt.title('Retinal Image')\n\n # Spikes\n ax = plt.subplot(2, 2, 3)\n self.plot_spikes(ax, t, mode='ON', moving_average=True)\n\n ax = plt.subplot(2, 2, 4)\n self.plot_spikes(ax, t, mode='OFF', moving_average=True)", "def gff_init():\n pass", "def ToggleDrawingTools(self, event):\n pass", "def analysePhotometry(self, fsu='FSUA', channel=0, noFTK=False,\n plot=False, label='', normalized=True,\n xlims=None):\n\n t = self.raw['IMAGING_DATA_'+fsu].data.field('TIME')\n\n if noFTK: # check fluxes outside fringes\n if (fsu=='FSUB' and self.insmode=='NORMAL') or \\\n (fsu=='FSUA' and self.insmode=='SWAPPED'):\n w = np.where((np.interp(t, self.raw['OPDC'].data.field('TIME'),\n self.raw['OPDC'].data.field('STATE'))<5))\n else:\n w = np.where(np.interp(t, self.raw['DOPDC'].data.field('TIME'),\n self.raw['DOPDC'].data.field('STATE'))<5)\n print 'noFTK:', round(100*len(w[0])/float(len(t)), 3), '%'\n else:\n w = (range(len(t)),[])\n\n photA = self.raw['IMAGING_DATA_'+fsu].data.field('DATA1')[w[0],channel]-\\\n self.fsu_calib[(fsu, 'DARK')][channel,0]\n photB = self.raw['IMAGING_DATA_'+fsu].data.field('DATA2')[w[0],channel]-\\\n self.fsu_calib[(fsu, 'DARK')][channel,1]\n photC = self.raw['IMAGING_DATA_'+fsu].data.field('DATA3')[w[0],channel]-\\\n self.fsu_calib[(fsu, 'DARK')][channel,2]\n photD = self.raw['IMAGING_DATA_'+fsu].data.field('DATA4')[w[0],channel]-\\\n self.fsu_calib[(fsu, 'DARK')][channel,3]\n\n phot0 = (photA+photB+photC+photD)/4.0\n s0 = np.argsort(phot0)\n sA = np.argsort(photA)\n sB = np.argsort(photB)\n sC = np.argsort(photC)\n sD = np.argsort(photD)\n\n if plot:\n fig = plt.figure(0)\n fig.clf()\n if normalized:\n plt.hist(phot0/phot0.mean(), bins=50, normed=True,\n alpha=0.8, color='y')\n h = np.histogram(phot0/phot0.mean(), bins=50, normed=True)\n plt.xlabel('flux / mean(flux)')\n else:\n plt.hist(phot0, bins=50, normed=True, alpha=0.8, color='y')\n h = np.histogram(phot0, bins=50, normed=True)\n plt.xlabel('flux (ADU)')\n if not xlims is None:\n plt.xlim(xlims[0], xlims[1])\n plt.title(label)\n poissonDist = lambda x,p:\\\n poisson(p['m']*p['p']).pmf(np.int_(np.floor(x*p['p'])))*p['p'] +\\\n (x*p['p']-np.floor(x*p['p']))/\\\n (np.ceil(x*p['p'])-np.floor(x*p['p']))*\\\n (poisson(p['m']*p['p']).pmf(np.int_(np.ceil(x*p['p'])))*p['p'] -\n poisson(p['m']*p['p']).pmf(np.int_(np.floor(x*p['p'])))*p['p'])\n if not normalized:\n guess = {'m':phot0.mean(), 'p':1/10.}\n X = 0.5*(h[1][:-1]+h[1][1:])\n fit = dpfit.leastsqFit(poissonDist, X, guess, h[0])\n guess = fit['best']\n uncer = fit['uncer']\n chi2 = fit['chi2']\n model = fit['model']\n print 'POISSON: LAMBDA', guess['p']*guess['m']\n print 'POISSON: STD/MEAN', 1/np.sqrt(guess['p']*guess['m'])\n plt.plot(X, poissonDist(X, guess), '-r', linewidth=3,\n alpha=0.8, linestyle='dashed')\n return\n res = {'MEAN':[phot0.mean(), photA.mean(), photB.mean(),\n photC.mean(), photD.mean()],\n 'STD':[phot0.std(), photA.std(), photB.std(), photC.std(), photD.std()],\n '90-10':[phot0[s0[9*len(s0)/10]]-phot0[s0[len(s0)/10]],\n photA[sA[9*len(sA)/10]]-photA[sA[len(sA)/10]],\n photB[sB[9*len(sB)/10]]-photA[sB[len(sB)/10]],\n photC[sC[9*len(sC)/10]]-photA[sC[len(sC)/10]],\n photD[sD[9*len(sD)/10]]-photA[sD[len(sD)/10]]]}\n\n 
res['STD/MEAN'] = [res['STD'][k]/res['MEAN'][k] for k in range(5)]\n res['(90-10)/MEAN'] = [res['90-10'][k]/res['MEAN'][k] for k in range(5)]\n res['(90-10)/STD'] = [res['90-10'][k]/res['STD'][k] for k in range(5)]\n res['BEAMS']=['(A+B+C+D)/4', 'A', 'B', 'C', 'D']\n return res", "def draw_flower_advanced():\n draw_flower()\n turtle.left(90)\n turtle.up()\n turtle.forward(150)\n turtle.left(90)\n turtle.forward(150)\n turtle.right(90)\n turtle.down()", "def fdplot(self, imx):\n fig = plt.figure()\n maxval = np.max(imx)\n ims = list(map(lambda im: [plt.imshow(np.fabs(im),norm=colors.Normalize(0.0,maxval))], imx))\n animation = anim.ArtistAnimation(fig,ims,interval=50)\n plt.show()", "def set_fence_mode(self, on):\r\n return self._arm.set_fense_mode(on)", "def _fctl(self):\n if self._fctl_written:\n return\n data = struct.pack(\n \">4L2H2B\",\n self.width, self.height, 0, 0,\n self.delay_numerator, self.delay_denominator,\n 1, 0)\n self._chunk(b\"fcTL\", self._seqno() + data)\n self._fctl_written = True", "def teleopPeriodic(self):\n #self.drive.arcadeDrive(-1*self.stick.getRawAxis(0), self.stick.getRawAxis(1))\n '''\n if self.stick.getRawButton(7) == True:\n self.driveFlag=0\n self.drive.setMaxOutput(0.5)\n if self.stick.getRawButton(8) == True:\n self.driveFlag=1\n self.driveb = wpilib.drive.DifferentialDrive(self.right, self.left)\n self.driveb.setMaxOutput(0.5)\n if self.driveFlag==1:\n self.driveb.arcadeDrive(self.stick.getRawAxis(5), self.stick.getRawAxis(4))\n '''\n if self.driveFlag==0:\n self.drive.arcadeDrive(self.stick.getRawAxis(1), self.stick.getRawAxis(0))\n \n #Camera Point Front:\n if self.stick.getPOV()==0:\n self.SV1.set(1.0)\n self.sd.putValue('Camera','Forward')\n #Camera Point Back:\n if self.stick.getPOV()==180:\n self.SV1.set(-1.0)\n self.sd.putValue('Camera','Backward')\n #Orient Servo 2\n if self.stick.getPOV()==90:\n self.SV2.set(0.5)\n #Orient Servo 2\n if self.stick.getPOV()==270:\n self.SV2.set(-0.6)\n \n if self.stick.getRawButton(1) == True:\n self.prepareCubeFlag = 1\n self.EC1.reset()\n if self.prepareCubeFlag > 0:\n self.prepareGrabCube()\n if self.stick.getRawButton(2) == True:\n self.grabCubeFlag = 1\n self.EC1.reset()\n if self.grabCubeFlag > 0:\n self.grabCube()\n self.EC2.reset()\n if self.stick.getRawButton(3) == True:\n self.deliverCubeFlag = 1\n if self.deliverCubeFlag > 0: \n self.deliverCube()\n if self.stick.getRawButton(5) == True:\n self.E.set(-0.3)\n if self.stick.getRawButton(6) == True:\n self.E.set(0.3)\n \n #Dashboard\n self.sd.putNumber('Speed', 0.5)\n self.sd.putNumber('Gyro',self.gyro.getAngle())\n self.sd.putValue(\"Camera\", \"Forwards\")\n self.sd.putValue(\"SW1\", self.SW1.get())\n self.sd.putValue(\"SW0\", self.SW0.get())\n self.sd.putValue(\"EC1\",self.EC1.getDistance())\n self.sd.putValue(\"EC2\",self.EC2.getDistance())", "def one_transition_spectrum_fluor(self,tr):\n \n\n ta = tr[\"ta\"] # TimeAxis\n dd = tr[\"dd\"] # transition dipole strength\n om = tr[\"om\"] # frequency - rwa\n gg = tr[\"gg\"] # natural broadening (constant or time dependent)\n fwhm = tr[\"fwhm\"] # Additional gaussian broadening of the spectra\n sgm = fwhm/(2*numpy.sqrt(2*numpy.log(2)))\n \n # CD and fluorescence can be calculated in this step\n # TODO if rotatory strength defined calculate also circular dichroism spectra\n # TOOD calculate fluorescence spectra (for fluorescence there should be a switch because it should be calculated only for the first transition) \n \n \n if self.system._has_system_bath_coupling:\n# ct = tr[\"ct\"] # correlation function\n 
re = tr[\"re\"] # reorganisation energy\n \n # convert correlation function to lineshape function\n #gt = self._c2g(ta,ct.data)\n gt = tr[\"gt\"]\n # calculate time dependent response\n at = numpy.exp(-numpy.conjugate(gt) -1j*om*ta.data + 2j*re*ta.data)\n else:\n # calculate time dependent response\n at = numpy.exp(-1j*om*ta.data) \n# plt.figure()\n# plt.title(\"Absorption\")\n# plt.plot(ta.data,numpy.real(at))\n# plt.plot(ta.data,numpy.imag(at))\n \n \n if len(gg) == 1:\n gam = gg[0]\n rt = numpy.exp(gam*ta.data)\n at *= rt\n #print(\"Constant: \", rt[20], len(at))\n else:\n rt = numpy.exp((gg)*ta.data) \n at *= rt\n #print(\"Time dependent: len = \", rt[20], len(rt))\n \n if fwhm!=0.0:\n gauss = numpy.exp(-2*(numpy.pi**2)*(sgm**2)*(ta.data**2))\n at *= gauss\n \n # Fourier transform the result\n ft = dd*numpy.fft.hfft(at)*ta.step\n ft = numpy.fft.fftshift(ft)\n # invert the order because hfft is a transform with -i\n ft = numpy.flipud(ft) \n # cut the center of the spectrum\n Nt = ta.length #len(ta.data) \n return ft[Nt//2:Nt+Nt//2]", "def main():\n save = False\n show = True\n\n #hd_parameter_plots = HDparameterPlots(save=save)\n #hd_parameter_plots.flow_parameter_distribution_for_non_lake_cells_for_current_HD_model()\n #hd_parameter_plots.flow_parameter_distribution_current_HD_model_for_current_HD_model_reprocessed_without_lakes_and_wetlands()\n #hd_parameter_plots.flow_parameter_distribution_ten_minute_data_from_virna_0k_ALG4_sinkless_no_true_sinks_oceans_lsmask_plus_upscale_rdirs()\n #hd_parameter_plots.flow_parameter_distribution_ten_minute_data_from_virna_0k_ALG4_sinkless_no_true_sinks_oceans_lsmask_plus_upscale_rdirs_no_tuning()\n #ice5g_comparison_plots = Ice5GComparisonPlots(save=save)\n #ice5g_comparison_plots.plotLine()\n #ice5g_comparison_plots.plotFilled()\n #ice5g_comparison_plots.plotCombined()\n #ice5g_comparison_plots.plotCombinedIncludingOceanFloors()\n #flowmapplot = FlowMapPlots(save)\n #flowmapplot.FourFlowMapSectionsFromDeglaciation()\n #flowmapplot.Etopo1FlowMap()\n #flowmapplot.ICE5G_data_all_points_0k()\n #flowmapplot.ICE5G_data_all_points_0k_no_sink_filling()\n #flowmapplot.ICE5G_data_all_points_0k_alg4_two_color()\n #flowmapplot.ICE5G_data_all_points_21k_alg4_two_color()\n #flowmapplot.Etopo1FlowMap_two_color()\n #flowmapplot.Etopo1FlowMap_two_color_directly_upscaled_fields()\n #flowmapplot.Corrected_HD_Rdirs_FlowMap_two_color()\n #flowmapplot.ICE5G_data_ALG4_true_sinks_21k_And_ICE5G_data_ALG4_true_sinks_0k_FlowMap_comparison()\n #flowmapplot.Corrected_HD_Rdirs_And_Etopo1_ALG4_sinkless_directly_upscaled_fields_FlowMap_comparison()\n #flowmapplot.Corrected_HD_Rdirs_And_Etopo1_ALG4_true_sinks_directly_upscaled_fields_FlowMap_comparison()\n #flowmapplot.Corrected_HD_Rdirs_And_ICE5G_data_ALG4_sinkless_0k_directly_upscaled_fields_FlowMap_comparison()\n #flowmapplot.Corrected_HD_Rdirs_And_ICE5G_data_ALG4_true_sinks_0k_directly_upscaled_fields_FlowMap_comparison()\n #flowmapplot.Corrected_HD_Rdirs_And_ICE5G_data_ALG4_corr_orog_0k_directly_upscaled_fields_FlowMap_comparison()\n #flowmapplot.Corrected_HD_Rdirs_And_ICE5G_data_ALG4_corr_orog_downscaled_ls_mask_0k_directly_upscaled_fields_FlowMap_comparison()\n #flowmapplot.Corrected_HD_Rdirs_And_ICE5G_data_ALG4_no_true_sinks_corr_orog_0k_directly_upscaled_fields_FlowMap_comparison()\n #flowmapplot.Corrected_HD_Rdirs_And_ICE5G_HD_as_data_ALG4_true_sinks_0k_directly_upscaled_fields_FlowMap_comparison()\n 
#flowmapplot.Upscaled_Rdirs_vs_Directly_Upscaled_fields_ICE5G_data_ALG4_corr_orog_downscaled_ls_mask_0k_FlowMap_comparison()\n #flowmapplot.Ten_Minute_Data_from_Virna_data_ALG4_corr_orog_downscaled_lsmask_no_sinks_21k_vs_0k_FlowMap_comparison()\n #flowmapplot.Upscaled_Rdirs_vs_Corrected_HD_Rdirs_ICE5G_data_ALG4_corr_orog_downscaled_ls_mask_0k_FlowMap_comparison()\n flowmapplotwithcatchment = FlowMapPlotsWithCatchments(save)\n #flowmapplotwithcatchment.Upscaled_Rdirs_vs_Corrected_HD_Rdirs_ICE5G_data_ALG4_corr_orog_downscaled_ls_mask_0k_FlowMap_comparison()\n #flowmapplotwithcatchment.compare_present_day_and_lgm_river_directions_with_catchments_virna_data_plus_tarasov_style_orog_corrs_for_both()\n #flowmapplotwithcatchment.compare_present_day_river_directions_with_catchments_virna_data_with_vs_without_tarasov_style_orog_corrs()\n #flowmapplotwithcatchment.compare_lgm_river_directions_with_catchments_virna_data_with_vs_without_tarasov_style_orog_corrs()\n #flowmapplotwithcatchment.Upscaled_Rdirs_vs_Corrected_HD_Rdirs_tarasov_upscaled_data_ALG4_corr_orog_downscaled_ls_mask_0k_FlowMap_comparison()\n #flowmapplotwithcatchment.upscaled_rdirs_with_and_without_tarasov_upscaled_data_ALG4_corr_orog_downscaled_ls_mask_0k_FlowMap_comparison()\n #flowmapplotwithcatchment.\\\n #upscaled_rdirs_with_and_without_tarasov_upscaled_north_america_only_data_ALG4_corr_orog_downscaled_ls_mask_0k_FlowMap_comparison()\n #flowmapplotwithcatchment.\\\n #Upscaled_Rdirs_vs_Corrected_HD_Rdirs_tarasov_upscaled_north_america_only_data_ALG4_corr_orog_downscaled_ls_mask_0k_FlowMap_comparison()\n #flowmapplotwithcatchment.\\\n #Upscaled_Rdirs_vs_Corrected_HD_Rdirs_tarasov_upscaled_north_america_only_data_ALG4_corr_orog_glcc_olson_lsmask_0k_FlowMap_comparison()\n #flowmapplotwithcatchment.compare_present_day_and_lgm_river_directions_with_catchments_ICE5G_plus_tarasov_style_orog_corrs_for_both()\n #flowmapplotwithcatchment.compare_present_day_and_lgm_river_directions_with_catchments_ICE6G_plus_tarasov_style_orog_corrs_for_both()\n #flowmapplotwithcatchment.compare_ICE5G_and_ICE6G_with_catchments_tarasov_style_orog_corrs_for_both()\n #flowmapplotwithcatchment.compare_river_directions_with_dynriver_corrs_and_MERIThydro_derived_corrs()\n #flowmapplotwithcatchment.compare_river_directions_with_dynriver_corrs_and_MERIThydro_derived_corrs_original_ts()\n flowmapplotwithcatchment.compare_river_directions_with_dynriver_corrs_and_MERIThydro_derived_corrs_new_ts_10min()\n outflowplots = OutflowPlots(save)\n #outflowplots.Compare_Upscaled_Rdirs_vs_Directly_Upscaled_fields_ICE5G_data_ALG4_corr_orog_downscaled_ls_mask_0k()\n #outflowplots.Compare_Corrected_HD_Rdirs_And_ICE5G_as_HD_data_ALG4_sinkless_all_points_0k()\n #outflowplots.Compare_Corrected_HD_Rdirs_And_ICE5G_as_HD_data_ALG4_true_sinks_all_points_0k()\n #outflowplots.Compare_Corrected_HD_Rdirs_And_ICE5G_ALG4_sinkless_all_points_0k_directly_upscaled_fields()\n #outflowplots.Compare_Corrected_HD_Rdirs_And_ICE5G_ALG4_true_sinks_all_points_0k_directly_upscaled_fields()\n #outflowplots.Compare_Corrected_HD_Rdirs_And_ICE5G_ALG4_corr_orog_all_points_0k_directly_upscaled_fields()\n #outflowplots.Compare_Corrected_HD_Rdirs_And_ICE5G_ALG4_corr_orog_downscaled_ls_mask_all_points_0k_directly_upscaled_fields()\n #outflowplots.Compare_Corrected_HD_Rdirs_And_Etopo1_ALG4_sinkless_directly_upscaled_fields()\n #outflowplots.Compare_Corrected_HD_Rdirs_And_Etopo1_ALG4_true_sinks_directly_upscaled_fields()\n 
#outflowplots.Compare_Corrected_HD_Rdirs_And_ICE5G_plus_tarasov_upscaled_srtm30_ALG4_corr_orog_0k_directly_upscaled_fields()\n #outflowplots.Compare_Original_Corrections_vs_Upscaled_MERIT_DEM_0k()\n outflowplots.Compare_Original_Corrections_vs_Upscaled_MERIT_DEM_0k_new_truesinks()\n #outflowplots.Compare_Original_Corrections_vs_Upscaled_MERIT_DEM_0k_new_truesinks_individual_rivers()\n #outflowplots.Compare_ICE5G_with_and_without_tarasov_upscaled_srtm30_ALG4_corr_orog_0k_directly_upscaled_fields()\n #hd_output_plots = HDOutputPlots()\n #hd_output_plots.check_water_balance_of_1978_for_constant_forcing_of_0_01()\n #hd_output_plots.plot_comparison_using_1990_rainfall_data()\n #hd_output_plots.plot_comparison_using_1990_rainfall_data_adding_back_to_discharge()\n #coupledrunoutputplots = CoupledRunOutputPlots(save=save)\n #coupledrunoutputplots.ice6g_rdirs_lgm_run_discharge_plot()\n #coupledrunoutputplots.extended_present_day_rdirs_lgm_run_discharge_plot()\n #coupledrunoutputplots.ocean_grid_extended_present_day_rdirs_vs_ice6g_rdirs_lgm_run_discharge_plot()\n #coupledrunoutputplots.extended_present_day_rdirs_vs_ice6g_rdirs_lgm_echam()\n #coupledrunoutputplots.extended_present_day_rdirs_vs_ice6g_rdirs_lgm_mpiom_pem()\n #lake_plots = LakePlots()\n #lake_plots.plotLakeDepths()\n #lake_plots.LakeAndRiverMap()\n #lake_plots.LakeAndRiverMaps()\n if show:\n plt.show()", "def update_focal_axes(self):\n self.update_sigma()\n self.updateGL()", "def set_flammable(self, f):\n self.flammable = f", "def setupconfig():\n from Manager import Studio\n studio = Studio.Instance\n cfgeff = studio.configEffect_st\n cfgeff.bloomToggle.isOn = False\n cfgeff.vignetteToggle.isOn = False\n cfgeff.sunShaftsToggle.isOn = False\n cfgeff.fogToggle.isOn = False\n cfgeff.depthOfFieldToggle.isOn = False\n #cfgeff.ssaoToggle.isOn = True\n #cfgeff.selfShadowToggle.isOn = True\n \n # Turn off backgrounds\n studio.uiBGChanger.onOffToggle.isOn = False", "def wfits(self, filename=None):\n with self.lock:\n dark = self.dark\n if not filename:\n if dark != 0:\n filename = self.getNextFilename(\"dark\")\n else:\n filename = self.getNextFilename(\"object\")\n with self.lock:\n if(self.data.size == 0):\n raise FliError(\"No image available\")\n hdu = pyfits.PrimaryHDU(self.data)\n hdr = hdu.header\n with self.lock:\n hdr.set('DATE', self.timestamp, 'exposure begin date')\n hdr.set('INSTRUME', self.devname, 'this instrument')\n hdr.set('SERIAL', self.devsn, 'serial number')\n hdr.set('EXPTIME', self.exptime, 'exposure time (ms)')\n hdr.set('VBIN', self.vbin, 'vertical binning')\n hdr.set('HBIN', self.hbin, 'horizontal binning')\n hdr.set('CCD-TEMP', self.temp, 'CCD temperature')\n if dark != 0:\n hdr.set('SHUTTER', 'CLOSE', 'shutter status')\n else:\n hdr.set('SHUTTER', 'OPEN', 'shutter status')\n hdr.set('CCDAREA', '[%d:%d,%d:%d]' % self.expArea, 'image area')\n hdu.writeto(filename, overwrite=True, checksum=True)\n with self.lock:\n self.filename = filename", "def switch_frequency_plot_channel_four(self):\n if self.plot_channel_key_booleans[3]:\n self.plot_channel_key_booleans[3] = False\n self.parent_widget.graph_channel_four_button.setStyleSheet(\n \"background-color:rgb(%d,%d,%d)\" % (255, 255, 255))\n else:\n self.plot_channel_key_booleans[3] = True\n self.parent_widget.graph_channel_four_button.setStyleSheet(\n \"background-color:rgb(%d,%d,%d)\" % (LINE_COLORS[3]))", "def setup_fader(self):\n ScreenFader(fade=\"in\")\n self.should_change_scene = False\n self.should_fade_out = False\n self.change_scene_timer = 0.0", "def 
toggleExposure(self, state):\n if state == False:\n freenect.sync_get_video_with_res(\n resolution=freenect.RESOLUTION_HIGH)\n # print(freenect.sync_set_autoexposure(False))\n freenect.sync_set_autoexposure(False)\n # print(freenect.sync_set_whitebalance(False))\n freenect.sync_set_whitebalance(False)\n else:\n freenect.sync_get_video_with_res(\n resolution=freenect.RESOLUTION_HIGH)\n # print(freenect.sync_set_autoexposure(True))\n freenect.sync_set_autoexposure(True)\n # print(freenect.sync_set_whitebalance(True))\n freenect.sync_set_whitebalance(True)", "def fiducial_evolution():\n \n # fiducial model\n wangle = 180*u.deg\n pk = pickle.load(open('../data/gd1_fiducial.pkl', 'rb'))\n x = pk['x'].to(u.kpc)\n xorig = x[:2]\n \n plt.close()\n fig, ax = plt.subplots(1,1,figsize=(6,6))\n \n plt.sca(ax)\n \n Nsnap = 8\n times = np.linspace(0,0.5,Nsnap)[::-1]\n angles = np.linspace(0,322,Nsnap)[::-1]*u.deg\n\n for e, t in enumerate(times):\n c = mpl.cm.Blues(0.05+0.85*(Nsnap-e)/Nsnap)\n #a = 0.5 + 0.5*(Nsnap-e)/Nsnap\n \n pk = pickle.load(open('../data/gd1_fiducial_t{:.4f}.pkl'.format(t), 'rb'))\n x = pk['x'].to(u.kpc)\n x_, y_ = x[0], x[1]\n \n plt.plot(x_[120:-120], y_[120:-120], '.', color=c, ms=10, zorder=Nsnap-e, rasterized=False)\n \n xt = 24*np.cos(angles[e]+90*u.deg)\n yt = 24*np.sin(angles[e]+90*u.deg)\n if e<Nsnap-1:\n txt = plt.text(xt, yt, '+ {:.2f} Gyr'.format(t), va='center', ha='center', fontsize='small', color='0.2', rotation=(angles[e]).value, zorder=10)\n txt.set_bbox(dict(facecolor='w', alpha=0.7, ec='none'))\n \n plt.text(0, 24, 'Flyby', va='center', ha='center', fontsize='small', color='0.2')\n\n lim = 27\n plt.xlim(-lim,lim)\n plt.ylim(-lim,lim)\n plt.gca().set_aspect('equal')\n \n plt.xlabel('x [kpc]')\n plt.ylabel('y [kpc]')\n \n plt.tight_layout()\n plt.savefig('../plots/loop_evolution.pdf')", "def terminatePlane3D():\n dislin.grffin()", "def fullcore_detectors():\n\n cwd = os.getcwd()\n fname = get_sample_data('%s/oecd-fullcore_geom1.png' % (cwd))\n im = plt.imread(fname)\n\n # crop the image\n height, width, color = np.shape(im)\n y1 = int(height*0.15)\n y2 = int(height*0.6)\n x1 = int(width*0.45)\n x2 = int(width)\n plt.imshow(im[y1:y2,x1:x2,:])\n plt.axis('off')\n\n # Axial 1\n x = 158\n y = 291\n P = 55\n s = P/2/np.cos(np.pi/6)\n plt.plot([s+x, 2*s+x], [0+y, 0+y], 'r-', lw=1.5, label='1- Axial1')\n plt.plot([s+x, 2*s+x], [P+y, P+y], 'r-', lw=1.5)\n plt.plot([s+x, s/2+x], [0+y, P/2+y], 'r-', lw=1.5)\n plt.plot([s/2+x, s+x], [P/2+y, P+y], 'r-', lw=1.5)\n plt.plot([2*s+x, 2*s+s/2+x], [0+y, P/2+y], 'r-', lw=1.5)\n plt.plot([2*s+s/2+x, 2*s+x], [P/2+y, P+y], 'r-', lw=1.5)\n\n plt.text(x=x+37, y=y+40, s='1', fontsize=20, color='w')\n\n # Axial 2\n x = 210\n y = 321\n P = 55\n s = P/2/np.cos(np.pi/6)\n plt.plot([s+x, 2*s+x], [0+y, 0+y], 'r-', lw=1.5, label='2- Axial2')\n plt.plot([s+x, 2*s+x], [P+y, P+y], 'r-', lw=1.5)\n plt.plot([s+x, s/2+x], [0+y, P/2+y], 'r-', lw=1.5)\n plt.plot([s/2+x, s+x], [P/2+y, P+y], 'r-', lw=1.5)\n plt.plot([2*s+x, 2*s+s/2+x], [0+y, P/2+y], 'r-', lw=1.5)\n plt.plot([2*s+s/2+x, 2*s+x], [P/2+y, P+y], 'r-', lw=1.5)\n plt.text(x=x+37, y=y+40, s='2', fontsize=20, color='w')\n\n # Axial 3\n x = 262\n y = 291\n P = 55\n s = P/2/np.cos(np.pi/6)\n plt.plot([s+x, 2*s+x], [0+y, 0+y], 'r-', lw=1.5, label='3- Axial3')\n plt.plot([s+x, 2*s+x], [P+y, P+y], 'r-', lw=1.5)\n plt.plot([s+x, s/2+x], [0+y, P/2+y], 'r-', lw=1.5)\n plt.plot([s/2+x, s+x], [P/2+y, P+y], 'r-', lw=1.5)\n plt.plot([2*s+x, 2*s+s/2+x], [0+y, P/2+y], 'r-', lw=1.5)\n plt.plot([2*s+s/2+x, 2*s+x], 
[P/2+y, P+y], 'r-', lw=1.5)\n\n plt.text(x=x+37, y=y+40, s='3', fontsize=20, color='w')\n\n # Radial 1\n x = 52\n y = 349\n plt.plot([x, 495+x], [y, y], 'r-', lw=1.5, label='4- Radial1')\n plt.plot([x, 495+x], [y, y], 'r-', lw=1.5, label='5- Radial2')\n plt.text(x=x+380, y=y-10, s='4, 5', fontsize=20, color='black')\n\n # Radial 2\n x = 52\n y = 349\n L = 495\n plt.plot([x, L*np.cos(np.pi/6)+x], [y, -L/2+y], 'r-', lw=1.5, label='6- Radial3')\n plt.text(x=350, y=y-200, s='6', rotation=30, fontsize=20, color='black')\n plt.legend(loc='best')\n\n plt.savefig(\"oecd-fullcore-detectors\", dpi=300, bbox_inches=\"tight\")", "def switch_frequency_plot_channel_six(self):\n if self.plot_channel_key_booleans[5]:\n self.plot_channel_key_booleans[5] = False\n self.parent_widget.graph_channel_six_button.setStyleSheet(\n \"background-color:rgb(%d,%d,%d)\" % (255, 255, 255))\n else:\n self.plot_channel_key_booleans[5] = True\n self.parent_widget.graph_channel_six_button.setStyleSheet(\n \"background-color:rgb(%d,%d,%d)\" % (LINE_COLORS[5]))", "def _fp_setup2(self):\n # TODO: right now it's hard to implement this required stage", "def test_draw():\n circ_m = test_QFTn(3)\n print(launch(1024, circ_m))\n fig = circ_m.draw(output='mpl', filename='C:/Users/RaphaelLambert/Pictures/test.png')\n return fig", "def toggle_wireframe(self):\n self.view['wireframe'] = not self.view['wireframe']\n self.update_flags()", "def paint_focal_axes(self):\n GL.glTranslatef(*self.focus) # translate to focus\n self.paint_axes(self.sigma)\n GL.glTranslatef(*-self.focus) # translate back", "def setSurfaceMeshing(state='off',shading=1):\n sdict = {'off':'OFF','on':'ON'}\n val = sdict[state]\n if not shading:\n val = 'ONLY'\n dislin.surmsh(val)", "def switch_frequency_plot_channel_five(self):\n if self.plot_channel_key_booleans[4]:\n self.plot_channel_key_booleans[4] = False\n self.parent_widget.graph_channel_five_button.setStyleSheet(\n \"background-color:rgb(%d,%d,%d)\" % (255, 255, 255))\n else:\n self.plot_channel_key_booleans[4] = True\n self.parent_widget.graph_channel_five_button.setStyleSheet(\n \"background-color:rgb(%d,%d,%d)\" % (LINE_COLORS[4]))", "def flip_faceup(self):\r\n self.faceup = True", "def butter_hpf(highcut, fs, order):\n nyq = 0.5 * fs\n high = highcut / nyq\n b, a = signal.butter(order, high, btype='highpass')\n w, h = signal.freqz(b, a, worN=1000)\n# plt.figure()\n# plt.plot((fs * 0.5 / np.pi) * w, abs(h))\n return b, a", "def notch_filter_raw_plot(data, fs, fc):\n b, a = sp.iirnotch(w0=fc / fs * 2, Q=100)\n w, h = sp.freqz(b, a)\n f = w / np.pi * fs / 2\n plt.figure()\n plt.plot(f, 10 * np.log10(abs(h)))\n plt.xlabel('frequency (Hz)')\n plt.ylabel('Magnitude (dB)')\n plt.title('frequency response of notch filter at 50Hz')\n plt.grid()\n\n data1 = sp.filtfilt(b, a, data)\n return data1", "def test_guider_start_ffsClosed(self):\n self._guider_start(6, 20, 0, 0)", "def waveform_force_trigger(self):\n self.triggerSettings = [\"Forced\"]\n self.waveform_capture(enable=True, sleep=None)", "def visualize(z_in, azimuth=25., elevation=30.,\n thresholds=[0.95, .9, .75, .5, .25, .125], opacities=[1, .9, .7, .5, .2, .1],\n# thresholds=[0.94, .89, .75, .5, .25, .1], opacities=[.9, .8, .7, .5, .2, .1],\n# thresholds=[0.94, .89, .75], opacities=[.99, .7, .2],\n# thresholds=[0.7, .5, .2], opacities=[.95, .5, .2],\n fourier_label = {'f_x':'f_x', 'f_y':'f_y', 'f_t':'f_t'},\n filename=None, do_axis=True, do_grids=False, draw_projections=True,\n colorbar=False, f_N=2., f_tN=2., figsize=figsize, dpi=300, figpath=figpath, 
**kwargs):\n z = z_in.copy()\n N_X, N_Y, N_frame = z.shape\n fx, fy, ft = get_grids(N_X, N_Y, N_frame)\n\n # Normalize the amplitude.\n z /= z.max()\n\n from vispy import app, scene, use\n try:\n AffineTransform = scene.transforms.AffineTransform\n except:\n AffineTransform = scene.transforms.MatrixTransform\n\n use(app='pyglet', gl='pyopengl2')\n #from vispy.util.transforms import perspective, translate, rotate\n from vispy.color import Color\n transparent = Color(color='black', alpha=0.)\n import colorsys\n canvas = scene.SceneCanvas(size=figsize, bgcolor='white', dpi=dpi)\n view = canvas.central_widget.add_view()\n\n vol_data = np.rollaxis(np.rollaxis(z, 1), 2)\n# volume = scene.visuals.Volume(vol_data, parent=view.scene)#frame)\n center = scene.transforms.STTransform(translate=( -N_X/2, -N_Y/2, -N_frame/2))\n# volume.transform = center\n# volume.cmap = 'blues'\n\n if draw_projections:\n from vispy.color import Colormap\n cm = Colormap([(1.0, 1.0, 1.0, 1.0), 'k'])\n opts = {'parent':view.scene, 'cmap':cm, 'clim':(0., 1.)}\n\n energy_xy = np.rot90(np.max(z, axis=2)[:, ::-1], 3)#[:, ::-1]\n fourier_xy = scene.visuals.Image(np.rot90(energy_xy), **opts)\n tr_xy = AffineTransform()\n tr_xy.rotate(90, (0, 0, 1))\n tr_xy.translate((N_X/2, -N_Y/2, -N_frame/2))\n fourier_xy.transform = tr_xy\n\n energy_xt = np.rot90(np.max(z, axis=1)[:, ::-1], 3)[::-1, ::-1]\n fourier_xt = scene.visuals.Image(energy_xt, **opts)\n tr_xt = AffineTransform()\n tr_xt.rotate(90, (1, 0, 0))\n tr_xt.translate((-N_X/2, N_Y/2, -N_frame/2))\n fourier_xt.transform = tr_xt\n\n energy_yt = np.max(z, axis=0)[:, ::-1]\n fourier_yt = scene.visuals.Image(energy_yt, **opts)\n tr_yt = AffineTransform()\n tr_yt.rotate(90, (0, 1, 0))\n tr_yt.translate((-N_X/2, -N_Y/2, N_frame/2))\n fourier_yt.transform = tr_yt\n\n # Generate iso-surfaces at different energy levels\n surfaces = []\n for i_, (threshold, opacity) in enumerate(zip(thresholds, opacities)):\n surfaces.append(scene.visuals.Isosurface(z, level=threshold,\n# color=Color(np.array(colorsys.hsv_to_rgb(1.*i_/len(thresholds), 1., 1.)), alpha=opacity),\n color=Color(np.array(colorsys.hsv_to_rgb(.66, 1., 1.)), alpha=opacity),\n shading='smooth', parent=view.scene)\n )\n surfaces[-1].transform = center\n\n # Draw a sphere at the origin\n axis = scene.visuals.XYZAxis(parent=view.scene)\n for p in ([1, 1, 1, -1, 1, 1], [1, 1, -1, -1, 1, -1], [1, -1, 1, -1, -1, 1],[1, -1, -1, -1, -1, -1],\n [1, 1, 1, 1, -1, 1], [-1, 1, 1, -1, -1, 1], [1, 1, -1, 1, -1, -1], [-1, 1, -1, -1, -1, -1],\n [1, 1, 1, 1, 1, -1], [-1, 1, 1, -1, 1, -1], [1, -1, 1, 1, -1, -1], [-1, -1, 1, -1, -1, -1]):\n line = scene.visuals.Line(pos=np.array([[p[0]*N_X/2, p[1]*N_Y/2, p[2]*N_frame/2], [p[3]*N_X/2, p[4]*N_Y/2, p[5]*N_frame/2]]), color='black', parent=view.scene)\n\n axisX = scene.visuals.Line(pos=np.array([[0, -N_Y/2, 0], [0, N_Y/2, 0]]), color='red', parent=view.scene)\n axisY = scene.visuals.Line(pos=np.array([[-N_X/2, 0, 0], [N_X/2, 0, 0]]), color='green', parent=view.scene)\n axisZ = scene.visuals.Line(pos=np.array([[0, 0, -N_frame/2], [0, 0, N_frame/2]]), color='blue', parent=view.scene)\n\n if do_axis:\n t = {}\n for text in ['f_x', 'f_y', 'f_t']:\n t[text] = scene.visuals.Text(fourier_label[text], parent=canvas.scene, face='Helvetica', color='black')\n t[text].font_size = 8\n t['f_x'].pos = canvas.size[0] // 3, canvas.size[1] - canvas.size[1] // 8\n t['f_y'].pos = canvas.size[0] - canvas.size[0] // 8, canvas.size[1] - canvas.size[1] // 6\n t['f_t'].pos = canvas.size[0] // 8, canvas.size[1] // 2\n\n cam = 
scene.TurntableCamera(elevation=elevation, azimuth=azimuth, up='z')\n cam.fov = 48\n cam.scale_factor = N_X * 1.8\n if do_axis: margin = 1.35\n else: margin = 1\n cam.set_range((-N_X/2*margin, N_X/2/margin), (-N_Y/2*margin, N_Y/2/margin), (-N_frame/2*margin, N_frame/2/margin))\n view.camera = cam\n\n render_im = canvas.render()\n app.quit()\n if not(filename is None):\n import vispy.io as io\n io.write_png(filename, render_im)\n else:\n return render_im", "def plotArt(self):\n self.isArt=True\n warr=self.ws.value(self.xarr)\n asfarr=st.interpolate(warr, self.swarr, self.sfarr, left=0.0, right=0.0)\n asfarr=asfarr*self.farr.max()/asfarr.max()\n self.fpcurve,=self.axes.plot(self.xarr,asfarr,linewidth=0.5,linestyle='-',\n marker='None',color='r')", "def plot(self):\n self.fig = plt.figure('black hole')\n self.fig.clf() #clear the graph to avoir superposing data from the same set (can be deactivated if need to superpose)\n self.ax = plt.subplot()\n\n if self.img2 is not None:\n self.ax.imshow(self.img2)\n else:\n print(\"No black hole deformation in the memory, displayed the original image instead.\")\n self.ax.imshow(self.img_debut)\n\n self.fig.canvas.set_window_title('Black hole')\n self.ax.set_title(\"scrool to zoom in or out \\nright click to add an offset in the background \\nleft click to refresh image \\n close the option windows to stop the program\")\n self.fig.canvas.mpl_connect('scroll_event', self.onscroll)\n self.fig.canvas.mpl_connect('button_press_event', self.onclick)\n self.fig.canvas.mpl_connect('axes_leave_event', self.disconnect)\n self.fig.canvas.mpl_connect('axes_enter_event', self.connect)\n\n self.draw()", "def plot5(self):\n\n cond = ((self.ds.freq<32.6) & (self.ds.freq>17))\n freq = self.ds.freq[cond]\n power = self.ds.power[cond]\n\n # the modes for KIC 9205705\n m = pd.read_csv('/home/mxs191/Desktop/MathewSchofield/TRG/GetData/Modes/modes_9205705.csv')\n m1 = [17.65, 20.6, 23.7, 26.9, 30.1] # l=1\n\n plt.rc('font', size=18)\n fig, ax = plt.subplots()\n plt.plot(freq, power, zorder=1, alpha=0.4)\n\n # NOTE: annotate mode angular degrees\n plt.scatter(m['f0'].as_matrix(), np.full(len(m), 150000), c='k', zorder=2, s=80)\n plt.scatter(m['f2'].as_matrix(), np.full(len(m), 130000), c='mediumseagreen', zorder=2, s=80, marker='^')\n plt.scatter(m1, np.full(len(m1), 140000), c='grey', zorder=2, s=80, marker='v')\n\n # NOTE: plot envelope\n numax = info['numax'].as_matrix() # mu Hz\n env_width = 0.66 * numax**0.88\n plt.plot(freq, 40004*np.exp( -( (freq-24.7)**2 / (2*7.**2) ) ), c='k', linestyle='--')\n\n # NOTE: annotate envelope\n style = dict(size=16, color='k')\n ax.text(24.1, 49167, r\"$\\nu_{\\rm max}$\", color='k', size=18)\n ax.text(24.1, 20994, r\"$\\Gamma_{\\rm env}$\", color='k', size=18)\n ax.text(23, 162944, r\"$\\Delta \\nu$\", **style)\n plt.annotate(s='', xy=(25.3, 158610), xytext=(21.91, 158610),\n arrowprops=dict(arrowstyle='<->')) # dnu\n plt.annotate(s='', xy=((24.7-env_width/2.), 15861), xytext=((24.7+env_width/2.), 15861),\n arrowprops=dict(arrowstyle='<->')) # env width\n\n ax.set_xlabel(r'Frequency ($\\rm \\mu Hz$)')\n ax.set_ylabel(r'PSD ($\\rm ppm^{2} \\, \\mu Hz^{-1}$)')\n plt.xlim(17, 32.6)\n plt.ylim(17, 195181)\n plt.tight_layout()\n plt.show()\n fig.savefig(os.getcwd() + '/DetTest1_plots/Plot5_ps_' + str(self.ds.epic) + '.pdf')", "def front_wheel_from_axis():", "def wfsInit():\n wfs.init()\n wfs.setExposure()", "def app(ngr=100,c2a=1.6235):\n import matplotlib.pyplot as plt\n\n ## small donuts\n # plt.gcf().clf()\n grs = 
main(mu=0,ngrains=ngr,tilt_1=30.,sigma=15)\n plt.gcf().savefig('small_doughnut.pdf',bbox_inches='tight')\n plt.gcf().clf()\n f = gen_file(lab='sm_doughnut',ngr=ngr)\n write_gr(f,grs)\n\n ## Big donuts\n grs = main(mu=0,ngrains=ngr,tilt_1=50.,sigma=15)\n plt.gcf().savefig('big_doughnut.pdf',bbox_inches='tight')\n plt.gcf().clf()\n f = gen_file(lab='big_doughnut',ngr=ngr)\n write_gr(f,grs)\n\n ## twin tilts (30).\n gr1=main(mu=0,ngrains=ngr/2,tilts_about_ax1=30.,sigma=45)\n plt.gcf().clf()\n gr2=main(mu=0,ngrains=ngr/2,tilts_about_ax1=-30.,sigma=45)\n plt.gcf().clf()\n grs =[]\n for i in range(len(gr1)):\n grs.append(gr1[i])\n grs.append(gr2[i])\n grs=np.array(grs)\n mypf=upf.polefigure(grains=grs,csym='hexag',cdim=[1,1,c2a])\n mypf.pf_new(poles=[[0,0,0,1],[1,0,-1,0]],cmap='jet',ix='TD',iy='RD')\n plt.gcf().savefig('t30.pdf',bbox_inches='tight')\n f = gen_file(lab='dbl_lets_30',ngr=ngr)\n write_gr(f,grs)\n\n ## twin tilts (50).\n gr1=main(mu=0,ngrains=ngr/2,tilts_about_ax1=50.,sigma=45)\n plt.gcf().clf()\n gr2=main(mu=0,ngrains=ngr/2,tilts_about_ax1=-50.,sigma=45)\n plt.gcf().clf()\n gr =[]\n for i in range(len(gr1)):\n gr.append(gr1[i])\n gr.append(gr2[i])\n gr=np.array(gr)\n mypf=upf.polefigure(grains=gr,csym='hexag',cdim=[1,1,c2a])\n mypf.pf_new(poles=[[0,0,0,1],[1,0,-1,0]],cmap='jet',ix='TD',iy='RD')\n plt.gcf().savefig('t50.pdf',bbox_inches='tight')\n plt.gcf().clf()\n f = gen_file(lab='dbl_lets_50',ngr=ngr)\n write_gr(f,gr)", "def displayFiducial(self):\n #obsolete?\n profbox()\n modelNodes = slicer.util.getNodes('vtkMRMLModelNode*')\n for modelNode in modelNodes.values():\n displayNode = modelNode.GetDisplayNode()\n if modelNode.GetAttribute(\"segmented\") == \"1\" and modelNode.GetAttribute(\"nth\")!=None:\n if 1:\n i = int(modelNode.GetAttribute(\"nth\"))\n if self.fiducialnode[i] == 0: \n polyData = modelNode.GetPolyData()\n nb = int(polyData.GetNumberOfPoints()-1)\n coord = [0,0,0]\n if nb>10:\n self.fiducialnode[i] = slicer.vtkMRMLAnnotationFiducialNode()\n polyData.GetPoint(nb,coord) \n self.fiducialnode[i].SetName(self.option[i])\n self.fiducialnode[i].SetFiducialCoordinates(coord) \n self.fiducialnode[i].Initialize(slicer.mrmlScene)\n self.fiducialnode[i].SetLocked(1)\n self.fiducialnode[i].SetSelectable(0)\n fidDN = self.fiducialnode[i].GetDisplayNode()\n fidDN.SetColor(modelNode.GetDisplayNode().GetColor())\n fidDN.SetGlyphScale(0)\n fidTN = self.fiducialnode[i].GetAnnotationTextDisplayNode()\n fidTN.SetTextScale(3)\n fidTN.SetColor(modelNode.GetDisplayNode().GetColor())\n \n self.fiducialnode[i].SetDisplayVisibility(modelNode.GetDisplayNode().GetVisibility())\n else: \n if modelNode.GetDisplayNode().GetVisibility():\n self.fiducialnode[i].SetDisplayVisibility(abs(self.fiducialnode[i].GetDisplayVisibility()-1))\n if self.fiducialnode[i].GetDisplayVisibility()==1:\n self.displayFiducialButton.text = \"Hide Labels on Needles\"\n else:\n self.displayFiducialButton.text = \"Display Labels on Needles\"", "def set_cartesian_velo_continuous(self, on_off):\r\n return self._arm.set_cartesian_velo_continuous(on_off)", "def autoExposureChk(self, state):\n if state == Qt.Checked and self.kinect.kinectConnected == True:\n self.kinect.toggleExposure(True)\n else:\n self.kinect.toggleExposure(False)", "def show_filters(self):\n w_mat = np.transpose(self.sess.run(self.W_fc1))\n\n plt.figure(figsize=(10,10), facecolor='w', edgecolor='w')\n plot_positions = [(0,0),(0,1),(1,0),(1,1)]\n for ch in range(self.n_input_channels):\n grid,_ = ia.image_grid_RGB( w_mat,\n 
n_channels=self.n_input_channels,\n image_size=(self.y_res,self.x_res), n_x=6, n_y=6,\n channel_order=(ch,ch,ch), amplitude_scaling=(1,1,1),\n line_color=1, auto_scale=True, return_borders=False )\n colormax = np.abs(grid).max()\n with sns.axes_style(\"white\"):\n ax = plt.subplot2grid( (2,2), plot_positions[ch] )\n ax.imshow( grid[:,:,0], interpolation='nearest',\n cmap=plt.get_cmap('seismic'),\n clim=(-1*colormax,colormax) )\n ax.set_title(\"Hidden units, channel {}\".format(ch))\n plt.axis('tight')\n plt.axis('off')\n plt.tight_layout()", "def fbank(signal,samplerate=16000,winlen=0.025,winstep=0.01,\n nfilt=26,nfft=512,lowfreq=0,highfreq=None,preemph=0.97): \n highfreq= highfreq or samplerate/2\n print \"preemph %s\"%(preemph)\n signal = sigproc.preemphasis(signal,preemph)\n frames = sigproc.framesig(signal, winlen*samplerate, winstep*samplerate)\n matchframes(frames[0], frames[1])\n pspec = sigproc.powspec(frames,nfft)\n energy = pylab.sum(pspec,1) # this stores the total energy in each frame\n energy = pylab.where(energy == 0, pylab.finfo(float).eps, energy) # if energy is zero, we get problems with log\n fb = get_filterbanks(nfilt, nfft, samplerate, lowfreq, highfreq)\n print \"len(fb) %s\"%(len(fb))\n colour = \"k-\"\n for i in range(len(fb)):\n if colour == \"k-\":\n colour = \"r-\"\n else:\n colour = \"k-\"\n startedplot = False\n midpoint = 0\n for j in range(len(fb[i])):\n if fb[i][j] > 0:\n if startedplot == False:\n startedplot = j\n if j > 0:\n pylab.plot([j-1, j], [fb[i][j-1], fb[i][j]], colour)\n if fb[i][j] == 1.0:\n midpoint = j\n else:\n if not startedplot == False:\n pylab.plot([j-1, j], [fb[i][j-1], 0], colour)\n try:\n print \"slope to midpoint %.3f, slope from midpoint %.3f\"%(1.0/float(midpoint-startedplot), 1.0/float(midpoint-j+1))\n except:\n pass\n break\n pylab.show()\n feat = pylab.dot(pspec, fb.T) # compute the filterbank energies\n feat = pylab.where(feat == 0, pylab.finfo(float).eps, feat) # if feat is zero, we get problems with log\n return feat, energy", "def toggle_fore_mod(self, checked):\n for tile in self.tiles:\n tile.toggle_fore_mod(checked)", "def main(argv=None):\n\n if argv is None:\n argv = sys.argv\n\n parser = E.OptionParser(\n version=\"%prog version: $Id: gff2plot.py 2781 2009-09-10 11:33:14Z andreas $\", usage=globals()[\"__doc__\"])\n\n parser.add_option(\"-f\", \"--file\", dest=\"filenames\", type=\"string\",\n help=\"files[s] to take data from,stdin = -.\")\n parser.add_option(\"\", \"--symbols\", dest=\"symbols\", type=\"string\",\n help=\"symbols to use for each histogram [steps|...].\")\n parser.add_option(\"--slide-show\", dest=\"slide_show\", type=\"choice\",\n choices=(\"first\", \"all\", \"sequence\"),\n help=\"do a slide show - otherwise, write image to file.\")\n parser.add_option(\"--config\", dest=\"filename_config\", type=\"string\",\n help=\"filename of track configuration file.\")\n parser.add_option(\"--dpi\", dest=\"dpi\", type=\"int\",\n help=\"dpi for hardcopy output.\")\n parser.add_option(\"--window-size\", dest=\"window_size\", type=\"int\",\n help=\"window-size.\")\n parser.add_option(\"--output-filename-pattern\", dest=\"output_pattern_image\", type=\"string\",\n help=\"output pattern for images. 
Should contain a '%(contig)s' pattern .\")\n parser.add_option(\"--global-colours\", dest=\"global_colours\", action=\"store_true\",\n help=\"cycle through colours for all tracks.\")\n\n parser.set_defaults(\n filenames=None,\n symbols=\"k-,b-,r-,c-,m-,y-,g-\",\n output_pattern_image=\"%(contig)s.png\",\n slide_show=None,\n window_size=None,\n filename_config=None,\n dpi=None,\n global_colours=False,\n )\n\n (options, args) = E.Start(parser)\n options.symbols = options.symbols.split(\",\")\n\n #--------------------------------------------------------\n # collect all the data\n # list of data per source and contig\n tracks = {}\n extra_features = {}\n\n if options.filenames:\n options.filenames = options.filenames.split(\",\")\n\n if len(args) > 0:\n options.filenames = args\n\n if options.filenames:\n\n for filename in options.filenames:\n\n if filename == \"-\":\n infile = sys.stdin\n else:\n infile = IOTools.openFile(filename)\n\n data = readData(infile)\n\n if filename != \"-\":\n infile.close()\n\n track[filename] = Track(title=filename, data=data)\n\n elif options.filename_config:\n # get track information from config file\n config = ConfigParser.ConfigParser()\n config.read(os.path.expanduser(options.filename_config))\n\n # first extract special sections\n for section in config.sections():\n if section == \"vlines\":\n infile = IOTools.openFile(config.get(section, \"filename\"), \"r\")\n data = readData(infile)\n infile.close()\n extra_features[section] = Track(title=section,\n data=data,\n config=config)\n config.remove_section(section)\n elif section in (\"figure\", \"legend\"):\n extra_features[section] = Track(title=section,\n data=None,\n config=config)\n config.remove_section(section)\n n = 0\n for section in config.sections():\n\n if config.has_option(section, \"filename\"):\n infile = IOTools.openFile(config.get(section, \"filename\"), \"r\")\n data = readData(infile)\n infile.close()\n\n tracks[section] = Track(title=section,\n data=data,\n priority=n,\n config=config)\n\n elif config.has_option(section, \"tracks\"):\n subtracks = config.get(section, \"tracks\")\n subtracks = map(lambda x: x.strip(), subtracks.split(\",\"))\n\n tracks[section] = Track(title=section,\n data=None,\n config=config,\n priority=n,\n subtracks=subtracks)\n n += 1\n\n # compile set of all contigs\n contigs = set()\n for track in tracks.values():\n if track.mData:\n contigs = contigs.union(track.mData.keys())\n\n # re-arrange tracks and subtracks\n tracks = layoutTracks(tracks)\n\n nplots = 0\n figures = []\n legend = None\n for contig in contigs:\n figure, l = plotContig(contig, tracks, options,\n plot_legend=legend is None,\n extra_features=extra_features)\n figures.append(figure)\n if l:\n legend = l\n\n if options.slide_show:\n if options.slide_show == \"first\":\n pylab.show()\n elif options.slide_show == \"all\":\n pylab.show()\n elif options.slide_show == \"sequence\":\n pylab.show()\n else:\n\n extra_args = {}\n if options.dpi:\n extra_args['dpi'] = options.dpi\n\n for contig, figure in zip(contigs, figures):\n params = {'contig': contig}\n filename = options.output_pattern_image % params\n E.info(\"# creating image: %s\" % filename)\n figure.savefig(os.path.expanduser(filename), **extra_args)\n if legend:\n params = {'contig': \"legend\"}\n filename = options.output_pattern_image % params\n E.info(\"creating image: %s\" % filename)\n legend.savefig(os.path.expanduser(filename), **extra_args)\n\n E.info(\"ninput=%i, ncontigs=%i, nplots=%i\" %\n (len(tracks), nplots, len(contigs)))\n\n 
E.Stop()", "def set_xfade(self, track, xclip, ident, args):\n if track != self.song().master_track:\n if args in XFADE_STATES:\n track.mixer_device.crossfade_assign = XFADE_STATES[args]\n else:\n if track.mixer_device.crossfade_assign == 2:\n track.mixer_device.crossfade_assign = 0\n else:\n track.mixer_device.crossfade_assign += 1", "def cygx3IndFlux(self):\n # --------------------------------------------------------------------------------------------- #\n # Read data\n fitsNnam = os.path.join(self.workpath, 'LCresults.fits')\n lcTab = Table.read(fitsNnam)\n detect = lcTab['ts'] >= self.tsmin\n lcTab = lcTab[detect] \n\n ind08 = (lcTab['mjd'] > 54700) & (lcTab['mjd'] < 54900) \n flux08 = lcTab['flux'][ind08]\n fluxerr08 = lcTab['fluxerr'][ind08]\n index08 = lcTab['index'][ind08]\n indexerr08 = lcTab['indexerr'][ind08]\n\n ind09 = (lcTab['mjd'] > 54900) & (lcTab['mjd'] < 55100) \n flux09 = lcTab['flux'][ind09]\n fluxerr09 = lcTab['fluxerr'][ind09]\n index09 = lcTab['index'][ind09]\n indexerr09 = lcTab['indexerr'][ind09]\n\n scale = 10**int(np.floor(np.log10( np.mean( np.concatenate( (flux08, flux09), axis=0) ) ))) \n\n # --------------------------------------------------------------------------------------------- #\n # Plot\n indplt = FermiPlot(savepath='', xsize=8.5, ysize=6)\n indplt.figname = os.path.join(self.workpath, 'IndvsFlux.pdf')\n indplt.xlabel = r'Flux ($10^{%d}$ ph\\,cm$^{-2}$\\,s$^{-1}$)'%(int(np.log10(scale)))\n indplt.ylabel = r'Index'\n indplt.mksize = 2\n indplt.color = self.lblue\n indplt.label = r'2008'\n indplt.plot(x=flux08/scale, xerr=fluxerr08/scale, y=index08, yerr=indexerr08)\n indplt.color = self.loran\n indplt.label = r'2009'\n indplt.plot(x=flux09/scale, xerr=fluxerr09/scale, y=index09, yerr=indexerr09)\n indplt.save()\n\n print(\"\\t=== Figure '{}' created ===\".format(indplt.figname)) \n return", "def imagetrack(self, trackdata, mode='split', i=0, pol='i', size=48000, res=500, clean=True, gain=0.01, tol=1e-4, newbeam=0, save=0, show=0):\n\n # reduce pol axis\n if ((pol == 'i') | (pol == 'I')):\n if len(trackdata) == 2:\n print 'Making Stokes I image as mean of two pols...'\n else:\n print 'Making Stokes I image as mean over all pols. Hope that\\'s ok...'\n td = trackdata.mean(axis=0)\n elif isinstance(pol, types.IntType):\n print 'Making image of pol %d' % (pol)\n td = trackdata[pol]\n\n # apply w phase rotation. generally this is done externally (e.g., by data writing software) and is not needed here.\n# wrot = lambda w: n.exp(-2j*n.pi*n.outer(w, self.freq/self.freq_orig[0]))\n# td = td*wrot(self.w[i])\n\n # define handling of freq axis\n if mode == 'split':\n td = td.flatten()\n uu = n.outer(self.u[i], self.freq/self.freq_orig[0]).flatten()\n vv = n.outer(self.v[i], self.freq/self.freq_orig[0]).flatten()\n ww = n.outer(self.w[i], self.freq/self.freq_orig[0]).flatten()\n elif mode == 'mean':\n td = td.mean(axis=1)\n uu = self.u[i]\n vv = self.v[i]\n ww = self.w[i]\n else:\n print 'Mode must be \\'mean\\' or \\'split\\'.'\n return 0\n\n fov = n.degrees(1./res)*3600. 
# field of view in arcseconds\n p.clf()\n\n # make image\n ai = aipy.img.Img(size=size, res=res)\n uvw_new, td_new = ai.append_hermitian( (uu, vv, ww), td)\n ai.put(uvw_new, td_new)\n image = ai.image(center = (size/res/2, size/res/2))\n image_final = image\n\n # optionally clean image\n if clean:\n print 'Cleaning image...'\n beam = ai.bm_image()\n beamgain = aipy.img.beam_gain(beam[0])\n (clean, dd) = aipy.deconv.clean(image, beam[0], verbose=True, gain=gain, tol=tol)\n\n try:\n import gaussfitter\n if (len(self.beam_params) == 1) | (newbeam == 1) :\n print 'Restoring image with new fit to beam shape...'\n beam_centered = ai.bm_image(center=(size/res/2, size/res/2))\n peak = n.where(beam_centered[0] >= 0.1*beam_centered[0].max(), beam_centered[0], 0.)\n self.beam_params = gaussfitter.gaussfit(peak)\n kernel = n.roll(n.roll(gaussfitter.twodgaussian(self.beam_params, shape=n.shape(beam[0])), size/res/2, axis=0), size/res/2, axis=1) # fit to beam at center, then roll to corners for later convolution step\n except ImportError:\n print 'Restoring image with peak of beam...'\n kernel = n.where(beam[0] >= 0.4*beam[0].max(), beam[0], 0.) # take only peak (gaussian part) pixels of beam image\n\n restored = aipy.img.convolve2d(clean, kernel)\n image_restored = (restored + dd['res']).real/beamgain\n image_final = image_restored\n\n if show or save:\n ax = p.axes()\n ax.set_position([0.2,0.2,0.7,0.7])\n# im = p.imshow(image_final, aspect='auto', origin='upper', interpolation='nearest', extent=[-fov/2, fov/2, -fov/2, fov/2])\n im = p.imshow(image_final, aspect='auto', origin='lower', interpolation='nearest', extent=[fov/2, -fov/2, -fov/2, fov/2])\n cb = p.colorbar(im)\n cb.set_label('Flux Density (Jy)',fontsize=12,fontweight=\"bold\")\n ax.spines['top'].set_visible(False)\n ax.spines['right'].set_visible(False)\n ax.spines['bottom'].set_position(('outward', 20))\n ax.spines['left'].set_position(('outward', 30))\n ax.yaxis.set_ticks_position('left')\n ax.xaxis.set_ticks_position('bottom')\n p.xlabel('RA/l Offset (arcsec)',fontsize=12,fontweight=\"bold\")\n p.ylabel('Dec/m Offset (arcsec)',fontsize=12,fontweight=\"bold\")\n\n peak = n.where(n.max(image_final) == image_final)\n print 'Image peak of %e at (%d,%d)' % (n.max(image_final), peak[0][0], peak[1][0])\n print 'Peak/RMS = %e' % (image_final.max()/image_final[n.where(image_final <= 0.9*image_final.max())].std()) # estimate std without image peak\n\n if save:\n if save == 1:\n savename = self.file.split('.')[:-1]\n savename.append(str(self.nskip/self.nbl) + '_im.png')\n savename = string.join(savename,'.')\n elif isinstance(save, string):\n savename = save\n print 'Saving file as ', savename\n p.savefig(self.pathout+savename)\n\n return image_final", "def on_floated(self, event):\n if not self._guard & FLOATED_GUARD:\n self._guard |= FLOATED_GUARD\n try:\n self.declaration.floating = True\n finally:\n self._guard &= ~FLOATED_GUARD", "def draw_flower_bed():\n turtle.up()\n turtle.left(180)\n turtle.forward(200)\n turtle.right(180)\n turtle.down()\n for x in range(3):\n draw_flower_advanced()", "def _cutoff(xdata, ydata, btype, fs, ff):\r\n try:\r\n# print ff\r\n if ff != None:\r\n nPts = int(1./(((xdata.max()-xdata.min())/xdata.shape[0])*(ff/10.)))\r\n else:\r\n nPts = 0\r\n if nPts%2 == 0:\r\n nPts = nPts + 1\r\n if nPts < xdata.shape[0]:\r\n nPts = xdata.shape[0]\r\n# print nPts\r\n window = np.hanning(ydata.shape[0])\r\n freq = FourierFrequency(xdata, nPts)\r\n index = np.argsort(freq)\r\n tdf = FourierTransform(ydata*window, nPts)\r\n tdf = 
abs(tdf)\r\n pp = _maxima(tdf[index], freq[index], lookahead = 1)\r\n# mm = _minima(tdf[index], freq[index], lookahead=1)\r\n pp, hh = np.array(np.array(pp).T[0]), np.array(np.array(pp).T[1])\r\n# mm = np.array(np.array(mm).T[0])#, np.array(np.array(mm).T[1])\r\n ind = np.where(pp == min(abs(pp)))[0][0]\r\n ind2 = np.where(hh == max(hh[(ind+1):]))[0][0]\r\n for u, i in enumerate(freq):\r\n if i > abs(pp[ind2])*1.5 or i < -abs(pp[ind2])*1.5 or (i < abs(pp[ind2])/2. and i > -abs(pp[ind2])/2.) or (tdf[u] > hh[ind2]*1.05): #(abs(i) < abs(mm[indmin])) or \r\n tdf[u] = 0.\r\n def lor2(x, A0, x0, gamma0):\r\n return A0*(1/np.pi)*(gamma0/2)/((x-x0)**2+(gamma0/2)**2)+A0*(1/np.pi)*(gamma0/2)/((x+x0)**2+(gamma0/2)**2)\r\n lmod2 = lmf.Model(lor2)\r\n lmod2.make_params()\r\n lmod2.set_param_hint('A0', value=max(tdf), min=max(tdf)/1000.)\r\n lmod2.set_param_hint('x0', value=abs(pp[ind2]), min=0.)\r\n lmod2.set_param_hint('gamma0', value=1., min=0.)\r\n result2 = lmod2.fit(tdf[index], x=freq[index])\r\n# print result2.values.get('x0'), result2.values.get('gamma0')\r\n if btype=='high':\r\n if result2.values.get('x0')-result2.values.get('gamma0') > 0.:\r\n# print \"frequency: \", result2.values.get('x0')-result2.values.get('gamma0')\r\n if hh[ind2] != max(hh[(ind+1):]):\r\n print \"False\", \" maximum\", \"\\n\", \"\\n\", \"\\n\"\r\n return result2.values.get('x0')-result2.values.get('gamma0')\r\n else:\r\n# print \"failed: 0\"\r\n return 0.\r\n elif btype=='low':\r\n return result2.values.get('x0')+result2.values.get('gamma0')\r\n except Exception:\r\n pass\r\n finally:\r\n pass", "def toggle_dockablefigures_mode(self, checked):\r\n CONF.set('figure', 'dockable', checked)", "def _set_draw_mode(draw_mode):\n###############################################################################\n global _draw_mode\n _draw_mode = draw_mode", "def no_gradient_fusion():\n pass", "def driver(datacube,xstart=0,ystart=0,xr=None,yr=None,xsgn=1,ysgn=1,outfile=None,\n plotxr=None,trackplot=False,noplot=True,silent=False,\n noback=False,backret=True,wander=False,startfile=None,\n savestep=10000,clobber=False):\n\n global BTRACK, GSTRUC, NPIX\n\n savetime = time.time()\n endflag = False\n count = 0\n tstart = time.time() \n\n\n # Load the cube\n if type(datacube) is str:\n datacubefile = datacube\n print('Loading '+datacubefile)\n datacube = Cube.read(datacubefile)\n \n # Setting parameters\n if xr is None:\n xr = [0,datacube.nx-1]\n if yr is None:\n yr = [0,datacube.ny-1] \n if xstart is None:\n if xsgn == 1: \n xstart = xr[0] \n else: \n xstart = xr[1]\n if ystart is None:\n if ysgn == 1 : \n ystart = yr[0] \n else: \n ystart = yr[1]\n if wander:\n backret = False\n if noback:\n backret = False\n\n # No mode selected, using default mode (backret) \n if (backret == False) and (noback == False) and (wander == False): \n print('' )\n print('!!! WARNING !!! 
NO MODE SELECTED -> USING DEFAULT (BACKRET) MODE' )\n print('')\n sleep(3) \n backret = True\n\n\n # Checking the file\n if outfile is None:\n logtime = datetime.now().strftime(\"%Y%m%d%H%M%S\") \n outfile = 'gaussdecomp_'+logtime+'.fits' \n\n # STARTING WITH BTRACK, RESTORING THE LAST STATE\n if startfile is not None:\n print('Starting with last state of input file '+str(startfile))\n with open(startfile,'rb') as f: \n BTRACK = pickle.load(f)\n GSTRUC = pickle.load(f)\n count = BTRACK['count']\n x = BTRACK['x'][count-1]\n y = BTRACK['y'][count-1]\n track = BTRACK['data'][count-1]\n back = track['back']\n redo = track['redo']\n redo_fail = track['redo_fail']\n skip = False\n count += 1\n xstart = x\n ystart = y\n lastx = x \n lasty = y\n # STARTING TRACKING FRESH\n else:\n initialize_tracking(wander,yr)\n redo_fail = False \n redo = False\n back = False\n lastx = None\n lasty = None\n \n # Printing out the inputs\n if silent==False:\n print(' RUNNING GAUSSIAN ANALYSIS WITH THE FOLLOWING PARAMETERS')\n print('-----------------------------------------------------------')\n print(' STARTING POSITION = (%d,%d)' % (xstart,ystart))\n print(' X RANGE = [%d,%d]' % (xr[0],xr[1]))\n print(' Y RANGE = [%d,%d]' % (yr[0],yr[1]))\n print(' X DIRECTION = '+str(xsgn))\n print(' Y DIRECTION = '+str(ysgn))\n print(' OUTFILE = '+outfile)\n print('-----------------------------------------------------------')\n if (backret == 1) : \n print(' USING (BACKRET) MODE')\n if (noback == 1) : \n print(' USING (NOBACK) MODE')\n if (wander == 1) : \n print(' USING (WANDER) MODE')\n print('-----------------------------------------------------------')\n print('')\n \n # Initializing some parameters \n p0 = False\n p1 = False\n p2 = False\n p3 = False\n p4 = False \n # Where are we starting \n x = xstart \n y = ystart \n\n track_dict = {'count':None,'x':None,'y':None,'rms':None,'noise':None,'par':None,\n 'guesspar':None,'guessx':None,'guessy':None,'back':None,'redo':None,\n 'redo_fail':None,'skip':None,'lastx':None,'lasty':None,'npix':None}\n gstruc_dict = {'x':None,'y':None,'rms':None,'noise':None,'par':None,\n 'sigpar':None,'lon':None,'lat':None,'npix':None}\n\n \n \n # STARTING THE LARGE LOOP \n while (endflag == False): \n t00 = time.time() \n skip,guessx,guessy,guesspar = False,None,None,None \n tstr = {'par':None}\n tstr1 = {'par':None}\n tstr2 = {'par':None} \n\n # FIGURE OUT THE NEXT MOVE \n #------------------------- \n if (count > 0):\n lastx,lasty = x,y\n out = nextmove(x,y,xr,yr,count,xsgn,ysgn,backret=backret,noback=noback,\n wander=wander,redo=redo,back=back,redo_fail=redo_fail,silent=silent)\n x,y,guessx,guessy,guesspar,back,redo,skip,endflag = out\n \n # The end\n if endflag:\n break\n \n # Starting the tracking structure, bad until proven good\n track = track_dict.copy()\n track['count'] = count \n track['x'] = x \n track['y'] = y \n track['lastx'] = lastx \n track['lasty'] = lasty \n track['guesspar'] = guesspar \n track['guessx'] = guessx \n track['guessy'] = guessy \n track['back'] = back \n track['redo'] = redo \n track['skip'] = skip \n\n # Minimal structure, in case we skip\n tstr = {'x':x,'y':y,'rms':np.inf,'noise':None,'par':None,\n 'sigpar':None,'lon':None,'lat':None}\n \n # Some bug checking \n if x is None: \n import pdb; pdb.set_trace() \n if (x == lastx) and (y == lasty): \n import pdb; pdb.set_trace() \n \n if skip:\n if silent==False:\n print('SKIP')\n \n # FITTING THE SPECTRUM, UNLESS WE'RE SKIPPING IT \n #------------------------------------------------ \n if skip == False: \n t0 = 
time.time() \n \n # Initial Printing\n if silent==False:\n print('Fitting Gaussians to the HI spectrum at (%d,%d)' % (x,y))\n strout = ''\n if redo:\n strout = strout+'REDO '\n if back:\n strout = strout+'BACK'\n if back is False:\n strout = strout+'FORWARD' \n print(strout) \n \n # Getting the HI spectrum\n spec = datacube(x,y) # Get the new spectrum\n # No good spectrum \n if spec is None or np.sum(spec.flux)==0:\n if silent==False:\n print('No spectrum to fit')\n skip = True\n count += 1\n btrack_add(track)\n continue\n\n lon,lat = datacube.coords(x,y)\n noise = spec.noise\n npts = spec.n\n sigma = np.ones(npts,float)*noise\n NPIX = npts\n \n # Zero-velocity region INCLUDED\n #==================================== \n if np.min(spec.vel) < 0:\n # GETTIING THE VELOCITY RANGE around the zero-velocity MW peak\n if silent==False:\n print('Zero-velocity region INCLUDED. Fitting it separately')\n smspec = dln.savgol(spec.flux,21,2) \n dum,vindcen = dln.closest(spec.vel,0)\n \n # Finding the vel. low point \n lflag = 0 \n i = vindcen\n lo = 0\n while (lflag == 0): \n if smspec[i] <= noise: \n lo = i \n if smspec[i] <= noise: \n lflag = 1\n i -= 1 \n if i < 0: \n lflag = 1 \n lo = np.maximum(0,(lo-20))\n \n # Finding the vel. high point \n hflag = 0 \n i = vindcen\n hi = npts-1\n while (hflag == 0): \n if smspec[i] <= noise : \n hi = i \n if smspec[i] <= noise : \n hflag = 1 \n i += 1 \n if i > npts-1: \n hflag = 1 \n hi = np.minimum((npts-1),(hi+20))\n \n vmin = spec.vel[lo] \n vmax = spec.vel[hi] \n \n # RUNNING GAUSSFITTER ON ZERO VELOCITY REGION, WITH GUESS \n v0results = fitter.gaussfitter(spec,vmin=vmin,vmax=vmax,initpar=guesspar,silent=True,noplot=True) \n \n # FIT WITH NO GUESS (if first time and previous fit above with guess) \n tp0,tres0 = gfind(x,y,xr=xr,yr=yr) \n if (tp0 == 0) and (guesspar is not None):\n v0results_noguess = fitter.gaussfitter(spec,vmin=vmin,vmax=vmax,silent=True,noplot=True)\n b,dbic = gbetter(v0results,v0results_noguess)\n # The fit without the guess is better \n if (dbic>0):\n v0results = v0results_noguess.copy()\n \n # ADDING THE BEST RESULTS TO THE STRUCTURE, TSTR1\n if v0results['par'] is not None:\n ngauss = len(v0results['par'])//3\n tstr1 = gstruc_dict.copy()\n for n in ['par','sigpar','rms','npix']:\n tstr1[n] = v0results[n] \n tstr1['x'] = x \n tstr1['y'] = y\n tstr1['noise'] = spec.noise \n tstr1['lon'] = lon\n tstr1['lat'] = lat\n else:\n tstr1 = {'par':None}\n \n # REMOVING ZERO-VELOCITY parameters and spectrum\n guesspar2 = None\n inspec = spec.copy()\n if v0results['par'] is not None:\n th = utils.gfunc(spec.vel,*v0results['par'])\n inspec = spec.copy()\n inspec.flux -= th\n npts = spec.n\n if guesspar is not None:\n guesspar2 = np.array([],float)\n inpar1 = np.copy(guesspar)\n inpar2 = np.copy(guesspar)\n inpar1 = utils.gremove(inpar1,spec.vel[0:lo],spec.flux[0:lo],noise)\n if inpar1 is not None:\n guesspar2 = np.hstack((guesspar2,inpar1))\n inpar2 = utils.gremove(inpar2,spec.vel[hi:npts],spec.flux[hi:npts],noise)\n if inpar2 is not None:\n guesspar2 = np.hstack((guesspar2,inpar2))\n if len(guesspar2)==0:\n guesspar2 = None\n \n \n # RUNNING GAUSSFITTER ON EVERYTHING WITHOUT THE ZERO-VELOCITY REGION, WITH GUESS \n results = fitter.gaussfitter(inspec,initpar=guesspar2,noplot=True,silent=True)\n \n \n # FIT WITH NO GUESS (if first time and previous fit above with guess) \n if (tp0 == 0) and (guesspar is not None):\n results_noguess = fitter.gaussfitter(inspec,silent=True,noplot=True) \n b,dbic34 = gbetter(results,results_noguess)\n # The fit without 
the guess is better \n if (b == 1):\n results = results_noguess.copy()\n \n # ADDING THE RESULTS TO THE STRUCTURE, TSTR2\n if results['par'] is not None:\n ngauss = len(results['par'])//3 \n tstr2 = gstruc_dict.copy()\n for n in ['par','sigpar','rms','npix']:\n tstr2[n] = results[n] \n tstr2['x'] = x \n tstr2['y'] = y\n tstr2['noise'] = spec.noise\n tstr2['lon'] = lon \n tstr2['lat'] = lat\n else:\n tstr2 = {'par':None}\n \n # ADDING THE STRUCTURES TOGETHER, TSTR = [TSTR1,TSTR2]\n if tstr1['par'] is not None and tstr2['par'] is not None:\n tstr = tstr1.copy()\n tstr['par'] = np.hstack((tstr1['par'],tstr2['par']))\n tstr['sigpar'] = np.hstack((tstr1['sigpar'],tstr2['sigpar']))\n tstr['rms'] = utils.computerms(spec.vel,spec.flux,tstr['par'])\n if tstr1['par'] is not None and tstr2 is None:\n tstr = tstr1.copy()\n if tstr1['par'] is None and tstr2['par'] is not None:\n tstr = tstr2.copy()\n if tstr1['par'] is None and tstr2['par'] is None: # no gaussians\n tstr = gstruc_dict.copy()\n tstr['x'] = x \n tstr['y'] = y \n tstr['lon'] = lon \n tstr['lat'] = lat \n tstr['rms'] = np.inf\n tstr['noise'] = spec.noise \n tstr['npix'] = len(spec.flux)\n \n \n # Does NOT cover zero-velocity region\n #====================================\n else:\n if silent==False:\n print('Zero-velocity NOT covered')\n # RUNNING GAUSSFITTER ON EVERYTHING WITH GUESS \n results = fitter.gaussfitter(spec,initpar=guesspar,noplot=True,silent=True) \n \n # FIT WITH NO GUESS (if first time and previous fit above with guess)\n tp0,res0 = gfind(x,y,xr=xr,yr=yr)\n results2 = None\n if (tp0 == 0) and (guesspar is not None):\n results2 = fitter.gaussfitter(spec,silent=True,noplot=True) \n b,dbic = gbetter(results,results2)\n # The fit without the guess is better \n if (dbic>0): \n results = results2.copy() \n \n # Creating the structure with the results\n if results['par'] is not None:\n ngauss = len(results['par'])//3\n tstr = gstruc_dict.copy()\n for n in ['par','sigpar','rms','npix']:\n tstr[n] = results[n] \n tstr['x'] = x\n tstr['y'] = y\n tstr['noise'] = spec.noise\n tstr['lon'] = lon \n tstr['lat'] = lat\n else:\n tstr = {'par':None}\n \n \n # PLOTTING/PRINTING, IF THERE WAS A FIT \n if tstr['par'] is not None:\n # Getting the rms of all the components of the whole spectrum\n tstr['rms'] = utils.computerms(spec.vel,spec.flux,tstr['par'])\n \n # Printing and plotting\n if noplot==False:\n utils.gplot(spec.vel,spec.flux,tstr['par'],xlim=plotxr)\n if silent==False:\n utils.printgpar(tstr['par'],sigpar=tstr['sigpar'],rms=tstr['rms'],noise=tstr['noise'])\n if trackplot:\n utils.gtrackplot(x,y,lastx,lasty,redo, count,xr=xr,yr=yr,pstr=pstr,xstr=xstr,ystr=ystr)\n else:\n if silent==False:\n print('No gaussians found at this position!')\n\n # ADDING SOLUTION TO GSTRUC\n if tstr['par'] is not None:\n if count == 0: \n gstruc_add(tstr)\n if count > 0: \n old,res1 = gfind(x,y,xr=xr,yr=yr)\n \n # This is a re-decomposition \n if (old==1) and redo: \n # Checking the two decompositions \n b,dbic = gbetter(tstr,res1)\n # New one is better \n if (b == False): \n gstruc_replace(tstr) # replacing the solution\n redo_fail = False\n else: # re-decomposition failed \n redo_fail = True\n if silent==False:\n print('REDO FAILED!')\n \n # This is NOT a re-decomposition, add it \n if (old==0) or (redo == False): \n t1 = time.time()\n gstruc_add(tstr)\n redo_fail = False\n\n\n # SKIP FITTING PART\n else: \n # Creating a dummy structure \n tstr = {'par':None}\n redo_fail = False\n redo = False\n back = False\n \n if trackplot:\n 
utils.gtrackplot(x,y,lastx,lasty,redo,count,xr=xr,yr=yr,pstr=pstr,xstr=xstr,ystr=ystr)\n\n \n # FINISHING UP THE TRACKING STRUCTURE\n if tstr['par'] is not None:\n npar = len(tstr['par'])\n track['par'] = tstr['par']\n track['rms'] = tstr['rms']\n track['noise'] = tstr['noise']\n track['npix'] = tstr['npix']\n else:\n npar = 0\n track['redo_fail'] = redo_fail \n \n # UPDATING THE TRACKING STRUCTURE\n btrack_add(track)\n\n # The end\n if ((x>=xr[1]) and (y>=yr[1])):\n break\n \n count += 1 \n \n # Saving the last position \n lastx = x \n lasty = y \n \n # SAVING THE STRUCTURES, periodically\n if (count % savestep == 0) and (time.time()-savetime) > 1800:\n gstruc = savedata(outfile)\n savetime = time.time()\n \n # FINAL SAVE\n ngauss = GSTRUC['ngauss']\n print(str(ngauss)+' final Gaussians')\n gstruc = savedata(outfile)\n \n # Clean up the tracking structures\n del BTRACK\n del GSTRUC\n\n print('Total time = %.2f sec.' % (time.time()-tstart))\n \n return gstruc", "def test_flipflop(self):\n circ = FlipFlop(size=2)\n #test basic flip flop functionality\n circ.d = 3\n self.assertSigEq(circ.q, 0)\n circ.clk.pulse()\n self.assertSigEq(circ.q, 3)\n #test reset circuit\n circ.r.set()\n circ.clk.pulse()\n self.assertSigEq(circ.q, 0)\n circ.r.reset()\n #test load\n circ.l.set()\n circ.d = 3\n circ.clk.pulse()\n self.assertSigEq(circ.q, 0)\n #test enable\n circ.l.reset()\n circ.e.set()\n circ.clk.pulse()\n self.assertSigEq(circ.q, 0)", "def toggle_exposure(self):\n\n checked1 = self.exp1_radio.isChecked()\n if checked1:\n self.exp2_radio.setChecked(True)\n else:\n self.exp1_radio.setChecked(True)\n self.select_exposure()", "def main():\n\n varList = {'beta': 6., 'convSpeed': 1.2, 'Mark': 0., 'axi': 1, 'acModes': 4, 'Nr': 801, 'Tf': 600., 'xf': 0.51}\n\n # Solve steady flame.\n # BC1: I have the attachment BC at r = 1, always\n # BC2: I need to set dF/dr = 0 at r = 0 iff Mark != 0\n [qMean, r, FMean] = steady_flame_area_FD3(varList['Mark'], varList['beta'], varList['axi'], varList['Nr'])\n r = r * varList['beta']\n\n # Calculate mean flame derivatives\n dFMeanDr = derivsnew.FD1_CT2_D(FMean, r[1] - r[0])\n d2FMeanDr2 = derivsnew.FD2_CT2_D(FMean, r[1] - r[0])\n\n #Apply BC smooth tip:\n if(varList['Mark']!=0.0):\n dFMeanDr[-1] = 0.0\n\n # Use correct number of points. 
Remember that the extrems need to be set depending on the BC!\n # The attach BC (first point) is always assumed to be true and removed from the vector list\n if(varList['Mark']==0):\n Nr = varList['Nr'] / 2\n dFMeanDr = dFMeanDr[1:]\n d2FMeanDr2 = d2FMeanDr2[1:]\n r = r[1:]\n # The smooth BC holds only if Mark!=0 (second derivatives appear): remove also the last point\n else:\n Nr = varList['Nr'] / 2 - 1\n dFMeanDr = dFMeanDr[1:-1]\n d2FMeanDr2 = d2FMeanDr2[1:-1]\n r = r[1:-1]\n\n # Calculate geometric values\n den = 1 + varList['beta'] * varList['beta'] * dFMeanDr * dFMeanDr\n dR = r[1] - r[0]\n # Set Nx equal to Nr for now.\n # The implementation is more complicated if they differ, and need to interpolate between values.\n Nx = Nr\n\n # Nonuniform grid spacing along x!\n # Nx = length(dx) has to hold.\n dx = np.empty(len(FMean) - 1)\n for ii in range(1, len(FMean)):\n dx[ii - 1] = FMean[ii] - FMean[ii - 1]\n\n [A, B, C, tau] = loadAcoustics(varList['xf'], varList['Tf'], varList['acModes'], varList['beta'])\n\n Matrix = buildMatrix(Nr, dR, varList['beta'], den, r, FMean, dFMeanDr, d2FMeanDr2, varList['Mark'], varList['acModes'], A,\n B, C, Nx, dx, tau, qMean, varList['convSpeed'])\n\n [d, W, V] = eigProblem.solveEigProb(Matrix)\n [dnew, Wnew, Vnew] = eigProblem.selectUnstable(d, W, V)\n\n print dnew / (2. * np.pi)", "def flip(self, xbool, ybool):\n self._surf = pygame.transform.flip(self._surf, xbool, ybool).convert_alpha()", "def __init__(self, options, imgs, frq_sim_guess, otf=None,\n wiener_parameter=1, fbounds=(0.01, 1), fbounds_shift=(0.01, 1),\n use_wicker=True, normalize_histograms=True, background_counts=100,\n do_global_phase_correction=True, determine_amplitudes=False, find_frq_first=True,\n default_to_guess_on_bad_phase_fit=True, max_phase_err=20*np.pi/180,\n default_to_guess_on_low_mcnr=True, min_mcnr=1,\n size_near_fo_to_remove=0,\n phases_guess=None, mod_depths_guess=None, pspec_params_guess=None,\n use_fixed_phase=False, use_fixed_frq=False, use_fixed_mod_depths=False,\n plot_diagnostics=True, interactive_plotting=False, save_dir=None, figsize=(20, 10)):\n # #############################################\n # saving information\n # #############################################\n self.save_dir = save_dir\n self.hold_figs_open = False\n self.figsize = figsize\n\n if self.save_dir is not None:\n self.log_file = open(os.path.join(self.save_dir, \"sim_log.txt\"), 'w')\n else:\n self.log_file = None\n\n # #############################################\n # setup plotting\n # #############################################\n if not interactive_plotting:\n plt.ioff()\n plt.switch_backend(\"agg\")\n\n # #############################################\n # analysis settings\n # #############################################\n self.wiener_parameter = wiener_parameter\n self.use_wicker = use_wicker\n self.global_phase_correction = do_global_phase_correction\n self.normalize_histograms = normalize_histograms\n self.size_near_fo_to_remove = size_near_fo_to_remove\n self.default_to_guess_on_bad_phase_fit = default_to_guess_on_bad_phase_fit\n self.max_phase_error = max_phase_err\n self.default_to_guess_on_low_mcnr = default_to_guess_on_low_mcnr\n self.min_mcnr = min_mcnr\n self.determine_amplitudes = determine_amplitudes\n self.use_fixed_phase = use_fixed_phase\n self.use_fixed_frq = use_fixed_frq\n self.use_fixed_mod_depths = use_fixed_mod_depths\n self.find_frq_first = find_frq_first\n self.plot_diagnostics = plot_diagnostics\n\n # #############################################\n # images\n # 
#############################################\n self.background_counts = background_counts\n self.imgs = imgs.astype(np.float64)\n self.nangles, self.nphases, self.ny, self.nx = imgs.shape\n \n # #############################################\n # get basic parameters\n # #############################################\n self.dx = options['pixel_size']\n self.dy = options['pixel_size']\n self.na = options['na']\n self.wavelength = options['wavelength']\n\n self.fmax = 1 / (0.5 * self.wavelength / self.na)\n self.fbounds = fbounds\n self.fbounds_shift = fbounds_shift\n\n self.frqs_guess = frq_sim_guess\n self.phases_guess = phases_guess\n self.mod_depths_guess = mod_depths_guess\n self.power_spectrum_params_guess = pspec_params_guess\n\n # #############################################\n # get frequency data and OTF\n # #############################################\n self.fx = tools.get_fft_frqs(self.nx, self.dx)\n self.fy = tools.get_fft_frqs(self.ny, self.dy)\n\n if otf is None:\n otf = psf.circ_aperture_otf(self.fx[None, :], self.fy[:, None], self.na, self.wavelength)\n self.otf = otf\n\n # #############################################\n # print current time\n # #############################################\n now = datetime.datetime.now()\n\n self.print_tee(\"####################################################################################\", self.log_file)\n self.print_tee(\"%d/%02d/%02d %02d:%02d:%02d\" % (now.year, now.month, now.day, now.hour, now.minute, now.second), self.log_file)\n self.print_tee(\"####################################################################################\", self.log_file)\n\n # #############################################\n # normalize histograms for input images\n # #############################################\n if self.normalize_histograms:\n tstart = time.process_time()\n\n for ii in range(self.nangles):\n for jj in range(1, self.nphases):\n self.imgs[ii, jj] = match_histograms(self.imgs[ii, jj], self.imgs[ii, 0])\n\n tend = time.process_time()\n self.print_tee(\"Normalizing histograms took %0.2fs\" % (tend - tstart), self.log_file)\n\n # #############################################\n # remove background\n # #############################################\n self.imgs = self.imgs - self.background_counts\n self.imgs[self.imgs <= 0] = 1e-12\n\n # #############################################\n # Fourier transform SIM images\n # #############################################\n tstart = time.process_time()\n\n self.imgs_ft = np.zeros((self.nangles, self.nphases, self.ny, self.nx), dtype=np.complex)\n for jj in range(self.nangles):\n for kk in range(self.nphases):\n # use periodic/smooth decomposition instead of traditional apodization\n img_to_xform, _ = psd.periodic_smooth_decomp(self.imgs[jj, kk])\n self.imgs_ft[jj, kk] = fft.fftshift(fft.fft2(fft.ifftshift(img_to_xform)))\n\n tend = time.process_time()\n\n self.print_tee(\"FT images took %0.2fs\" % (tend - tstart), self.log_file)\n\n # #############################################\n # get widefield image\n # #############################################\n tstart = time.process_time()\n\n self.widefield = get_widefield(self.imgs)\n wf_to_xform, _ = psd.periodic_smooth_decomp(self.widefield)\n self.widefield_ft = fft.fftshift(fft.fft2(fft.ifftshift(wf_to_xform)))\n\n tend = time.process_time()\n self.print_tee(\"Computing widefield image took %0.2fs\" % (tend - tstart), self.log_file)\n\n # #############################################\n # get optically sectioned image\n # 
#############################################\n tstart = time.process_time()\n\n sim_os = np.zeros((self.nangles, self.imgs.shape[-2], self.imgs.shape[-1]))\n for ii in range(self.nangles):\n sim_os[ii] = sim_optical_section(self.imgs[ii])\n # todo: maybe want to weight by power/mod depth?\n self.imgs_os = np.mean(sim_os, axis=0)\n\n tend = time.process_time()\n self.print_tee(\"Computing OS image took %0.2fs\" % (tend - tstart), self.log_file)", "def show():\n setup()\n plt.show()", "def use_fscale(self,use_fscale):\n if type(use_fscale).__name__ == 'bool':\n self._use_fscale = use_fscale\n else:\n raise KINSOL_Exception(\"The variable sent to 'use_fscale' must be a boolean.\")", "def main():\n\n\n ## Groups showing similar noise profile\n #grp1 = [ 1, 4, 5, 8, 9 ]\n #grp2 = [ 18, 19, 22, 23, 30, 31 ]\n grp1 = [ 0, 1, 6, 7, 4, 5 ]\n grp2 = [ 12, 13, 16, 17, 18, 19 ]\n #grp3 = [ 18, 19, 22, 23, 26, 27 ]\n with tb.open_file(sys.argv[1], 'r') as dataF:\n\n npm = len(dataF.root.Sensors.DataPMT)#len(dataF.root.RD.pmtrwf[0])\n nevt = len(dataF.root.RD.pmtrwf)\n\n ## Filter definition\n fSample = 40E6\n freqLPF = 100E3\n freqLPFd = 2*freqLPF / fSample\n b, a = signal.butter(1, freqLPFd, 'low', analog=False)\n ##\n fig, axes = plt.subplots(nrows=3, ncols=2, figsize=(20,6))\n #fig.tight_layout()\n fig.show()\n wf_len = len(dataF.root.RD.pmtrwf[0][0])\n if len(sys.argv) > 3:\n wf_len = wf_len/2+1 \n elif len(sys.argv) == 3:\n g1_first = np.zeros(wf_len, np.float64)\n g2_first = np.zeros(wf_len, np.float64)\n g3_first = np.zeros(wf_len, np.float64)\n mean_first = np.zeros(wf_len, np.float64)\n ##\n for ievt in range(nevt):\n ## clear the axies\n for ax in axes.flatten():\n ax.cla()\n plt_frq = np.zeros(wf_len, np.float64)\n fwf_mean = np.zeros(wf_len, np.float64)\n wf_mean = np.zeros(wf_len, np.float64) # No filter\n g1_mean = np.zeros(wf_len, np.float64)\n g2_mean = np.zeros(wf_len, np.float64)\n g3_mean = np.zeros(wf_len, np.float64)\n for ipm in range(npm):\n\n sg = getWF(dataF, ipm, ievt)\n sg = sg - np.mean(sg)\n\n sgf = signal.lfilter(b, a, sg)\n ## remove mean again just in case\n sgf = sgf - np.mean(sgf)\n #sgf = sg\n\n pmID = getPMid(dataF, ipm)\n\n if len(sys.argv) == 3:\n axes[0][0].plot(sgf, label='pmt '+str(pmID))\n fwf_mean += sgf/npm\n wf_mean += sg/npm\n if pmID in grp1:\n g1_mean += sgf/len(grp1)\n elif pmID in grp2:\n g2_mean += sgf/len(grp2)\n elif pmID in grp3:\n g3_mean += sgf/len(grp3)\n else:\n ft = np.fft.rfft(sgf)\n freq = np.fft.rfftfreq(len(sgf), d=25E-9)\n if ipm == 0:\n plt_frq = freq\n if sys.argv[2] == 'mag':\n ft_mag = np.absolute(ft)\n axes[0][0].plot(freq, ft_mag, label='pmt '+str(pmID))\n fwf_mean += ft_mag/npm\n if pmID in grp1:\n g1_mean += ft_mag/len(grp1)\n elif pmID in grp2:\n g2_mean += ft_mag/len(grp2)\n elif pmID in grp3:\n g3_mean += ft_mag/len(grp3)\n elif sys.argv[2] == 'phase':\n ft_pha = np.angle(ft)\n axes[0][0].plot(freq, ft_pha, label='pmt '+str(pmID))\n fwf_mean += ft_pha/npm\n if pmID in grp1:\n g1_mean += ft_pha/len(grp1)\n elif pmID in grp2:\n g2_mean += ft_pha/len(grp2)\n elif pmID in grp3:\n g3_mean += ft_pha/len(grp3)\n \n \n ## The axes not set\n if len(sys.argv) == 3:\n axes[0][1].plot(g1_mean)\n axes[0][1].set_title('Group 1 mean waveform')\n axes[1][0].plot(g2_mean)\n axes[1][0].set_title('Group 2 mean waveform')\n axes[1][1].plot(g3_mean)\n axes[1][1].set_title('Group 3 mean waveform')\n axes[2][0].plot(fwf_mean)\n axes[2][0].set_title('Mean waveform')\n if ievt == 0:\n g1_first = g1_mean\n g2_first = g2_mean\n g3_first = g3_mean\n 
mean_first = fwf_mean\n else:\n axes[0][1].plot(g1_first)\n axes[1][0].plot(g2_first)\n axes[1][1].plot(g3_first)\n axes[2][0].plot(mean_first)\n axes[2][1].plot(wf_mean)\n axes[2][1].set_title('Mean waveform and corrected')\n axes[2][1].plot(wf_mean-fwf_mean)\n axes[2][1].set_xlim(0, 1000)\n else:\n axes[0][0].set_xlim(0,50000)\n axes[0][1].plot(plt_frq, g1_mean)\n axes[0][1].set_title('Group 1 mean '+sys.argv[2])\n axes[0][1].set_xlim(0,50000)\n axes[1][0].plot(plt_frq, g2_mean)\n axes[1][0].set_title('Group 2 mean '+sys.argv[2])\n axes[1][0].set_xlim(0,50000)\n axes[1][1].plot(plt_frq, g3_mean)\n axes[1][1].set_title('Group 3 mean '+sys.argv[2])\n axes[1][1].set_xlim(0,50000)\n axes[2][0].plot(plt_frq, fwf_mean)\n axes[2][0].set_title('Mean '+sys.argv[2])\n axes[2][0].set_xlim(0,50000)\n plt.draw()\n #fig.legend(loc=0)\n catcher = input(\"next plot?\")\n if catcher == 'q':\n exit()\n plt.cla()", "def flip(self):\n \n if self.faceup:\n self.faceup = False\n else:\n self.faceup = True", "def set_fluorescence(self, flag):\n flag_ = c.c_int(flag)\n logger.debug('StSetFluorFlg(%i)', flag)\n self._lib.StSetFluorFlg(flag_)", "def plot_carrier_frequency(self):\r\n roi = self.phase_roi\r\n phase_slice = (slice(roi[2], roi[3]), slice(roi[0], roi[1]))\r\n # calculation\r\n S = self.image[phase_slice] # signal in phase_roi\r\n t_axis = self.image.x_axis[roi[0]:roi[1]] # [ns] time axis\r\n y_axis = self.image.y_axis[roi[2]:roi[3]] # [mic] spatial scale\r\n N = S.shape[0]//2\r\n\r\n s = np.fft.fft(S, axis=0) / N # fft\r\n s_abs = np.abs(s[:N,:])\r\n f_axis = np.arange(N) / (2 * N) # spatial frequency axis\r\n\r\n s_mean = np.log10(np.mean(s_abs, axis=1))\r\n i0 = np.argmax(s_mean[3:])\r\n f0 = f_axis[3+i0] # [px^-1] fringe carrier frequency (estimate)\r\n s0 = s_mean[3+i0]\r\n sys.stdout.write(\"{} VISAR-{} fringe period = {:.1f} px\\n\".format(\r\n self.h5.shot_id[:11], self.leg, 1/f0))\r\n\r\n # plot calcs\r\n vlim_0 = dataimage.thresh_vlim(S, 0.01)\r\n vlim_1 = dataimage.thresh_vlim(np.log10(s_abs), (0.02, 0.005))\r\n tlim = (t_axis[0], t_axis[-1])\r\n ylim = (y_axis[0], y_axis[-1])\r\n flim = (0, 0.5) # [1/px]\r\n extent_0 = tlim + (0, S.shape[0]) # extent for signal\r\n# extent_0 = tlim + ylim # extent for signal\r\n extent_1 = tlim + flim # extent for fft\r\n\r\n # figure\r\n fig = plt.figure(figsize=(7,7), dpi=100)\r\n axs = []\r\n axs.append(fig.add_subplot(221, ylabel='[px]', title='signal'))\r\n axs.append(fig.add_subplot(222, sharey=axs[0], title='spatial lineout'))\r\n axs.append(fig.add_subplot(223, sharex=axs[0], title='log(fft(signal))',\r\n xlabel='time [ns]', ylabel=\"spatial frequency [px^-1]\"))\r\n axs.append(fig.add_subplot(224, sharey=axs[2], xlabel='log10(power)', title='spectral lineout'))\r\n\r\n axs[0].imshow(S, extent=extent_0,\r\n aspect='auto', vmin=vlim_0[0], vmax=vlim_0[1])\r\n axs[2].imshow(np.log10(s_abs), extent=extent_1,\r\n aspect='auto', vmin=vlim_1[0], vmax=vlim_1[1])\r\n axs[1].plot(np.mean(S, axis=1), np.arange(S.shape[0]))\r\n axs[3].plot(s_mean, f_axis)\r\n axs[0].set_ylim(*extent_0[2:])\r\n \r\n axs[3].annotate(\"fringe period\\n= {:.1f} px\".format(1/f0),\r\n (s0, f0), (0.95, 0.5), textcoords='axes fraction',\r\n arrowprops=dict(width=1, headwidth=6, facecolor='k',\r\n shrink=0.03), ha='right',)\r\n\r\n axs[3].axhline(f0*0.7, color='r', linestyle='dashed')\r\n axs[3].axhline(f0*1.4, color='r', linestyle='dashed')\r\n\r\n fig.tight_layout()\r\n fig.canvas.window().move(0,0)\r\n return fig", "def plot_true(self, ax):\n t = self.t\n x_true = self.x_true\n b = 
self.b\n\n ax.plot(t, x_true, 'k-', label='true image', lw=1.5)\n ax.plot(t, b, 'ro', label='blurred')\n ax.set_title(r'True')\n ax.set_xlabel(r'$t$')\n ax.set_ylabel(r'$x$')\n leg = ax.legend(loc='upper left')\n leg.get_frame().set_alpha(0.5)\n ax.grid()", "def FFT_brickwallHPF(filename,cutoff,wout=True,plot=True):\n n, data, data_dB,sr,ch=inputwav(filename)\n print('Applying FFT...')\n yfreq=rfft(data,axis=0)\n xfreq=np.linspace(0,sr/(2.0),n)\n yfreqBHPF=np.zeros((n,ch))\n yfreqBHPF[0:n,:]=yfreq\n print('Applying brickwall at '+str(cutoff)+' Hz...')\n yfreqBHPF[0:np.searchsorted(xfreq,cutoff),:]=0.0\n data_filtered=(irfft(yfreqBHPF,axis=0))\n if wout==True:\n print('Exporting...')\n sf.write(filename[0:len(filename)-4]+'_brickwallHPF.wav',data_filtered,sr,'PCM_16')\n if plot==True:\n print('Plotting...')\n py.close()\n fig, (ax1, ax2) = py.subplots(nrows=2)\n ax1.semilogx(xfreq,20*np.log10(abs(yfreq[0:n,0]+.0001)),'k-',lw=0.5)\n ax1.semilogx(xfreq,20*np.log10(abs(yfreqBHPF[0:n//1,0]+.0001)),'m-',lw=0.1)\n ax1.set_xlabel('Frequency (Hz)')\n ax1.set_ylabel('Amplitude')\n ax2.plot(data,'k-',label='Raw')\n ax2.plot(data_filtered,'m-',label='Filtered')\n ax2.set_xlim(0,10000)\n ax2.set_ylim(-1,1)\n ax2.set_ylabel('Amplitude (Norm Bits)')\n ax2.set_xlabel('Samples')\n ax2.legend(loc=2)\n print('Done!')\n return data_filtered", "def HasFOV(self):\n return _gmat_py.Hardware_HasFOV(self)", "def init_plot_force(nb_mus):\n # --- Curve graph --- #\n # app = pg.mkQApp(\"force\")\n # remote = []\n # layout = pg.LayoutWidget()\n # layout.resize(800, 800)\n # label = QtGui.QLabel()\n # box = []\n # rplt = []\n # row_count = 0\n # col_span = 4 if nb_mus > 8 else 8\n # for mus in range(nb_mus):\n # remote.append(rgv.RemoteGraphicsView())\n # remote[mus].pg.setConfigOptions(antialias=True)\n # app.aboutToQuit.connect(remote[mus].close)\n # box.append(QtGui.QCheckBox(f\"muscle_{mus}\"))\n # if mus >= 8:\n # layout.addWidget(box[mus], row=1, col=mus-8)\n # layout.addWidget(remote[mus], row=mus - 8 + 2, col=4, colspan=col_span)\n # else:\n # layout.addWidget(box[mus], row=0, col=mus)\n # layout.addWidget(remote[mus], row=mus + 2, col=0, colspan=col_span)\n # rplt.append(remote[mus].pg.PlotItem())\n # rplt[mus]._setProxyOptions(deferGetattr=True) ## speeds up access to rplt.plot\n # remote[mus].setCentralItem(rplt[mus])\n # layout.addWidget(label)\n # layout.show()\n # row_count += 1\n # return rplt, layout, app , box\n\n # --- Progress bar graph --- #\n # app = pg.mkQApp(\"force\")\n # layout = pg.LayoutWidget()\n # layout.resize(400, 800)\n # layout.move(0, 0)\n # box = []\n # rplt = []\n # row_count = 0\n # for mus in range(nb_mus):\n # rplt.append(QProgressBar())\n # rplt[mus].setMaximum(1000)\n # layout.addWidget(rplt[mus], row=mus, col=0)\n # layout.show()\n # row_count += 1\n # return rplt, layout, app\n\n # --- Bar graph --- #\n app = pg.mkQApp()\n layout = pg.plot()\n layout.resize(800, 800)\n rplt = pg.BarGraphItem(x=range(nb_mus), height=np.zeros((nb_mus)), width=0.3, brush=\"r\")\n layout.addItem(rplt)\n return rplt, layout, app", "def FG2semiFG(self, FG, flag_FG2semiFG):\n # USER IMPOSED\n # Actual FG\n if 'JUSTE FG' in flag_FG2semiFG:\n return FG\n # Antonio's files where a 'phase' parameter is used to permit changin\n # evolution condition (CR insertion) at 2 points: 15,30 MWd/tU thus\n # defining 3cicles\n if 'ESTEBAN' in flag_FG2semiFG:\n \"\"\"\n Esteban's files. 'phase' and 'burnup step' and 'burnup' depends on a single parameter. 
No additional point required\n Example\n input FG:[[1], [1], [1], [1], [1], [1], [1], [1], [1, 2,..., 70]]\n Output FG:[[1], [1], [1], [1], [1], [1], [1], [1], array([ 1, 2,..., 70, 71, 72])]\n \"\"\"\n FG[6] = [1] # the PHASE is a dependant variables and thus does not participates in the cartesian product\n FG[7] = [1] # the BURNUP_step is a dependant variable...\n # Antonio's files where a 'phase' parameter is used to permit changin\n # evolution condition (CR insertion) at 2 points: 15,30 MWd/tU thus\n # defining 3cicles\n if 'ANTONIO' in flag_FG2semiFG:\n \"\"\"\n Antonio's files. 'phase' and 'burnup step' and 'burnup' depends on rules over a special index\n Example\n input FG:[[1], [1], [1], [1], [1], [1], [1], [1], [1, 2,..., 70]]\n Output FG:[[1], [1], [1], [1], [1], [1], [1], [1], array([ 1, 2,..., 70, 71, 72])]\n Two points are added, as A2 calculation at 0 burnup are mandatory, and at each new cicle a new 0 burnup calcvulation takes place\n \"\"\"\n FG[6] = [1] # the PHASE is a dependant variables and thus does not participates in the cartesian product\n FG[7] = [1] # the BURNUP_step is a dependant variable...\n # the phase depended on the index in burnup by this relationship. The number 24,48 are the indexed defined by the discretization criterion of burnup chosen by the user of A2\n # print FG\n if FG[8][-1] <= 24:\n ph = 1\n if FG[8][-1] > 24 and FG[8][-1] <= 48:\n ph = 2\n if FG[8][-1] > 48:\n ph = 3\n # print ph\n # it adds indexes as required by ph. if ph=1 then nothing gets added, if\n # ph=3 and FG[8][-1]=70 then '71' and '72' get added\n FG[8] = np.append(FG[8], range(FG[8][-1] + 1, FG[8][-1] + ph))\n if 'CR' in flag_FG2semiFG:\n \"\"\"\n In reference calculation where CR position may change, the lecture of the phase space given a CR value of [1,2] i.e. in and out. 
However no branch calculation acctualy takes place, and un-existing points are requested.\n \"\"\"\n FG[4] = [1]\n if 'NO BURNUP' in flag_FG2semiFG:\n FG[8] = [1]\n # Format: Making sure that index are int\n return [[int(value) for value in vec] for vec in FG]", "def set_f_exp(self):\n self.flow_data.velocity = self.flow_data.flow / self.exh.area\n self.flow_data.set_TempPres_dependents()\n self.flow_data.Re_D = ( self.flow_data.velocity *\n self.flow_data.rho * self.exh.D / self.flow_data.mu )\n self.flow_data.f_exp = ( self.flow_data.pressure_drop * 1.e3 /\n (0.5 * self.exh.length * self.exh.perimeter / self.exh.area *\n self.flow_data.rho * self.flow_data.velocity**2) )", "def test_guider_start_ffsOpen(self):\n sopTester.updateModel('mcp', TestHelper.mcpState['boss_science'])\n self._guider_start(5, 17, 0, 0)", "def float_to_front(qtile):\n for window in qtile.current_group.windows:\n if window.floating:\n window.cmd_bring_to_front()", "def setUseGizmos(value=True):\n global cc\n cc = not value", "def artFluidAttrCtx(*args, accopacity: bool=False, activeListChangedProc: Union[AnyStr,\n bool]=\"\", afterStrokeCmd: Union[AnyStr, bool]=\"\", alphaclamp: Union[AnyStr,\n bool]=\"none\", alphaclamplower: Union[float, bool]=0.0, alphaclampupper:\n Union[float, bool]=1.0, attrSelected: Union[AnyStr, bool]=\"\", autoSave:\n Union[AnyStr, bool]=\"\", beforeStrokeCmd: Union[AnyStr, bool]=\"\",\n brushalignment: bool=True, brushfeedback: bool=True, clamp: Union[AnyStr,\n bool]=\"none\", clamplower: Union[float, bool]=0.0, clampupper: Union[float,\n bool]=1.0, clear: bool=True, colorAlphaValue: Union[float, bool]=0.0,\n colorRGBAValue: Union[List[float, float, float, float], bool]=None,\n colorRGBValue: Union[List[float, float, float], bool]=None, colorRamp:\n Union[AnyStr, bool]=\"\", colorfeedback: bool=False, colorfeedbackOverride:\n bool=False, colorrangelower: Union[float, bool]=0.0, colorrangeupper:\n Union[float, bool]=1.0, currentPaintableFluid: Union[AnyStr, bool]=\"\",\n dataTypeIndex: Union[int, bool]=0, delaySelectionChanged: bool=True,\n disablelighting: bool=False, displayAsRender: bool=True, displayVelocity:\n bool=True, doAutoSave: bool=True, dragSlider: AnyStr=\"\", duringStrokeCmd:\n Union[AnyStr, bool]=\"\", dynclonemode: bool=True, exists: bool=True,\n expandfilename: bool=True, exportaspectratio: Union[float, bool]=0.0,\n exportfilemode: Union[AnyStr, bool]=\"luminance/rgb\", exportfilesave:\n AnyStr=\"\", exportfilesizex: Union[int, bool]=0, exportfilesizey: Union[int,\n bool]=0, exportfiletype: Union[AnyStr, bool]=\"\", filterNodes: bool=True,\n history: bool=True, image1: Union[AnyStr, bool]=\"\", image2: Union[AnyStr,\n bool]=\"\", image3: Union[AnyStr, bool]=\"\", importfileload: AnyStr=\"\",\n importfilemode: Union[AnyStr, bool]=\"alpha\", importreassign: bool=False,\n interactiveUpdate: bool=True, lastRecorderCmd: Union[AnyStr, bool]=\"\",\n lastStampName: Union[AnyStr, bool]=\"\", lowerradius: Union[float, bool]=0.0,\n makeStroke: Union[int, List[int], bool]=0, mappressure: Union[AnyStr,\n bool]=\"none\", maxvalue: Union[float, bool]=1.0, minvalue: Union[float,\n bool]=0.0, name: AnyStr=\"\", objattrArray: Union[AnyStr, bool]=\"\", opacity:\n Union[float, bool]=1.0, outline: bool=True, outwhilepaint: bool=False,\n paintNodeArray: Union[AnyStr, bool]=\"\", paintattrselected: AnyStr=\"\",\n paintmode: Union[AnyStr, bool]=\"screen\", paintoperationtype: Union[AnyStr,\n bool]=\"Paint\", pickColor: bool=True, pickValue: bool=True, playbackCursor:\n Union[List[float, float], 
List[List[float, float]], bool]=None,\n playbackPressure: Union[float, List[float], bool]=0.0, preserveclonesource:\n bool=True, profileShapeFile: Union[AnyStr, bool]=\"\", projective: bool=False,\n property: Union[AnyStr, bool]=\"\", radius: Union[float, bool]=1.0,\n rampMaxColor: Union[List[float, float, float], bool]=None, rampMinColor:\n Union[List[float, float, float], bool]=None, record: bool=True, reflection:\n bool=False, reflectionaboutorigin: bool=True, reflectionaxis: Union[AnyStr,\n bool]=\"x\", rgbValue: Union[List[float, float, float], bool]=None,\n screenRadius: Union[float, bool]=0.0, selectclonesource: bool=True,\n selectedattroper: Union[AnyStr, bool]=\"absolute\", showactive: bool=True,\n stampDepth: Union[float, bool]=0.0, stampProfile: Union[AnyStr, bool]=\"\",\n stampSpacing: Union[float, bool]=1.0, strokesmooth: Union[AnyStr, bool]=\"\",\n surfaceConformedBrushVertices: bool=True, tablet: bool=True,\n tangentOutline: bool=True, toolOffProc: Union[AnyStr, bool]=\"\", toolOnProc:\n Union[AnyStr, bool]=\"\", useColorRamp: bool=True, useMaxMinColor: bool=True,\n useStrokeDirection: bool=True, usepressure: bool=False, value: Union[float,\n bool]=0.0, velocity: Union[List[float, float, float], bool]=None,\n whichTool: Union[AnyStr, bool]=\"\", worldRadius: Union[float, bool]=0.0,\n q=True, query=True, e=True, edit=True, **kwargs)->Union[None, Any]:\n pass", "def segmentNeedle(self):\n #productive #event\n profprint()\n if self.fiducialButton.isEnabled():\n print \"new checked state: \",not self.fiducialButton.checked\n self.onStartStopGivingNeedleTipsToggled(not self.fiducialButton.checked)" ]
[ "0.56922233", "0.54869103", "0.5327231", "0.5301241", "0.52902806", "0.52806854", "0.52649724", "0.5241042", "0.5236491", "0.52350116", "0.5205736", "0.5187515", "0.517721", "0.5160973", "0.5148137", "0.5135481", "0.51285934", "0.51083225", "0.5073106", "0.5068788", "0.5058575", "0.5057781", "0.50527257", "0.50441545", "0.50282645", "0.50250334", "0.50101626", "0.5007648", "0.50043297", "0.49866042", "0.49715844", "0.49704993", "0.49680015", "0.49643153", "0.49638706", "0.495764", "0.49558094", "0.49445888", "0.49401978", "0.49393222", "0.49380484", "0.4928959", "0.49279308", "0.49261242", "0.49241832", "0.4908303", "0.49019", "0.4901706", "0.48923945", "0.48807535", "0.4880184", "0.4878454", "0.48738366", "0.48696315", "0.48684195", "0.4867967", "0.48651835", "0.48611557", "0.48568207", "0.4848095", "0.48470727", "0.48357138", "0.48299783", "0.4821877", "0.48171332", "0.48140204", "0.48134825", "0.48081908", "0.48078844", "0.48047534", "0.4793818", "0.4787285", "0.47860718", "0.47835022", "0.47822648", "0.47769636", "0.47760272", "0.4774478", "0.4769074", "0.47655806", "0.4765268", "0.47631893", "0.47616652", "0.47615886", "0.4760704", "0.47556", "0.47555444", "0.47552428", "0.47546914", "0.47534114", "0.47524783", "0.4747605", "0.4742785", "0.47399327", "0.47372648", "0.4734204", "0.47315544", "0.47284997", "0.47253278", "0.47211054", "0.47201604" ]
0.0
-1
axis status, axis init, slew
def test_goto_field_boss_slew(self):
        sopTester.updateModel('mcp', TestHelper.mcpState['all_off'])
        cmdState = self.actorState.gotoField
        cmdState.reinitialize(self.cmd)
        cmdState.doGuider = False
        cmdState.doHartmann = False
        cmdState.doCalibs = False
        cmdState.arcTime = 0
        cmdState.flatTime = 0
        self._goto_field_boss(3, 26, 0, 0, cmdState)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self, axis=-1):\n self.axis = axis", "def __init__(self, axes=()):\n self._axes = []\n self._dimension = 0\n for axis in axes:\n self.add_axis(axis)", "def _set_axes(self):\n self += helper.line(stroke=\"black\", x1=self.__dict__['x'], x2=self.__dict__['x'], y1=0, y2=self.__dict__['y']*2)\n self += helper.line(stroke=\"black\", x1=0, x2=self.__dict__['x']*2, y1=self.__dict__['y'], y2=self.__dict__['y'])", "def _handle_setup_axis(self, axis_args):\n axis_name = axis_args['name']\n axes_dict = self.server.axes\n\n if axis_name not in [name for name, _ in axes_dict.items()]:\n print \"Adding a new axis:\", axis_name\n axis_count = len(axes_dict)\n newaxis = self.server.figure.add_subplot(axis_count+1, 1, axis_count+1)\n axes_dict[axis_name] = newaxis\n axes_dict[axis_name].grid(True)\n axes_dict[axis_name].set_xlabel(axis_args['x_label'])\n axes_dict[axis_name].set_ylabel(axis_args['y_label'])\n # TODO: support *.set_title(\"Title\")\n if FLAGS.logy:\n axes_dict[axis_name].set_yscale('log', nonposy='clip')\n\n if axis_count != 0:\n # Resize other axes if the above wasn't the first.\n axis_count = len(axes_dict)\n for row,(name, _) in enumerate(axes_dict.items(), 1):\n print name, axis_count, row\n axes_dict[name].change_geometry(axis_count, 1, row)", "def __init__(self):\n self.isMoving = 0#0 is stop, 1 is moving forward, -1 is moving backward\n self.isRoutating = False\n pygame.init()\n pygame.joystick.init()\n self.controller = pygame.joystick.Joystick(0)\n self.controller.init()\n if not self.axis_data:\n self.axis_data = {}\n\n if not self.button_data:\n self.button_data = {}\n for i in range(self.controller.get_numbuttons()):\n self.button_data[i] = False\n\n if not self.hat_data:\n self.hat_data = {}\n for i in range(self.controller.get_numhats()):\n self.hat_data[i] = (0, 0)", "def __init__(self, axes: int):\n self.axes = axes", "def _InitAxes( self ):\n self.ax = self.fig.add_subplot( 111 )", "def pylab_setup(figure, stream_data, original_width, runlimits, runflags):\n\n def on_key(event):\n \"\"\"on_key\"\"\"\n print('you pressed', event.key, event.xdata, event.ydata)\n\n #def diag_event(event):\n # \"\"\"diag_event\"\"\"\n # print event.name\n # if hasattr(event, 'height'):\n # print event.height, event.width\n # print event.name, event.canvas, event.guiEvent\n\n def pause_axis(unused_event):\n \"\"\"pause_axis\"\"\"\n # stops update of axis when updating lines\n # allows smooth scrolling by user\n print \"PAUSE pause axis\"\n runflags.update_axis = False\n\n def unpause_axis(event):\n \"\"\"unpause_axis\"\"\"\n # continues updating scrolling\n print \"RESUME axis\"\n runflags.update_axis = True\n if hasattr(event, 'height'):\n print event.height, event.width\n new_ratio = float(event.width)/float(event.height)\n default_ratio = 1.3\n print \"BEFORE: \", FLAGS.width\n FLAGS.width = original_width * new_ratio / default_ratio\n print \"AFTER: \", FLAGS.width\n\n figure.canvas.mpl_connect('key_press_event', on_key)\n figure.canvas.mpl_connect('resize_event', unpause_axis)\n figure.canvas.mpl_connect('scroll_event', pause_axis)\n\n timer = figure.canvas.new_timer(interval=500)\n timer.add_callback(plot_refresh_handler, (stream_data, runlimits, runflags))\n timer.start()\n print \"SHOW\"\n pylab.show()\n print \"AFTER\"", "def axInit():\n ax.init()", "def setup(self, flags):\n self.figure = pylab.figure(1)\n self.axes = {}\n self.stream_data = {}\n self.flags = flags", "def toggle_axis(self):\n # cycle through three axis states\n states = [False, 'world', 'all']\n # the 
state after toggling\n index = (states.index(self.view['axis']) + 1) % len(states)\n # update state to next index\n self.view['axis'] = states[index]\n # perform gl actions\n self.update_flags()", "def __init__( self, container, id = -1, **kwargs ):\n\n self.ax = None\n self.axline = None # axis line representing state\n self.canvas = None\n self.curSize = None\n self.cursor = None\n self.cursorLine = None # axis line following the cursor\n self.cursorLine2 = None # axis line following the cursor\n self.fig = None\n self.timer = None\n self.toolbar = None\n\n self.callbackIds = {}\n #self.isLoaded = False\n self.refAxis = kwargs.get( 'ref_axis', 'y' )\n self.refAxis2 = kwargs.get( 'ref_axis2' )\n self.showCursor = kwargs.get( 'show_cursor', True )\n #self.stateIndex = -1\n self.timeValue = -1.0\n self.titleFontSize = 16\n\n super( PlotWidget, self ).__init__( container, id )", "def initAxisValues(self, axis):\n \n if (axis != None):\n if self.isTime:\n self.axisValues = [repr(t.tocomponent())\n for t in axis.asRelativeTime()]\n else:\n self.axisValues = axis.getValue()\n else:\n raise TypeError(\"Error: axis is not defined\")\n\n self.axisIndices = range(len(self.axisValues))\n self.updateMin(0)\n self.updateMax(len(self.axisValues) - 1)", "def _update_axes(self):\n data_shape = self.data.shape\n if len(self.axes) < self.data.ndim + 1:\n self._axes.append(Axis())\n for index in range(self.data.ndim):\n if len(self.axes[index].values) != data_shape[index]:\n self.axes[index].values = np.arange(data_shape[index],\n dtype=np.float64)", "def _configure_axis(self, source, title):\n conf = source.conf[title]\n if source.data_type[title] == 'image':\n self.plot.getView().invertY(True)\n else:\n self.plot.getView().invertY(False)\n if(self.settingsWidget.ui.flipy.currentText() == 'Yes' or\n (self.settingsWidget.ui.flipy.currentText() == 'Auto' and\n \"flipy\" in conf and conf['flipy'] == True)):\n self.plot.getView().invertY(not self.plot.getView().getViewBox().yInverted())\n if(self.settingsWidget.ui.flipx.currentText() == 'Yes' or\n (self.settingsWidget.ui.flipx.currentText() == 'Auto' and\n \"flipx\" in conf and conf['flipx'] == True)):\n self.plot.getView().invertX(not self.plot.getView().getViewBox().xInverted())\n\n # Tranpose images to make x (last dimension) horizontal\n axis_labels = ['left', 'bottom']\n xlabel_index = 0\n ylabel_index = 1\n if (source.data_type[title] == 'image') or (source.data_type[title] == 'triple'):\n xlabel_index = (xlabel_index+1)%2\n ylabel_index = (ylabel_index+1)%2\n\n if(self.settingsWidget.ui.transpose.currentText() == 'Yes' or\n (self.settingsWidget.ui.transpose.currentText() == 'Auto' \n and \"transpose\" in conf)):\n xlabel_index = (xlabel_index+1)%2\n ylabel_index = (ylabel_index+1)%2\n\n self.x_axis_name = axis_labels[xlabel_index]\n self.y_axis_name = axis_labels[ylabel_index]\n if(self.actionX_axis.isChecked()):\n if(self.settingsWidget.ui.x_label_auto.isChecked() and \n \"xlabel\" in conf):\n self.plot.getView().setLabel(axis_labels[xlabel_index], conf['xlabel']) #pylint: disable=no-member\n else:\n self.plot.getView().setLabel(axis_labels[xlabel_index], self.settingsWidget.ui.x_label.text()) #pylint: disable=no-member\n\n if(self.actionY_axis.isChecked()):\n if(self.settingsWidget.ui.y_label_auto.isChecked() and \n \"ylabel\" in conf):\n self.plot.getView().setLabel(axis_labels[ylabel_index], conf['ylabel']) #pylint: disable=no-member\n else:\n self.plot.getView().setLabel(axis_labels[ylabel_index], self.settingsWidget.ui.y_label.text()) #pylint: 
disable=no-member", "def front_wheel_from_axis():", "def effect(self):\n AxisType = self.options.AxisType\n AxisDescription = self.options.AxisDescription\n AxisUnit = self.options.AxisUnit\n AxisLabel = self.options.AxisLabel\n AxisMaxValue = self.options.AxisMaxValue\n AxisMinValue = self.options.AxisMinValue\n AxisScale = self.options.AxisScale\n \n \n for id, node in self.selected.iteritems():\n axis = node #TODO: This selection should be further tested\n axis.set(inkex.addNS(\"Type\",\"TimeAnalysis\"), \"Axis\")\n axis.set(inkex.addNS(\"AxisType\",\"TimeAnalysis\"), AxisType)\n axis.set(inkex.addNS(\"AxisDescription\",\"TimeAnalysis\"), AxisDescription)\n #TODO: The label should be unique.\n axis.set(inkex.addNS(\"AxisLabel\",\"TimeAnalysis\"), AxisLabel) \n axis.set(inkex.addNS(\"AxisUnit\",\"TimeAnalysis\"), AxisUnit)\n axis.set(inkex.addNS(\"AxisMaxValue\",\"TimeAnalysis\"), AxisMaxValue)\n axis.set(inkex.addNS(\"AxisMinValue\",\"TimeAnalysis\"), AxisMinValue)\n axis.set(inkex.addNS(\"AxisScale\",\"TimeAnalysis\"), AxisScale)\n # sys.stderr.write(\"The max value of the axis is: \" + str(axis.get(inkex.addNS(\"AxisMaxValue\",\"TimeAnalysis\"))))", "def enableaxes(self):\n debug('ControllerStartup.enableaxes()')\n if not self.pidevice.HasEAX() or self.prop['skipeax']:\n return\n for axis in self.pidevice.axes:\n try:\n self.pidevice.EAX(axis, True)\n except GCSError as exc:\n if exc != gcserror.E2_PI_CNTR_UNKNOWN_COMMAND:\n raise\n waitonready(self.pidevice, **self._kwargs)", "def __init__(self):\n self.rot_axis = 1", "def _handle_axes(self, drawable, option):\n # If we already have an axes object, ignore this one\n if self._axes_object is not None:\n return\n\n # Grab the histogram used for axes style/range manipulation\n if is_stack(drawable) or is_graph(drawable):\n axes_histogram = drawable.GetHistogram()\n else:\n axes_histogram = drawable\n\n # Grab the histogram used for title manipulation\n if is_stack(drawable):\n title_histogram = drawable.GetHists()[0]\n else:\n title_histogram = drawable\n\n # Set the plot title\n title_histogram.SetTitle(self._title)\n\n # Grab axes\n x_axis, y_axis = axes_histogram.GetXaxis(), axes_histogram.GetYaxis()\n\n # Grab titles from first histogram if not set explicitly\n if self._x_title is None:\n self._x_title = title_histogram.GetXaxis().GetTitle()\n if self._y_title is None:\n self._y_title = title_histogram.GetYaxis().GetTitle()\n\n # Style x-axis, or hide it if this plot has a ratio plot\n if self._x_range is not None:\n x_axis.SetRangeUser(*self._x_range)\n if self._ratio_plot:\n x_axis.SetLabelOffset(999)\n x_axis.SetTitleOffset(999)\n else:\n x_axis.SetTitle(self._x_title)\n x_axis.SetTitleSize(self.PLOT_X_AXIS_TITLE_SIZE)\n x_axis.SetTitleOffset(self.PLOT_X_AXIS_TITLE_OFFSET)\n x_axis.SetLabelSize(self.PLOT_X_AXIS_LABEL_SIZE)\n if self._x_integer_ticks:\n x_axis.SetNdivisions(11) # hack for integer ticks \n\n # Style y-axis\n y_axis.SetTitle(self._y_title)\n y_axis.SetLabelFont(self.PLOT_ATLAS_STAMP_TEXT_FONT)\n y_axis.SetTitleSize(\n (self.PLOT_Y_AXIS_TITLE_SIZE_WITH_RATIO\n if self._ratio_plot\n else self.PLOT_Y_AXIS_TITLE_SIZE)\n )\n y_axis.SetTitleOffset(\n (self.PLOT_Y_AXIS_TITLE_OFSET_WITH_RATIO\n if self._ratio_plot\n else self.PLOT_Y_AXIS_TITLE_OFFSET)\n )\n y_axis.SetNdivisions(5,5,0)\n \n # set axis text sizes \n if self._ratio_plot:\n y_axis.SetLabelSize(self.PLOT_Y_AXIS_LABEL_SIZE_WITH_RATIO)\n else:\n y_axis.SetLabelSize(self.PLOT_Y_AXIS_LABEL_SIZE) \n y_axis.SetTitleSize(self.PLOT_Y_AXIS_TITLE_SIZE)\n 
y_axis.SetTitleOffset(self.PLOT_RATIO_Y_AXIS_TITLE_OFFSET)\n\n # Redraw the drawable with the new style\n drawable.Draw(option)", "def tick(self):", "def getAxisValuesEvent(self): \n varID = self.myParent.getVar().id\n axisVar = MV2.array(self.axis)\n axisVar.setAxis(0, self.axis)\n axisVar.id = varID +'_' + self.axis.id + '_axis'\n\n # Generate teaching command string\n fileID = 'fid2'\n teachingCommand = \"\\n## Getting axis %s\\n\" % self.axis.id\n teachingCommand += \"%s = MV2.array(%s[\\\"%s\\\"].getAxisList(axes = \\\"%s\\\")[0][:])\\n\" % (axisVar.id, fileID, varID, self.axis.id)\n teachingCommand += \"%s.setAxis(0, %s[\\\"%s\\\"].getAxisList(axes = \\\"%s\\\")[0])\\n\" % (axisVar.id, fileID, varID, self.axis.id)\n teachingCommand += \"%s.id = \\\"%s\\\"\\n\" % (axisVar.id, axisVar.id)\n\n # Record teaching commands associate 'get axis values' and\n # define a new variable/tab with only the axis' values \n self.myParent.defineVarAxis(axisVar, teachingCommand)", "def _finalize_axis(self, key, **kwargs):\n axis = self.handles['axis']\n self.handles['fig'].set_frameon(False)\n axis.grid(self.show_grid)\n axis.view_init(elev=self.elevation, azim=self.azimuth)\n try:\n axis._dist = self.distance\n except Exception:\n # axis.dist is deprecated see here:\n # https://github.com/matplotlib/matplotlib/pull/22084\n axis.dist = self.distance\n\n if self.xaxis is None:\n axis.w_xaxis.line.set_lw(0.)\n axis.w_xaxis.label.set_text('')\n if self.yaxis is None:\n axis.w_yaxis.line.set_lw(0.)\n axis.w_yaxis.label.set_text('')\n if self.zaxis is None:\n axis.w_zaxis.line.set_lw(0.)\n axis.w_zaxis.label.set_text('')\n if self.disable_axes:\n axis.set_axis_off()\n\n if mpl_version <= Version('1.5.9'):\n axis.set_axis_bgcolor(self.bgcolor)\n else:\n axis.set_facecolor(self.bgcolor)\n return super()._finalize_axis(key, **kwargs)", "def __init__(self, axis: int, validate: bool = True):\n super().__init__(validate)\n self._axis = axis", "def _checkForSixaxis():\n return sixaxis.init(\"/dev/input/js1\")", "def calc_axes(self):\n self.y_axis = np.linspace(0, self.image_shape[0] - 1, self.image_shape[0])\n self.x_axis = np.linspace(0, self.image_shape[1] - 1, self.image_shape[1])\n if hasattr(self, 'pixelsize'):\n self.y_axis *= self.pixelsize[0]\n self.x_axis *= self.pixelsize[1]\n\n # %%RETRIEVING FUNCTIONS", "def drawAxes(t):\r\n t.speed(0)\r\n t.pd()\r\n t.forward(500)\r\n t.back(500)", "def set_up(self):\n self.h, = self.ax.plot(self.x, lw=2)\n self.ax.set_ylim(0,100)\n self.ax.set_xlim(0,100)\n self.ax.title.set_text(self.config[\"title\"])\n self.ax.set_xlabel(self.config[\"x_label\"])\n self.ax.set_ylabel(self.config[\"y_label\"])", "def populate_plot_axis(self,plot,ax='x'):\n\n fig=plt.gcf()\n\n extra_ax=[]\n\n if ax=='x':\n\n ticks=plot.get_xticks()\n\n lim=plot.get_xlim()\n\n for i in range(len(self.names)):\n\n if i==0:\n\n axn=plot\n\n axn.spines['bottom'].set_position(('outward',10))\n\n axn.spines['bottom'].set_visible(True)\n\n else:\n\n dy_fig=0.08\n\n prev_ax_position=axn.get_position()\n\n extra_ax.append(fig.add_axes(\\\n (prev_ax_position.x0,\\\n prev_ax_position.y0-2*dy_fig,\\\n prev_ax_position.width,\\\n 0),'autoscalex_on',True))\n\n axn=extra_ax[i-1]\n\n axn.yaxis.set_visible(False)\n\n for side in axn.spines.keys():\n\n axn.spines[side].set_linewidth(1)\n\n axn.set_xticks(ticks)\n\n ticksnames=[float(str(x)) for x in self.values[i]]\n\n axn.set_xticklabels(\\\n [\"{:.2f}\".format(x).rstrip('0').rstrip('.') for x in ticksnames],\\\n rotation = 45)\n\n 
xlab=axn.set_xlabel(self.names[i])\n\n xlab.set_fontsize(10)\n\n axn.tick_params(axis='x',labelsize=10)\n\n axn.set_xlim(lim)\n\n\n\n elif ax=='y':\n\n ticks=plot.get_yticks()\n\n lim=plot.get_ylim()\n\n for i in range(len(self.names)):\n\n if i==0:\n\n axn=plot\n\n axn.spines['left'].set_position(('outward',10))\n\n axn.spines['left'].set_visible(True)\n\n else:\n\n dx_fig=0.08\n\n plot_position=plot.get_position()\n\n prev_ax_position=axn.get_position()\n\n extra_ax.append(fig.add_axes(\\\n (prev_ax_position.x0-2*dx_fig,\\\n prev_ax_position.y0,\\\n 0,\\\n prev_ax_position.height),'autoscalex_on',True))\n\n axn=extra_ax[i-1]\n\n axn.xaxis.set_visible(False) # hide the yaxis\n\n for side in axn.spines.keys(): # 'top', 'bottom', 'left', 'right'\n\n axn.spines[side].set_linewidth(1)\n\n axn.set_yticks(ticks)\n\n ticksnames=[float(str(x)) for x in self.values[i]]\n\n axn.set_yticklabels(\\\n [\"{:.2f}\".format(x).rstrip('0').rstrip('.') for x in ticksnames],\\\n rotation = 45)\n\n ylab=axn.set_ylabel(self.names[i])\n\n ylab.set_fontsize(10)\n\n axn.tick_params(axis='y',labelsize=10)\n\n axn.set_ylim(lim)\n\n else:\n\n raise ValueError(\"Axis can be 'x' or 'y'\")", "def __init__(self, skin_directory):\n self.ax = None\n self.generate_axis()\n self.skin_directory = skin_directory\n self.figure = plt.gcf()", "def set_axes(self, a):\r\n self.axes = a", "def side_wheel_from_axis():", "def setup_axes():\n\taxes = visuals.subplots(1, 2, figsize = (14, 7))\n\taxes[1].set_yscale(\"log\")\n\taxes[0].set_xlabel(\"[Fe/H]\")\n\taxes[0].set_ylabel(\"[Sr/Fe]\")\n\taxes[1].set_xlabel(\"[Sr/Fe]\")\n\taxes[1].set_ylabel(\"Stellar Probability Density\")\n\taxes[0].set_xlim([-2.2, 0.2])\n\taxes[0].set_ylim([-2.4, 0.4])\n\taxes[1].set_xlim([-1.4, 0.4])\n\taxes[1].set_ylim([0.05, 50])\n\treturn axes", "def __init__(self, vals, model, data_visualize, latent_axes=None, sense_axes=None, latent_index=[0,1]):\r\n if vals == None:\r\n vals = model.X[0]\r\n\r\n matplotlib_show.__init__(self, vals, axes=latent_axes)\r\n\r\n if isinstance(latent_axes,mpl.axes.Axes):\r\n self.cid = latent_axes.figure.canvas.mpl_connect('button_press_event', self.on_click)\r\n self.cid = latent_axes.figure.canvas.mpl_connect('motion_notify_event', self.on_move)\r\n self.cid = latent_axes.figure.canvas.mpl_connect('axes_leave_event', self.on_leave)\r\n self.cid = latent_axes.figure.canvas.mpl_connect('axes_enter_event', self.on_enter)\r\n else:\r\n self.cid = latent_axes[0].figure.canvas.mpl_connect('button_press_event', self.on_click)\r\n self.cid = latent_axes[0].figure.canvas.mpl_connect('motion_notify_event', self.on_move)\r\n self.cid = latent_axes[0].figure.canvas.mpl_connect('axes_leave_event', self.on_leave)\r\n self.cid = latent_axes[0].figure.canvas.mpl_connect('axes_enter_event', self.on_enter)\r\n\r\n self.data_visualize = data_visualize\r\n self.model = model\r\n self.latent_axes = latent_axes\r\n self.sense_axes = sense_axes\r\n self.called = False\r\n self.move_on = False\r\n self.latent_index = latent_index\r\n self.latent_dim = model.input_dim\r\n\r\n # The red cross which shows current latent point.\r\n self.latent_values = vals\r\n self.latent_handle = self.latent_axes.plot([0],[0],'rx',mew=2)[0]\r\n self.modify(vals)\r\n self.show_sensitivities()", "def setup_axes(self):\n fig = plt.figure(1)\n axs = fig.add_subplot(1, 1, 1)\n fig.clf()\n axs = plt.subplots(1, 2)\n ax1 : plt.axis = axs[0]\n ax2 : plt.axis = axs[1]\n fig.canvas.draw()\n \n line1_t, = ax1.plot([], label='train')\n line1_v, = ax1.plot([], label='val')\n\n 
ax1.set_title('Loss vs Iterations')\n ax1.set_xlabel('Iterations')\n ax1.set_ylabel('Loss')\n ax1.grid(True)\n ax1.autoscale()\n # ax1.legend()\n\n line2_t, = ax2.plot([], label='train')\n line2_v, = ax2.plot([], label='val')\n\n ax2.set_title('Accuracy vs Iterations')\n ax2.set_xlabel('Time')\n ax2.set_ylabel('Percent Accuracy')\n ax2.grid(True)\n ax2.autoscale()\n # ax2.legend()\n\n lines = [line1_t, line1_v, line2_t, line2_v]\n\n return fig, ax1, ax2, lines", "def __init__(self, time_axis: Union[np.ndarray, are_ax.Axis], state_vectors: np.ndarray):\n\n if isinstance(time_axis, np.ndarray):\n if time_axis.dtype == PreciseDateTime:\n time_axis_start = time_axis[0]\n relative_time_axis = (time_axis - time_axis_start).astype(float)\n self._time_axis = are_ax.Axis(relative_time_axis, time_axis_start)\n else:\n raise ValueError(\"Axis should be a vector of PreciseDateTime objects\")\n else:\n self._time_axis = time_axis\n\n _check_init_input(self._time_axis, state_vectors)\n\n # state_vector are stored as (3, N) numpy array\n self._state_vectors = np.vstack((state_vectors[::3], state_vectors[1::3], state_vectors[2::3]))\n self._interpolator = GeometryInterpolator(self._time_axis, self._state_vectors)", "def _plot_init(self):\n pass", "def _plot_init(self):\n pass", "def __init__(self):\n self.state_dim = 12\n self.measurement_dim = 6", "def _lazy_axis(self):\n raise NotImplementedError", "def draw_axes(self, cr):\n # en gris\n cr.set_line_width(0.02)\n cr.set_source_rgb(0.3, 0.3, 0.3)\n cr.move_to( -1,0 )\n cr.line_to( 1,0 )\n cr.move_to( 0, -1 )\n cr.line_to( 0, 1 )\n cr.stroke()\n #self.draw_value( cr, \"0\", 0, 0 )\n #self.draw_value( cr, \"1\", 5-0.3, 0 )\n #self.draw_value( cr, \"2\", 2+0.3, 4-0.5 )", "def on_axes_update(self):\n\n if self.connected:\n tab_open = self.tab_open()\n\n # Update axes\n for i, series in enumerate(self.measurements_list):\n if i == tab_open:\n self.chart_list[i].setXRange(self.worker.start_range,\n self.worker.samples_count + NUM_GUI_SAMPLES, padding=0.075)\n\n # for i, series in enumerate(self.measurements_list):\n #\n # # An optimization to prevent unnecessary rendering\n # if i == tab_open:\n #\n # # Remove old x-axis\n # series.detachAxis(self.xaxis_list[i])\n # self.chart_list[i].chart().removeAxis(self.xaxis_list[i])\n # self.xaxis_list[i] = QValueAxis()\n #\n # # Add new x-axis\n # self.chart_list[i].chart().addAxis(self.xaxis_list[i], Qt.AlignBottom)\n # self.xaxis_list[i].setRange(self.worker.samples_count, self.worker.samples_count +\n # NUM_GUI_SAMPLES)\n # series.attachAxis(self.xaxis_list[i])", "def _get_axis_state_ps90(self, control_unit: int, axis: int) -> Tuple[Union[int, bool, str]]:\n control_unit = ctypes.c_long(control_unit)\n axis = int(axis)\n axis = ctypes.c_long(axis)\n sleep(time_ps_delay)\n res = self.lib.PS90_GetAxisState(control_unit, axis)\n error = self.__get_read_error_ps90(control_unit)\n if error != 0:\n res = False\n return res, self._error_OWIS_ps90(error, 1)", "def __init__(self,options,pos):\n self.options = options\n numobjects = pos.shape[1]\n plt.ion() # turn on interactive plotting mode\n dpi=72.0 # set dpi (I think this is appropriate on mac)\n # fig accepts size in inches\n # so divide desired pixel width, height by dpi to get inches\n w,h=(self.options.width/dpi,self.options.height/dpi)\n fig = plt.figure(1,figsize=(w,h),dpi=dpi)\n fig.clear()\n\n #w = self.options.width/fig.get_dpi() # desired width in inches\n #h = self.options.height/fig.get_dpi() # desired height in inches\n #fig.set_size_inches(w,h,forward=True) 
# last arg resizes the canvas to match\n\n self.ax = plt.axes()\n self.ax.set_xlim(self.options.xmin,self.options.xmax)\n self.ax.set_ylim(self.options.ymin,self.options.ymax)\n #pyplot.axis('scaled')\n\n # I don't know why axis('scaled') doesn't work here\n # But I think the next two commands are equivalent\n self.ax.set_aspect('equal', adjustable='box', anchor='C')\n self.ax.set_autoscale_on(False)\n\n #self.redraw()\n\n\n #facecolors = [cm.jet(x) for x in np.random.rand(len(vicon_objects))]\n facecolors = [cm.jet(x) for x in np.linspace(0,1,numobjects)]\n if self.options.visualize_switch_xy:\n if self.options.axis==1:\n self.ax.axvline(linewidth=4, c='k')\n else:\n self.ax.axhline(linewidth=4, c='k')\n self.col = plt.scatter(pos[:,1],pos[:,0],c=facecolors,s=3000)\n else:\n if self.options.axis==1:\n self.ax.axhline(linewidth=4, c='k')\n else:\n self.ax.axvline(linewidth=4, c='k')\n self.col = plt.scatter(pos[:,0],pos[:,1],c=facecolors,s=3000)\n\n # scores\n self.tpos = self.ax.text(0.75*self.options.xmax,0.75*self.options.ymin,str(50),\n size=72,color='k',ha='center',va='center')\n self.tneg = self.ax.text(0.75*self.options.xmin,0.75*self.options.ymin,str(50),\n size=72,color='k',ha='center',va='center')\n\n self.canvas = agg.FigureCanvasAgg(fig)\n self.canvas.draw()\n self.renderer = self.canvas.get_renderer()\n raw_data = self.renderer.tostring_rgb()\n\n pygame.init()\n \n self.window = pygame.display.set_mode((options.width,options.height), DOUBLEBUF)\n self.screen = pygame.display.get_surface()\n\n self.set_caption(\"Possession: Waiting for Vicon\")\n \n size = self.canvas.get_width_height()\n \n surf = pygame.image.fromstring(raw_data, size, \"RGB\")\n self.screen.blit(surf, (0,0))\n pygame.display.flip()", "def _update_ax(self):\n raise NotImplementedError(\"Implement _update_ax(self) in subclass\")", "def reflect(self, axis):\n if axis == \"x\":\n self.y = - self.y\n elif axis == \"y\":\n self.x = - self.x\n else:\n print(\"The argument axis only accepts values 'x' and 'y'!\")", "def __init__(self, x_axis_p, y_axis_p, x_speed=0, y_speed=0, direction=0):\r\n self.__x_axis = x_axis_p\r\n self.__y_axis = y_axis_p\r\n self.__X_speed = x_speed\r\n self.__y_speed = y_speed\r\n self.__direction = direction\r\n self.__lives = 3", "def __init__(self):\n\n # The Microsoft XBox 360 Wired controller has 11 buttons and 8 axes.\n # Buttons can be 0 (not pressed) or 1 (pressed)\n # Axes are floats and range between -1 and 1. Note that for LT and RT, their \"not pressed\" value is 1 and for the others it is 0. Cross keys only have values -1, 0, and 1. The others have be any value in between -1 and 1.\n num_buttons = 11\n num_axes = 8\n self.inputs = [0 for i in range(num_buttons + num_axes)]\n self.inputs[JoyInput.LT] = self.inputs[JoyInput.RT] = 1\n\n # Dictionary of saved inputs. 
If an input is not currently saved, you must set it to None.\n # For example, the LS_Y (\"left stick Y\") axis may be saved in self.saved[JoyInput.LS_Y]\n self.saved = {\n JoyInput.LS_Y: None,\n Joystick.RS_ANGLE: None,\n }\n\n # Field variables\n self.depth_state = None # stores the depth state\n self.depth_last_received = 0 # how long since the last depth state callback\n self.depth_pwm_input = 0 # tracks pwm given to depth thrusters\n\n # ROS Subscribers\n rospy.Subscriber(\"/joy\", Joy, self.joy_callback)\n rospy.Subscriber(Topic.YAW_STATE, Float64, self.yaw_state_callback)\n rospy.Subscriber(Topic.DEPTH_STATE, Float64, self.depth_state_callback)\n rospy.Subscriber(Topic.YAW_SETPOINT, Float64, self.yaw_setpoint_callback)\n rospy.Subscriber(Topic.DEPTH_SETPOINT, Int16, self.depth_setpoint_callback)\n\n # ROS Publishers\n # self.topics is a dictionary of dictionaries.\n # 'publisher' contains the rospy.Publisher()\n # 'msg' contains the Int16(), Float64(), or Bool() related to the publisher\n # Use self.publish() rather than using self.topics directly.\n self.topics = {\n Topic.YAW_PWM: {'publisher':rospy.Publisher(Topic.YAW_PWM, Int16, queue_size=10), 'msg':Int16()},\n Topic.YAW_PWM_FEEDBACK: {'publisher':rospy.Publisher(Topic.YAW_PWM_FEEDBACK, Int16, queue_size=10), 'msg':Int16()},\n Topic.YAW_PID: {'publisher':rospy.Publisher(Topic.YAW_PID, Bool, queue_size=10), 'msg':Bool()},\n Topic.YAW_SETPOINT: {'publisher':rospy.Publisher(Topic.YAW_SETPOINT, Float64, queue_size=10), 'msg':Float64()},\n\n Topic.DEPTH_PWM: {'publisher':rospy.Publisher(Topic.DEPTH_PWM, Int16, queue_size=10), 'msg':Int16()},\n Topic.DEPTH_PID: {'publisher':rospy.Publisher(Topic.DEPTH_PID, Bool, queue_size=10), 'msg':Bool()},\n Topic.DEPTH_SETPOINT: {'publisher':rospy.Publisher(Topic.DEPTH_SETPOINT, Int16, queue_size=10), 'msg':Int16()},\n }", "def listen(self):\n\n if not self.axis_data:\n self.axis_data = {}\n\n if not self.button_data:\n self.button_data = {}\n for i in range(self.controller.get_numbuttons()):\n self.button_data[i] = False\n\n if not self.hat_data:\n self.hat_data = {}\n for i in range(self.controller.get_numhats()):\n self.hat_data[i] = (0, 0)\n\n for event in pygame.event.get():\n if event.type == pygame.JOYAXISMOTION:\n self.axis_data[event.axis] = round(event.value,2)\n elif event.type == pygame.JOYBUTTONDOWN:\n self.button_data[event.button] = True\n elif event.type == pygame.JOYBUTTONUP:\n self.button_data[event.button] = False\n elif event.type == pygame.JOYHATMOTION:\n self.hat_data[event.hat] = event.value\n\n axis=self.axis_data\n\n if 0 in axis:\n self.x=axis[0]\n self.y=-axis[1]\n\n # Turbo\n if self.button_data[7]:\n self.x*=2\n self.y*=2\n # Start Camera\n if self.button_data[3]:\n subprocess.Popen([\"firefox\",otraip+\"/html\"],\n stdout=subprocess.PIPE,\n stdin=subprocess.PIPE,\n stderr=subprocess.PIPE)\n return \"camera\"\n\n # Measure\n if self.button_data[1]:\n return \"measure\"\n\n # Exit\n if self.button_data[2]:\n return \"exit\"\n return \"move \"+str(self.x)+\" \"+str(self.y)+\"\\n\"", "def _appendAxisDefinition(self, axis):\n length = len(axis)\n\n self.na_dict[\"NX\"].append(length)\n self.na_dict[\"XNAME\"].append(xarray_utils.getBestName(axis))\n\n # If only one item in axis values\n if length < 2:\n self.na_dict[\"DX\"].append(0)\n self.na_dict[\"NXDEF\"].append(length)\n self.na_dict[\"X\"].append(axis.data.tolist()) \n return\n\n incr = xarray_utils.get_interval(axis, 0, 1)\n\n for i in range(1, length):\n if (axis[i] - axis[i - 1]) != incr:\n 
self.na_dict[\"DX\"].append(0)\n self.na_dict[\"NXDEF\"].append(length)\n self.na_dict[\"X\"].append(axis.data.tolist())\n break\n\n else: # If did not break out of the loop\n max_length = length\n if length > 3: \n max_length = 3\n\n self.na_dict[\"DX\"].append(incr)\n self.na_dict[\"NXDEF\"].append(max_length)\n self.na_dict[\"X\"].append(axis[:max_length])", "def getAxis(self,axis):\n\n\t\tif axis == \"u\":\n\t\t\tif len(self.usr) != 0:\n\t\t\t\treturn np.append([0], self.usr)\n\n\t\tif axis == \"s\":\n\t\t\tif len(self.seg) != 0:\n\t\t\t\tif self.radiograph:\n\t\t\t\t\treturn self.seg\n\t\t\t\telse:\n\t\t\t\t\tfirst = self.seg[0] - 1.\n\t\t\t\t\treturn np.append([first], self.seg)\n\n\t\tif axis == \"c\":\n\t\t\tif len(self.cos) != 0:\n\t\t\t\tif self.radiograph:\n\t\t\t\t\treturn self.cos\n\t\t\t\telse:\n\t\t\t\t\tfirst = -1.\n\t\t\t\t\treturn np.append([first], self.cos)\n\n\t\tif axis == \"e\":\n\t\t\tif len(self.erg) != 0:\n\t\t\t\tfirst = self.erg[0] - 1.\n\t\t\t\treturn np.append([first], self.erg)\n\n\t\tif axis == \"t\":\n\t\t\tif len(self.tim) != 0:\n\t\t\t\tfirst = self.tim[0] - 1.\n\t\t\t\treturn np.append([first], self.tim)\n\n\t\tif axis == \"i\":\n\t\t\treturn self.cora\n\n\t\tif axis == \"j\":\n\t\t\treturn self.corb\n\n\t\tif axis == \"k\":\n\t\t\treturn self.corc\n\n\t\treturn []", "def axes_coupled(robot):\n\n target_ctrl_path = get_target_ctrl_path(robot)\n attr_name = 'axisCoupling'\n\n attr_path = target_ctrl_path + '.' + attr_name\n\n if not pm.objExists(attr_path):\n return False\n else:\n return pm.getAttr(attr_path)", "def _use_data_bounds_changed_for_axes(self):\n self.update_pipeline()", "def rotate_axis(self):\n try:\n self.obj.rotate(angle=self.rotation_speed * self.time_scale / self.refresh_rate, axis=vector(0, 1, 0))\n except ZeroDivisionError:\n print(\"ERROR: REFRESH_RATE is 0\")\n except (AttributeError, TypeError):\n print(\"ERROR: wrong arguments type while initializing!!\")", "def axes(self,a_len,b_len,c_len,beta):\n self.a = np.array([0,0,a_len])\n self.b = np.array([0,b_len,0])\n self.c = Ry(-beta) @ np.array([0,0,c_len])", "def axis(self):\r\n return self._arm.axis", "def __init__(self):\n #Call the base class constructor\n inkex.Effect.__init__(self)\n \n #Define custom namespace\n inkex.NSS['TimeAnalysis'] = 'http://www.arcex.no/workpackage/basin-analysis/'\n \n #Define string options\n self.OptionParser.add_option('-t', '--AxisType', action = 'store',\n type = 'string', dest = 'AxisType', default = '',\n help = 'Which axis would you like to define?')\n self.OptionParser.add_option('-s', '--AxisScale', action = 'store',\n type = 'string', dest = 'AxisScale', default = '',\n help = 'Is the axis logaritmic or linear?')\n self.OptionParser.add_option('-d', '--AxisDescription', action = 'store',\n type = 'string', dest = 'AxisDescription', default = '',\n help = 'How would you the describe the axis?')\n self.OptionParser.add_option('-u', '--AxisUnit', action = 'store',\n type = 'string', dest = 'AxisUnit', default = '',\n help = 'What unit is represented by you axis?')\n self.OptionParser.add_option('-l', '--AxisLabel', action = 'store',\n type = 'string', dest = 'AxisLabel', default = '',\n help = 'What would you like to call your axis')\n self.OptionParser.add_option('-x', '--AxisMaxValue', action = 'store',\n type = 'string', dest = 'AxisMaxValue', default = '',\n help = 'What is the maximum value represented by you axis?')\n self.OptionParser.add_option('-i', '--AxisMinValue', action = 'store',\n type = 'string', dest = 'AxisMinValue', default 
= '',\n help = 'What is the minimum value represented by you axis?')\n self.OptionParser.add_option('-a', '--Notebook', action = 'store',\n type = 'string', dest = 'Notebook', default = '',\n help = 'What is the minimum value represented by you axis?')", "def setupVariableAxes(self):\n if self.var is None:\n return\n \n if (self.axisList is None):\n self.axisList = self.var.getAxisList()\n self.axisOrder = range(len(self.axisList))\n\n self.clear() \n self.setAxesNames()\n \n # Iterate through the variables axes & init each axis widget\n axisIndex = 0\n for axis, axisName in zip(self.axisList, self.axesNames):\n # Create the axis widget\n axisWidget = QAxis(axis, axisName, axisIndex, self)\n axisWidget.setAxisButtonText(axisName)\n self.axisWidgets.append(axisWidget)\n\n # Setup the layout for each axis\n row = self.gridLayout.rowCount()\n self.gridLayout.addWidget(axisWidget.getAxisButton(), row, 0)\n self.gridLayout.addWidget(axisWidget, row, 1) \n self.gridLayout.addWidget(axisWidget.getAxisOperationsButton(), row, 2)\n\n # Create separator line between each axis widget\n vline = QtGui.QFrame()\n vline.setFrameStyle(QtGui.QFrame.HLine | QtGui.QFrame.Sunken)\n self.gridLayout.addWidget(vline, row+1, 0, 1,\n self.gridLayout.columnCount())\n\n axisIndex += 1\n\n self.gridLayout.setRowStretch(self.gridLayout.rowCount(), 1)", "def get_axis_vals(self):\n return self._x_axis, self._y_axis", "def get_axis(self):\n self.current_axis = self.gui.comboBox_axis.currentText()\n self.logger.debug('current axis:' + str(self.current_axis))\n\n if 'Stepper' in self.current_axis:\n #self.gui.groupBox_configurate.setEnabled(True)\n self.gui.groupBox_configurate.setStyleSheet(\"QGroupBox#Colored_configure {border: 1px solid blue; border-radius: 9px;}\")\n\n self.gui.groupBox_actions.setStyleSheet(\"QGroupBox default\")\n\n self.gui.stackedWidget_actions.setCurrentWidget(self.gui.page_configure_stepper)\n self.gui.stackedWidget_stepper.setCurrentWidget(self.gui.stackedWidgetMoving)\n self.gui.stackedWidgetMoving.setEnabled(False)\n\n if 'Z' in self.current_axis:\n #Disable the xy groupboxes, enable the z groupboxes,\n # choose the page_amplZ of the stackedWidget_configure\n self.gui.groupBox_XY.setEnabled(False)\n self.gui.groupBox_Z.setEnabled(True)\n\n self.gui.stackedWidget_configure.setCurrentWidget(self.gui.page_amplZ)\n\n self.gui.pushButton_up.setEnabled(False)\n self.gui.pushButton_down.setEnabled(False)\n self.gui.pushButton_left.setText('closer')\n self.gui.pushButton_right.setText('away')\n else:\n #Enable the xy groupboxes, disable the z groupboxes,\n # choose the page_amplXY of the stackedWidget_configure.\n\n self.gui.groupBox_XY.setEnabled(True)\n self.gui.groupBox_Z.setEnabled(False)\n\n self.gui.stackedWidget_configure.setCurrentWidget(self.gui.page_amplXY)\n\n self.gui.pushButton_up.setEnabled(True)\n self.gui.pushButton_down.setEnabled(True)\n self.gui.pushButton_left.setText('left')\n self.gui.pushButton_right.setText('right')\n\n elif 'Scanner' in self.current_axis:\n #Choose the page_move_scanner of the stackedWidget_actions and the stackedWidgetEmpty of the stackedWidget_stepper\n self.gui.stackedWidget_actions.setCurrentWidget(self.gui.page_move_scanner)\n self.gui.stackedWidget_stepper.setCurrentWidget(self.gui.stackedWidgetempty)\n\n #Give the configurate box a border and the action box none\n self.gui.groupBox_configurate.setStyleSheet(\"QGroupBox#Colored_configure {border: 1px solid blue; border-radius: 9px;}\")\n self.gui.groupBox_actions.setStyleSheet(\"QGroupBox default\")\n\n 
#Choose either the page_scannerZ or page_scannerXY of the stackedWidget_voltScanner\n if 'Z' in self.current_axis:\n self.gui.stackedWidget_voltScanner.setCurrentWidget(self.gui.page_scannerZ)\n else:\n self.gui.stackedWidget_voltScanner.setCurrentWidget(self.gui.page_scannerXY)", "def test_config(self):\n\n p = SyncProto(packet_port, None)\n\n d = make_axes(500, .1, usteps=16, steps_per_rotation=200)\n p.config(4, 18, 32, False, False, axes=d['axes1']);\n p.info()\n\n d = make_axes(1000, .2, usteps=16, steps_per_rotation=200,\n output_mode=OutMode.OUTPUT_OPENDRAIN, highval=OutVal.LOW)\n p.config(4, 7, 9, False, False, axes=d['axes1']);\n p.info()", "def _UpdatePlotImpl( self ):\n if self.ax is not None:\n self.axline = None\n self.cursorLine = \\\n self.cursorLine2 = None\n\n# self.ax.clear()\n# if hasattr( self, 'ax2' ) and self.ax2 is not None:\n# self.ax2.clear()\n self.fig.clear()\n self._InitAxes()\n\n#\t\t-- Scale fonts\n#\t\t--\n wd, ht = self.GetClientSize()\n label_font_size = 14\n tick_font_size = 12\n self.titleFontSize = 16\n if 'wxMac' not in wx.PlatformInfo and wd < 800:\n\tdecr = (800 - wd) / 50.0\n\tlabel_font_size -= decr\n\ttick_font_size -= decr\n\tself.titleFontSize -= decr\n\n# self.ax.grid(\n# True, 'both', 'both',\n#\t color = '#c8c8c8', linestyle = ':', linewidth = 1\n#\t )\n self._DoUpdatePlot( wd, ht )\n self._DoUpdateRedraw()\n self.canvas.draw()\n #end if", "def noAxisSystem():\n dislin.nograf()", "def updatePlot(self,*args):\n # set x limits\n timeDisplayOptions = {'10 minutes':10,'1 hour':60,'6 hours':6*60,'24 hours':24*60,'All':0}\n try:\n lastDatetime = mpl.dates.num2date(self.stage60K.get_xdata()[-1])\n firstDatetime = mpl.dates.num2date(self.stage60K.get_xdata()[0])\n except IndexError: # no data yet\n now = datetime.datetime.utcnow().toordinal()\n firstDatetime = mpl.dates.num2date(now)\n lastDatetime = firstDatetime\n xMin = lastDatetime-datetime.timedelta(minutes=timeDisplayOptions[self.wScale.get()])\n xMin = max([ firstDatetime, xMin ])\n if self.wScale.get() == 'All':\n xMin = firstDatetime\n xMinIndex = numpy.searchsorted( self.stage60K.get_xdata(), mpl.dates.date2num(xMin) )\n # rescale axes, with the x being scaled by the slider\n if self.toolbar._active == 'HOME' or self.toolbar._active == None:\n ymin,ymax = 10000000, -10000000\n lineAndVar = { self.stage60K: self.t60K,\n self.stage03K: self.t3K,\n self.stageGGG: self.tGGG,\n self.stageFAA: self.tFAA }\n if len(self.stage60K.get_xdata()) > 1:\n for line in lineAndVar.keys():\n if lineAndVar[line].get() == 0:\n line.set_visible(False)\n else:\n line.set_visible(True)\n ydata = line.get_ydata()[xMinIndex:-1]\n try:\n ymin = min(ymin, numpy.nanmin(ydata))\n ymax = max(ymax, numpy.nanmax(ydata))\n except ValueError as e:\n pass\n self.ax.set_xlim(xMin,lastDatetime)\n self.ax.set_ylim(ymin - (ymax-ymin)/10, ymax + (ymax-ymin)/10)\n hfmt = mpl.dates.DateFormatter('%H:%M:%S', tz=tz.tzlocal())\n self.ax.xaxis.set_major_formatter(hfmt)\n self.fig.autofmt_xdate()\n self.fig.tight_layout()\n #draw\n self.canvas.draw()", "def on_click(event):\n ax = event.inaxes\n \n if ax is None:\n # Occurs when a region not in an axis is clicked...\n return\n \n if self.current_plot == 'single':\n if event.button is 1:\n if not self.ax_zoomed:\n # Change over to a single baseline plot\n try:\n self.ax_zoomed = True\n self.current_ax = ax\n ax.set_position([0.1, 0.05, 0.85, 0.80])\n ax.set_xlabel(\"Frequency\")\n #ax.set_ylabel(\"Time\")\n \n for axis in self.sp_fig.axes:\n if axis is not ax:\n axis.set_visible(False)\n 
\n except ValueError:\n raise\n self.sp_fig.canvas.mpl_disconnect(self.fig_connect)\n \n elif event.button is 3:\n if self.ax_zoomed:\n self.ax_zoomed = False\n #self.sp_fig.canvas.mpl_disconnect(self.fig_connect)\n self.updatePlot()\n \n else:\n # No need to re-draw the canvas if it's not a left or right click\n return\n \n elif self.current_plot == 'multi':\n if ax is None:\n # Occurs when a region not in an axis is clicked...\n return\n if event.button is 1:\n if not self.ax_zoomed:\n # Change over to a single baseline plot\n try:\n ant1, ant2 = ax.get_title().split(\" \")\n except:\n ant1 = int(ax.get_title().strip('Tile').strip('Antenna').strip('Stand'))\n ant2 = ant1 \n try:\n self.spin_ref_ant.setValue(int(ant1))\n self.spin_ref_ant2.setValue(int(ant2))\n self.plot_select.setCurrentIndex(0)\n self.current_plot = 'single'\n \n self.updatePlot()\n except:\n raise\n self.sp_fig.canvas.mpl_disconnect(self.fig_connect)\n \n elif event.button is 3:\n if not self.ax_zoomed:\n ax.set_position([0.1, 0.1, 0.85, 0.85])\n # TODO: fix labelling of zoom plots\n ax.set_xlabel(\"Frequency\")\n #ax.set_ylabel(\"Time\")\n self.orig_position = ax.get_position()\n for axis in event.canvas.figure.axes:\n # Hide all the other axes...\n if axis is not ax:\n axis.set_visible(False)\n self.ax_zoomed=True\n else:\n self.updatePlot()\n \n else:\n # No need to re-draw the canvas if it's not a left or right click\n return\n \n event.canvas.draw()", "def update_flags(self):\n # view mode, filled vs wirefrom\n if self.view['wireframe']:\n gl.glPolygonMode(gl.GL_FRONT_AND_BACK, gl.GL_LINE)\n else:\n gl.glPolygonMode(gl.GL_FRONT_AND_BACK, gl.GL_FILL)\n\n # set fullscreen or windowed\n self.set_fullscreen(fullscreen=self.view['fullscreen'])\n\n # backface culling on or off\n if self.view['cull']:\n gl.glEnable(gl.GL_CULL_FACE)\n else:\n gl.glDisable(gl.GL_CULL_FACE)\n\n # case where we WANT an axis and NO vertexlist\n # is stored internally\n if self.view['axis'] and self._axis is None:\n from .. 
import creation\n # create an axis marker sized relative to the scene\n axis = creation.axis(origin_size=self.scene.scale / 100)\n # create ordered args for a vertex list\n args = rendering.mesh_to_vertexlist(axis)\n # store the axis as a reference\n self._axis = self.batch.add_indexed(*args)\n\n # case where we DON'T want an axis but a vertexlist\n # IS stored internally\n elif not self.view['axis'] and self._axis is not None:\n # remove the axis from the rendering batch\n self._axis.delete()\n # set the reference to None\n self._axis = None", "def update(self):\n\t\tprint(\"Plotting \" + str(str(self.values[\"Trial1\"][1]) + \" at \" + str(self.values[\"Trial1\"][0]) + \"\\n\"))\n\t\tif self.clear:\n\t\t\tself.stream1.write(dict(x=[], y=[]))\n\t\t\tself.stream2.write(dict(x=[], y=[]))\n\t\t\tself.stream3.write(dict(x=[], y=[]))\n\t\telse:\n\t\t\tself.stream1.write(dict(x=self.values[\"Trial1\"][0], y=self.values[\"Trial1\"][1]))#, trace=Bar)\n\t\t\tself.stream2.write(dict(x=self.values[\"Trial2\"][0], y=self.values[\"Trial2\"][1]))\n\t\t\tself.stream3.write(dict(x=self.values[\"Trial3\"][0], y=self.values[\"Trial3\"][1]))", "def xaxis ( self ) :\n return self.__xaxis", "def xaxis ( self ) :\n return self.__xaxis", "def update(self):\n\t\tprint(\"Plotting \" + str(str(self.values[\"Trial\"][1]) + \" at \" + str(self.values[\"Trial\"][0]) + \"\\n\"))\n\t\tif self.clear:\n\t\t\tself.stream.write(dict(x=[], y=[]))\n\t\telse:\n\t\t\tself.stream.write(dict(x=self.values[\"Trial\"][0], y=self.values[\"Trial\"][1]))", "def xaxis ( self ) :\n return self.__xaxis", "def status(self,axis):\n \n if not self.enabled:\n return (False,False)\n \n enabled = True\n self.send_cmd(axis, ' PRINT MVG')\n\n flag = self.ser.read(100)\n moving = True\n \n if flag[:4] == b'FALS': \n moving = False\n elif flag[:4] == b'TRUE':\n moving = True\n\n non_moving = not moving\n return (enabled, non_moving)", "def ready(self):\n plt.ion()\n self.figure = plt.figure()\n axes = self.figure.add_subplot(111)\n self.line, = axes.plot(self.xs, self._get_y_data(), self.colour)\n\n if self.y_range is not None:\n plt.ylim(*self.y_range)\n plt.xlim(self.x.lower, self.x.upper)\n\n plt.xlabel(self.x.tex_name if self.use_tex else self.x.name)\n plt.ylabel(self.y.tex_name if self.use_tex else self.y.name)\n\n self.figure.canvas.draw()", "def updatePlot(self):\n if len(self.baslin):\n X = list(t[0] for t in self.baslin)\n Y = list(t[1] for t in self.baslin)\n self.BLplt.set_xdata(X)\n self.BLplt.set_ydata(Y)\n if self.BLtyp == 'S':\n if self.BL is None:\n self.BL, = self.axes.plot(self.data[0], self.data[2], linestyle='-', color='green')\n else:\n self.BL.set_ydata(self.data[2])\n self.canvas.draw()", "def initialize_axes(self):\r\n self.x_lim = np.array([self.vals[:, 0].min(), self.vals[:, 0].max()])\r\n self.y_lim = np.array([self.vals[:, 1].min(), self.vals[:, 1].max()])\r\n self.z_lim = np.array([self.vals[:, 2].min(), self.vals[:, 2].max()])", "def listen_and_send(self):\n hadEvent = False\n\n if not self.axis_data:\n self.axis_data = {}\n\n if not self.button_data:\n self.button_data = {}\n for i in range(self.controller.get_numbuttons()):\n self.button_data[i] = False\n\n if not self.hat_data:\n self.hat_data = {}\n for i in range(self.controller.get_numhats()):\n self.hat_data[i] = (0, 0)\n\n while True:\n for event in pygame.event.get():\n if event.type == pygame.JOYAXISMOTION:\n self.axis_data[event.axis] = round(event.value, 2)\n elif event.type == pygame.JOYBUTTONDOWN:\n self.button_data[event.button] = True\n elif 
event.type == pygame.JOYBUTTONUP:\n self.button_data[event.button] = False\n elif event.type == pygame.JOYHATMOTION:\n self.hat_data[event.hat] = event.value\n\n if event.type == pygame.JOYBUTTONDOWN:\n # A button on the joystick just got pushed down\n hadEvent = True\n elif event.type == pygame.JOYAXISMOTION:\n # A joystick has been moved\n hadEvent = True\n\n if hadEvent:\n\n # If platform is linux we need to change some values in axis_data\n os.system('clear')\n print(\"Axis before\")\n pprint.pprint(self.axis_data)\n if sys.platform == 'linux':\n #self.axis_data[2], self.axis_data[3], self.axis_data[4] = self.axis_data[4], self.axis_data[2], self.axis_data[3]\n temp2 = self.axis_data[2]\n temp3 = self.axis_data[3]\n temp4 = self.axis_data[4]\n self.axis_data[2] = temp4\n self.axis_data[3] = temp2\n self.axis_data[4] = temp3\n\n\n self.event_dict['axis'] = self.axis_data\n self.event_dict['button'] = self.button_data\n message = pickle.dumps(self.event_dict, protocol=4)\n message = bytes(f\"{len(message):<{HEADERSIZE}}\", 'utf-8') + message\n self.sock.sendall(message)\n\n #if self.button_data[4]:\n # self.verbose = not self.verbose\n\n if self.verbose:\n\n # print(\"Button \")\n # pprint.pprint(self.button_data)\n print(\"Axis \")\n pprint.pprint(self.axis_data)\n # print(\"Motion \")\n # pprint.pprint(self.hat_data)", "def update_velocities(self, wx, wy):\r\n self.wx = wx\r\n self.wy = wy", "def set_axis(self, axis_list):\n if self.table_ready:\n final_axis_list = []\n for i, axis in enumerate(axis_list):\n if axis:\n final_axis_list.append(\"1 \" + str(i + 1))\n else:\n final_axis_list.append(\"0 \" + str(i + 1))\n\n command = self.build_command(\n self.device, (\"set_axis\", final_axis_list), single_commands=True\n )\n self.vcw.write(self.device, command)", "def plot_config(self):\n self.dynamic.current_plot.setTitle(\n \"Dynamic IV waiting time analysis\", **self.titleStyle\n )\n self.dynamic.current_plot.setLabel(\n \"left\", \"current\", units=\"A\", **self.labelStyle\n )\n self.dynamic.current_plot.setLabel(\n \"bottom\", \"time\", units=\"s\", **self.labelStyle\n )\n self.dynamic.current_plot.showAxis(\"top\", show=True)\n self.dynamic.current_plot.showAxis(\"right\", show=True)\n self.dynamic.current_plot.plotItem.showGrid(x=True, y=True)\n self.dynamic.current_plot.getPlotItem().invertY(True)\n\n change_axis_ticks(self.dynamic.current_plot, self.ticksStyle)", "def x_axis_changed(self):\n\t\tx_coord_cbox = self.ui.findChild(QWidget, \"x_coord_cbox\")\n\t\tnew_axis = x_coord_cbox.currentText()\n\t\tprint new_axis", "def setup_datavault(self, x_axis, y_axis):\n\n yield self.dv.cd(['', self.name], True)\n\n # datasets for each laser\n self.dataset = yield self.dv.new(self.name + ' ML', [( 't', 'num')], [('GHz', '', 'num')])", "def centerAxis():\n dislin.center()", "def show_axes(self):\n if hasattr(self, 'axes_widget'):\n self.axes_widget.EnabledOn()\n self.axes_widget.SetCurrentRenderer(self)\n else:\n self.add_axes()\n self.Modified()", "def __init__(self, *args, **kwargs):\n # Set tick length to zero so azimuthal labels are not too offset\n # Change default radial axis formatter but keep default theta one\n super().__init__(*args, **kwargs)\n formatter = axistools.Formatter('auto')\n self.yaxis.set_major_formatter(formatter)\n self.yaxis.isDefault_majfmt = True\n for axis in (self.xaxis, self.yaxis):\n axis.set_tick_params(which='both', size=0)", "def __init__(self):\n self.wnd = WindSensor()", "def __init__(self, x):\n self.W = np.zeros((x+1,10))\n self.alpha = 0.00000001\n 
self.epochs = 100\n self.reg_const = 0.01", "def __init__(self, ax=None):\n\n if ax is None:\n f = plt.figure()\n self.ax = f.add_subplot(111)\n else:\n self.ax = ax\n\n self.e2 = [] # list to store RMS error results\n self.labels = []\n self.colors = []", "def tick(self):\r\n pass", "def get_toggle_axes_args(self,comArgs):\n params, flags = self.get_params(comArgs)\n args = [flags, False]\n return args", "def init(self, info):\r\n# info.object.mpl_setup()\r\n return True", "def __init__(self, slit_width_xaxis, slit_height_zaxis):\n super(SwScreen, self).__init__()\n self.sw = self.create_instance()\n\n self.set_output_files(fwrite=0, f_angle=0) #write all, TODO: remove\n\n n_screen = 1\n i_screen = np.array([1, 0, 0, 0, 0, 0, 0, 0, 0, 0])\n i_abs = np.zeros(10)\n i_slit = np.zeros(10)\n i_stop = np.zeros(10)\n k_slit = np.zeros(10)\n thick = np.zeros(10)\n file_abs = np.array(['', '', '', '', '', '', '', '', '', ''])\n rx_slit = np.zeros(10)\n rz_slit = np.zeros(10)\n sl_dis = np.zeros(10)\n file_src_ext = np.array(['', '', '', '', '', '', '', '', '', ''])\n cx_slit = np.zeros(10)\n cz_slit = np.zeros(10)\n\n i_abs[0] = 0 # NO ABSORPTION\n i_slit[0] = 0 # APERTURING\n i_stop[0] = 0 # SLIT\n k_slit[0] = 0 # RECTANGULAR\n\n rx_slit[0] = slit_width_xaxis\n rz_slit[0] = slit_height_zaxis\n cx_slit[0] = 0.0\n cz_slit[0] = 0.0\n\n self.sw._oe.set_screens(n_screen,\n i_screen,\n i_abs,\n sl_dis,\n i_slit,\n i_stop,\n k_slit,\n thick,\n file_abs,\n rx_slit,\n rz_slit,\n cx_slit,\n cz_slit,\n file_src_ext)", "def drawAxis(image, cameraMatrix, distCoeffs, rvec, tvec, length):\n pass", "def set_data(self, x = None, y = None):\n self.x_axis = x\n self.y_axis = y", "def __init__(self):\n self.Robot = Robot()\n self.Omega = matrix()\n # self.Omega.value[0][0] = 1.0\n # self.Omega.value[1][1] = 1.0\n self.Xi = matrix()\n # Xi.value[0][0] = 0.0\n # Xi.value[1][0] = 0.0\n self.measure = {}\n self.landMarkCount = 0\n self.init = False\n self.bearing = 0\n self.x = 0\n self.y = 0\n \n # TODO", "def plotWavelet(self):\n\n a = 0\n if(self.axQT is not None):\n self._bxPlot()\n a += 1\n if(self.bxQT is not None):\n self._cxPlot()\n a += 1\n\n if(a > 0):\n return\n\n self._setupPlot()\n self._axPlot()\n self._bxPlot()\n self._cxPlot()\n self._dxPlot()\n self._endingPlot()", "def __init__(self, ds, check_dims=True, x_periodic=True, y_periodic=True,\n z_periodic=False):\n self._ds = ds\n self._check_dims = check_dims\n self._periodic = {'X': x_periodic, 'Y': y_periodic, 'Z': z_periodic}\n\n self._axes = OrderedDict()\n for ax in ['X', 'Y']:\n # figure out what the grid dimensions are\n coord_names = comodo.get_axis_coords(ds, ax)\n ncoords = len(coord_names)\n if ncoords == 0:\n # didn't find anything for this axis\n pass\n else:\n if ncoords != 2:\n raise ValueError('Must have two different %s coordinates. 
'\n 'Instead got %s' % (ax, repr(coord_names)))\n axis_data = OrderedDict()\n for name in coord_names:\n coord = ds[name]\n axis_shift = coord.attrs.get('c_grid_axis_shift')\n if (axis_shift is None) or (axis_shift == 0):\n # we found the center coordinate\n axis_data['c'] = name\n axis_data['c_coord'] = coord\n elif (axis_shift==0.5) or (axis_shift==-0.5):\n # we found the face coordinate\n axis_data['g'] = name\n axis_data['g_coord'] = coord\n # TODO: clearly document the sign convention\n axis_data['shift'] = 1 if axis_shift==0.5 else -1\n else:\n raise ValueError('Invalid c_grid_axis_shift (%g) for '\n 'coord %s' % (axis_shift, name))\n self._axes[ax] = axis_data\n\n # check grid size consistency\n # we can deal with two cases:\n # * the c dim and g dim are the same size\n # * the g dim is one element longer than the c dim\n # define a slice used to subset\n for ax, info in iteritems(self._axes):\n clen = len(info['c_coord'])\n glen = len(info['g_coord'])\n if clen==glen:\n # all good\n self._axes[ax]['pad'] = 0\n elif clen==(glen - 1):\n self._axes[ax]['pad'] = 1\n else:\n raise ValueError(\"Incompatible c and g dimension lengths on \"\n \"axis %s (%g, %g)\" % (ax, clen, glen))", "def initRunningVals(self):\n self.r_Vm = [0.0]*self.mirror.dataPoints\n self.r_Va = [0.0]*self.mirror.dataPoints", "def __init__(self):\n\n self.__name = '{}_{}'.format(type(self).__name__, id(self))\n self.__xax = 0\n self.__yax = 0\n\n self.addListener('zax', self.__name, self.__zaxChanged, immediate=True)\n self.__zaxChanged()", "def init_axes(self):\n plt.switch_backend(\"cairo\")\n fig = plt.figure(figsize=(15,10))\n ax = fig.add_axes([0.05, 0.15, 0.9, 0.80,])\n return (fig, ax)", "def reset(self):\n # Don't reset axis labels\n self.range = ((0, 2, 1),) * self.ndim\n self.current_step = (0,) * self.ndim\n self.order = tuple(range(self.ndim))", "def __init__(self, x_0, y_0, initX, initY,h=5):\n self.x_0=x_0\n self.y_0=y_0\n self.x_init=initX\n self.y_init=initY\n self.step=h" ]
[ "0.6247041", "0.6213832", "0.6022709", "0.602028", "0.5989789", "0.59698653", "0.59595746", "0.5930603", "0.5922851", "0.5920339", "0.58486426", "0.5825133", "0.57882136", "0.5785074", "0.5751861", "0.57397544", "0.57143", "0.5657272", "0.56560445", "0.5648683", "0.56252724", "0.5620141", "0.5607352", "0.56034017", "0.55876297", "0.5564904", "0.5549435", "0.55394316", "0.55391467", "0.55177075", "0.5506144", "0.54896486", "0.54881406", "0.5482634", "0.54634494", "0.5451611", "0.5445422", "0.5445422", "0.54396534", "0.54368556", "0.54267716", "0.5417533", "0.5408815", "0.5401683", "0.5399429", "0.5391471", "0.5388643", "0.53855836", "0.5374117", "0.53591186", "0.5355355", "0.5335805", "0.5332584", "0.532594", "0.532482", "0.53234994", "0.5323361", "0.53086084", "0.52901417", "0.5273782", "0.5272496", "0.52706426", "0.525273", "0.5248996", "0.5246375", "0.5240455", "0.5228327", "0.52179444", "0.52179444", "0.5217278", "0.5216129", "0.5205658", "0.5201598", "0.51980513", "0.519299", "0.5192254", "0.518811", "0.5184326", "0.51769227", "0.51725215", "0.5170558", "0.5169305", "0.51564753", "0.5147242", "0.51471615", "0.51418364", "0.513279", "0.5130735", "0.513003", "0.512716", "0.51182646", "0.511391", "0.50975055", "0.5097046", "0.50955075", "0.5094579", "0.5086688", "0.5085638", "0.5085628", "0.5079593", "0.50771314" ]
0.0
-1
ne on, hgcd on, ff off, doHartmann, ne off, hgcd off
def test_goto_field_boss_hartmann(self):
        sopTester.updateModel('mcp', TestHelper.mcpState['all_off'])
        cmdState = self.actorState.gotoField
        cmdState.reinitialize(self.cmd)
        cmdState.doSlew = False
        cmdState.doCalibs = False
        cmdState.arcTime = 0
        cmdState.flatTime = 0
        cmdState.doGuider = False
        self._goto_field_boss(5, 29, 0, 0, cmdState)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def genfb_py(h, n, u, v, f, dt, dx, dy, du,dv,dn, gridu,gridv,gridn, threadblock, beta=0.281105, eps=0.013, gamma=0.0880, mu=0.3, nu=0, dudt_x=dudt, dvdt_x=dvdt, dndt_x=dndt, grav=True, cori=True, advx=True, advy=True, attn=True, ): # generalized forward backward feedback timestep\n \n p5 = np.float32(0.5)\n one = np.float32(1)\n p32 = np.float32(1.5)\n beta = np.float32(beta)\n eps = np.float32(eps)\n gamma= np.float32(gamma)\n mu = np.float32(mu)\n \n dn_m1,dn_m2,dn_m0 = dn # dn[0], dn[1], dn[2] # unpack\n if dn_m1 is dn_m2 or dn_m1 is dn_m0:\n print (\"error dn_m1\")\n if dn_m2 is dn_m0: \n print (\"error dn_m0\")\n print( dn_m1[280,5],dn_m2[280,5],dn_m0[280,5]) \n# hn = n.copy_to_host()\n# print ('n', hn.shape,n.shape, np.argmax(hn),np.max(hn),np.argmin(hn),np.min(hn))\n# hn = u.copy_to_host()\n# print ('u', hn.shape,n.shape, np.argmax(hn),np.max(hn),np.argmin(hn),np.min(hn))\n# hn = v.copy_to_host()\n# print ('v', hn.shape,n.shape, np.argmax(hn),np.max(hn),np.argmin(hn),np.min(hn))\n dndt_x[gridn, threadblock](h, n, u, v, dx, dy, dn_m0)\n \n # must do the following before the u and v !\n #n1 = n + ((p32+beta)* dn_m0 - (p5+beta+beta)* dn_m1+ (beta)* dn_m2)*dt\n# n_m0 = dn_m0.copy_to_host()\n# print ('dn_m0',dn_m0.shape,np.argmax(n_m0),np.max(n_m0),np.argmin(n_m0),np.min(n_m0))\n# lincomb4_cuda[gridn,threadblock](n, dn_m0, dn_m1, dn_m2, one, (p32+beta)*dt, -(p5+beta+beta)*dt, (beta)*dt, n)\n \n h_n = n.copy_to_host()\n hn_m0 = dn_m0.copy_to_host()\n hn_m1 = dn_m1.copy_to_host()\n hn_m2 = dn_m2.copy_to_host()\n h_n = h_n+(p32+beta)*dt*hn_m0 -(p5+beta+beta)*dt*hn_m0+(beta)*dt*hn_m0\n n[:]=h_n\n \n \n \n du_m0,du_m1,du_m2,du_p1 = du # du[0], du[1], du[2], du[3] # unpack\n# if du_p1 is du_m0 or du_p1 is du_m1 or du_p1 is du_m2:\n# print (\"error du_p1\")\n# if du_m0 is du_m2 or du_m0 is du_m1 :\n# print (\"error du_m0\")\n# if du_m2 is du_m1:\n# print (\"error du_m1\")\n print( du_m0[280,5],du_m1[280,5],du_m2[280,5],du_p1[280,5]) \n dudt_x[gridu, threadblock](h, n, f, u, v, dx, dy, du_p1, grav, cori, advx, advy, attn,nu,mu)\n\n dv_m0,dv_m1,dv_m2,dv_p1 = dv #dv[0], dv[1], dv[2], dv[3] # unpack \n print( dv_m0[280,5],dv_m1[280,5],dv_m2[280,5],dv_p1[280,5]) \n dvdt_x[gridv, threadblock](h, n, f, u, v, dx, dy, dv_p1, grav, cori, advx, advy, attn,nu,mu)\n \n #u1 = u+ ((p5+gamma+eps+eps)*du_p1 +(p5-gamma-gamma-eps-eps-eps)*du_m0 +gamma*du_m1+eps*du_m2)*dt\n # lincomb5_cuda[gridu,threadblock](u, du_p1, du_m0, du_m1, du_m2, one, (p5+gamma+eps+eps)*dt, (p5-gamma-gamma-eps-eps-eps)*dt, gamma*dt, eps*dt, u)\n lincomb5_cuda[gridu,threadblock](u, du_p1, du_m0, du_m1, du_m2, one, one*dt, np.float32(0.0), np.float32(0.0), np.float32(0.0), u)\n\n #v1 = v+ ((p5+gamma+eps+eps)*dv_p1 +(p5-gamma-gamma-eps-eps-eps)*dv_m0 +gamma*dv_m1+eps*dv_m2)*dt\n # lincomb5_cuda[gridv,threadblock](v, dv_p1, dv_m0, dv_m1, dv_m2, one, (p5+gamma+eps+eps)*dt, (p5-gamma-gamma-eps-eps-eps)*dt, gamma*dt, eps*dt, v)\n lincomb5_cuda[gridv,threadblock](v, dv_p1, dv_m0, dv_m1, dv_m2, one, one*dt, np.float32(0.0), np.float32(0.0), np.float32(0.0), v)\n \n\n dv = [ dv_p1,dv_m0,dv_m1,dv_m2 ]\n du = [ du_p1,du_m0,du_m1,du_m2 ]\n dn = [ dn_m0,dn_m1,dn_m2 ]\n return du, dv, dn", "def fredkin(ha, hb, hc):\n\n return controlled_U(ha, swap(hb, hc))", "def genfb(h, n, u, v, f, dt, dx, dy, du,dv,dn, beta=0.281105, eps=0.013, gamma=0.0880, mu=0.3, nu=0, dudt_x=dudt, dvdt_x=dvdt, dndt_x=dndt, grav=True, cori=True, advx=True, advy=True, attn=True): # generalized forward backward feedback timestep\n \n beta = np.float32(beta)\n eps = 
np.float32(eps)\n gamma = np.float32(gamma)\n mu = np.float32(mu)\n \n \n dn_m1,dn_m2,dn_m0 = dn # unpack\n dndt_x(h, n, u, v, dx, dy, dn_m0)\n \n# test_out = dn_m0.copy()\n# dndt(h, n, u, v, dx, dy, test_out)\n \n# test_dif = dn_m0-test_out\n# if np.max(np.abs(test_dif[1:-1,1:-1] )) >1E-5 :\n# test_dif[1:-1,5][np.abs(test_dif[1:-1,5] ) <1E-5]=0.0\n# print (\"dn diff 2\")\n# print (test_dif[:,5])\n \n #dn_m0[:]=test_out \n\n # must do the following before the u and v !\n n1 = n + ((p32+beta)* dn_m0 - (p5+beta+beta)* dn_m1+ (beta)* dn_m2)*dt\n #del dn_m2\n du_m0,du_m1,du_m2,du_p1 = du # unpack\n dudt_x(h, n1, f, u, v, dx, dy, du_p1, grav=grav, cori=cori, advx=advx, advy=advy, attn=attn,nu=nu,mu=mu)\n\n dv_m0,dv_m1,dv_m2,dv_p1 = dv # unpack \n dvdt_x(h, n1, f, u, v, dx, dy, dv_p1, grav=grav, cori=cori, advx=advx, advy=advy, attn=attn,nu=nu,mu=mu)\n \n# test_out = du_p1.copy()\n# dudt(h, n1, f, u, v, dx, dy, test_out)\n \n# test_dif = du_p1-test_out\n# if np.max(np.abs(test_dif[1:-1,5] )) >1E-5 :\n# test_dif[1:-1,5][np.abs(test_dif[1:-1,5] ) <1E-5]=0.0\n# print (\"du diff\")\n# print (test_dif[:,5])\n \n# #du_p1[:] = test_out\n\n# test_out = dv_p1.copy()\n# dvdt(h, n1, f, u, v, dx, dy, test_out)\n \n# test_dif = dv_p1-test_out\n# if np.max(np.abs(test_dif[1:-1,5] )) >1E-5 :\n# test_dif[1:-1,5][np.max(test_dif[1:-1,5] ) <1E-5]=0.0\n# print (\"dv diff\")\n# print (test_dif[:,5])\n \n #dv_p1[:] = test_out\n \n u1 = u+ ((p5+gamma+eps+eps)*du_p1 +(p5-gamma-gamma-eps-eps-eps)*du_m0 +gamma*du_m1+eps*du_m2)*dt\n # del du_m2\n v1 = v+ ((p5+gamma+eps+eps)*dv_p1 +(p5-gamma-gamma-eps-eps-eps)*dv_m0 +gamma*dv_m1+eps*dv_m2)*dt\n # del dv_m2\n\n\n \n \n dv = [ dv_p1,dv_m0,dv_m1,dv_m2 ]\n du = [ du_p1,du_m0,du_m1,du_m2 ]\n dn = [ dn_m0,dn_m1,dn_m2 ]\n# n[:,:], u[:,:], v[:,:], = n1, u1, v1\n return n1, u1, v1, du,dv,dn", "def test_G_H(self):\r\n chkmtx = (numpy.dot(golay.DEFAULT_G, golay.DEFAULT_H.T) % 2)\r\n self.assertTrue((chkmtx == 0).all())", "def __init__(self, N: int, P: int, H: int, csdfg_fwd: CompiledSDFG,\n csdfg_bwd: CompiledSDFG):\n super().__init__()\n\n class DaceMHAFunction(torch.autograd.Function):\n @staticmethod\n def forward(ctx, Q, K, V, WQ, WK, WV, WO, BQ, BK, BV, BO, scaler):\n B, SN, N = Q.shape\n _, SM, _ = K.shape\n output = torch.empty([B, SN, N], device=Q.device)\n csdfg_fwd(k=K.contiguous(),\n q=Q.contiguous(),\n v=V.contiguous(),\n __return=output,\n wk=WK.contiguous(),\n wq=WQ.contiguous(),\n wv=WV.contiguous(),\n wo=WO.contiguous(),\n bk=BK.contiguous(),\n bq=BQ.contiguous(),\n bv=BV.contiguous(),\n bo=BO.contiguous(),\n scaler=scaler.item(),\n B=B,\n P=P,\n H=H,\n N=H * P,\n SN=SN,\n SM=SM)\n ctx.save_for_backward(Q, K, V, output)\n return output\n\n @staticmethod\n def backward(ctx, grads):\n # ctx.saved_tensors\n output = torch.empty_like(K, device='cuda')\n gK, gQ, gV, gWK, gWQ, gWV = [\n torch.empty(grads.shape, grads.dtype) for _ in range(6)\n ]\n csdfg_bwd(output=output,\n gK=gK,\n gQ=gQ,\n gV=gV,\n gWK=gWK,\n gWQ=gWQ,\n gWV=gWV)\n return gK, gQ, gV, gWK, gWQ, gWV\n\n self.P = P\n self.H = H\n self.N = N\n\n self.op = DaceMHAFunction\n self.WK = torch.nn.Parameter(torch.Tensor(P, H, P * H))\n self.WQ = torch.nn.Parameter(torch.Tensor(P, H, P * H))\n self.WV = torch.nn.Parameter(torch.Tensor(P, H, P * H))\n self.WO = torch.nn.Parameter(torch.Tensor(P, H, P * H))\n self.BK = torch.nn.Parameter(torch.Tensor(P, H))\n self.BQ = torch.nn.Parameter(torch.Tensor(P, H))\n self.BV = torch.nn.Parameter(torch.Tensor(P, H))\n self.BO = torch.nn.Parameter(torch.Tensor(N))\n self.scaler 
= torch.tensor([P**-0.5], dtype=torch_dtype)\n\n # Initialize parameters\n xavier_uniform_(self.WK)\n xavier_uniform_(self.WQ)\n xavier_uniform_(self.WV)\n xavier_uniform_(self.WO)\n\n uniform_(self.BK)\n uniform_(self.BQ)\n uniform_(self.BV)\n uniform_(self.BO)", "def main():\n regexham = r'\\s+\\((\\d+,\\s*\\d+)\\)\\s+([\\-+]?\\d+\\.\\d+[eEdD]?[\\-+]?\\d+)' #to extract the Hamiltonian.\n root = '.'\n #fname = 'output_files/'\n ciffci = CIFlow_Reader('testfci.dat', regexp = regexham , read_ham= True)\n ciffcipar = CIFlow_Reader( 'psi0_output10outputfci.dat', regexp = regexham , read_ham = True)\n #print ciffci.calc_overlap(cifdoci)\n #print e.get_groundstate('00000000000011|00000000000011') \n\n psir = rp.PsiReader('psi0_output10.dat', isbig = False, numorbs = -1 , read_ints = False)\n\n detlist = dw.cimain(psir.values['nalpha'],psir.values['nbeta'], psir.values['norb'], [range(1,psir.values['nalpha']+psir.values['nbeta']), []], [] , fname = 'determinants.dat' ,ref = [lambda x , y , z : psir.get_hf_orbs()] , add_frozen = 0, write = False) #CISDDOCI\n count = 0\n for det in detlist:\n for det2 in detlist:\n #+ because the eigenvectors have already a different phasefactor of 1.\n if abs(ciffci.get_mat_element(det[0]+'|'+det[1], det2[0]+'|'+det2[1]) - ciffcipar.get_mat_element(det[0]+'|'+det[1], det2[0]+'|'+det2[1]) ) > 1e-10 :\n print 'difference in hamiltonian row: ' , det[0]+'|'+det[1] , \" col: \" , det2[0]+'|'+det2[1] , 'fci: ', ciffci.get_mat_element(det[0]+'|'+det[1], det2[0]+'|'+det2[1]) , 'fciaddres: ' , ciffcipar.get_mat_element(det[0]+'|'+det[1],det2[0]+'|'+det2[1]) \n count += 1\n print 'There were ' , count , ' different elements'", "def hemianopsia_hard(hemi='left',dprime_fnc=dprime_basic):\n def hemianopsia_fnc(distance):\n if (hemi == 'left' and distance[0] < 0) or (hemi == 'right' and distance[0] > 0):\n return SMALL_FLOAT\n else:\n return dprime_fnc(distance)\n return hemianopsia_fnc", "def exp_mod(a, b, nbr):\n bina = [int(x) for x in bin(a)[2:]]\n #binb = [int(x) for x in bin(b)[2:]]\n binn = [int(x) for x in bin(nbr)[2:]]\n #while len(binn)<len(bina):\n # binn = [0]+binn\n #print(bina, binn)\n binn.reverse()\n bina.reverse()\n n = len(bina)+len(binn)*4+1\n na = len(bina)\n nan = len(bina)+len(binn) # debut de Y\n nany = len(bina)+2*len(binn)+1 # debut de \"A\" (ici c'est b)\n nanya = len(bina)+3*len(binn)+1 # debut de \"APOW\" (ce qui doit etre mesuré)\n q = QuantumRegister(n+2, 'q') # +lost+lost2\n circ = QuantumCircuit(q)\n for i in range(na):\n if bina[i]:\n circ.x(q[i])\n for i in range(len(binn)):\n if binn[i]:\n circ.x(q[na+i])\n expmod(circ, q, # X, a, A, APOW, Y, n, N, binn, lost, lost2)\n [q[i] for i in range(len(bina))],\n b%nbr,\n [q[i+nany] for i in range(len(binn))],\n [q[i+nanya] for i in range(len(binn))],\n [q[i+nan] for i in range(len(binn)+1)],\n nbr,\n [q[i+na] for i in range(len(binn))],\n binn,\n q[n],\n q[n+1])\n if len(bina)%2:\n circ_m = measure(circ, q, [i for i in range(nan,nany)])\n else:\n circ_m = measure(circ, q, [i for i in range(nanya,n)])\n #circ_m = measure(circ, q, [i for i in range(n)])\n return circ_m", "def chk_hamming(data):\n pass", "def zzx_heu_gcd(f, g, **flags):\n def interpolate(h, x):\n f = []\n\n while h:\n g = h % x\n\n if g > x // 2:\n g -= x\n\n f.insert(0, g)\n h = (h-g) // x\n\n return f\n\n def finalize(h, cff, cfg, gcd):\n h = zzx_mul_const(h, gcd)\n return h, cff, cfg\n\n if not (f or g):\n return [], [], []\n elif not f:\n return g, [], [1]\n elif not g:\n return f, [1], []\n\n df = zzx_degree(f)\n dg = 
zzx_degree(g)\n\n cf = zzx_content(f)\n cg = zzx_content(g)\n\n gcd = igcd(cf, cg)\n\n f = [ c // gcd for c in f ]\n g = [ c // gcd for c in g ]\n\n if df == 0 or dg == 0:\n return [gcd], f, g\n\n f_norm = zzx_max_norm(f)\n g_norm = zzx_max_norm(g)\n\n B = 2*min(f_norm, g_norm) + 29\n\n x = max(min(B, 99*INT_TYPE(isqrt(B))),\n 2*min(f_norm // abs(poly_LC(f)),\n g_norm // abs(poly_LC(g))) + 2)\n\n for i in xrange(0, 6):\n ff = zzx_eval(f, x)\n gg = zzx_eval(g, x)\n\n if ff and gg:\n h = igcd(ff, gg)\n\n cff = ff // h\n cfg = gg // h\n\n h = interpolate(h, x)\n h = zzx_primitive(h)[1]\n\n cff_, r = zzx_div(f, h)\n\n if not r:\n cfg_, r = zzx_div(g, h)\n\n if not r:\n return finalize(h, cff_, cfg_, gcd)\n\n cff = interpolate(cff, x)\n\n h, r = zzx_div(f, cff)\n\n if not r:\n cfg_, r = zzx_div(g, h)\n\n if not r:\n return finalize(h, cff, cfg_, gcd)\n\n cfg = interpolate(cfg, x)\n\n h, r = zzx_div(g, cfg)\n\n if not r:\n cff_, r = zzx_div(f, h)\n\n if not r:\n return finalize(h, cff_, cfg, gcd)\n\n x = INT_TYPE(2.7319*x*isqrt(isqrt(x)))\n\n raise HeuristicGCDFailed('no luck')", "def distcalc(z,h=0.70,omegalambda=0.7,omegam=0.3,omegak=0.0):\n\n H0 = 100 * h # this is in units of km/s/Mpc\n\n H0freq = H0 * constants.kilo/(constants.mega * constants.parsec) # this is H0 is units of Hz\n \n hubbletime = 1.0/H0freq # in seconds\n hubbletimeyr = hubbletime / constants.year\n\n #hubble distance\n dh = constants.c / H0freq # in meters\n\n #now i can calculate the comoving distance (line of sight) using hogg eqn 15\n dc = dh * integrate.quad(dcintegrand,0,z,(omegalambda,omegam,omegak))[0]\n\n #now i can find the transverse comoving distance using hogg eqn 16\n if omegak == 0:\n dm = dc\n elif omegak > 0:\n dm = dh/np.sqrt(omegak) * np.sinh(dc * np.sqrt(omegak) / dh)\n else:\n dm = dh/np.sqrt(abs(omegak)) * np.sin(dc * np.sqrt(abs(omegak)) / dh)\n\n\n #now i will calculate the angular diameter distance (hogg eqn 18)\n da = dm/(1+z)\n \n #now i will calculate scale in kpc/arcsec, since this is commonly used\n scale = da * constants.arcsec / (constants.kilo * constants.parsec)\n\n #now i will calculate the luminosity distance (hog eqn 21)\n dl = (1+z)*dm\n \n #now i will calculate lookback time and \n #time from the begining of the universe to that redshift using hogg eqn 30\n \n tlookback = hubbletimeyr * integrate.quad(timeintegrand,0,z,(omegalambda,omegam,omegak))[0]\n \n tz = hubbletimeyr * integrate.quad(timeintegrand,z,np.inf,(omegalambda,omegam,omegak))[0]\n \n #all sky co-moving volume out to redshift z (hogg eqn 30)\n if omegak == 0:\n vc = 4 * np.pi * dm**3 / 3\n elif omegak > 0:\n vc = ( 4 * np.pi * dh**3 / (2 * omegak) ) * ( dm * np.sqrt(1 + omegak * dm**2 / dh**2) / dh - \n np.arcsinh( np.sqrt(omegak) * dm / dh ) / np.sqrt(omegak) )\n else:\n vc = ( 4 * np.pi * dh**3 / (2 * omegak) ) * ( dm * np.sqrt(1 + omegak * dm**2 / dh**2) / dh - \n np.arcsin( np.sqrt(abs(omegak)) * dm / dh ) / np.sqrt(abs(omegak)) )\n\n #for output, i will make a dictionary\n output = dict(dh=dh, dc=dc, dm=dm, da=da, scale=scale, dl=dl, tlookback = tlookback, tz=tz, vc=vc)\n\n return output", "def global_fn(self, h, e, g_state):\n N, K, _ = h.shape\n\n # Concatenate all relevant inputs.\n h = h.sum(dim=1)\n\n mask = torch.ones(1, K, K, 1)\n if torch.cuda.is_available():\n mask = mask.cuda()\n for kx1 in range(K):\n for kx2 in range(K):\n if kx1 != kx2-1:\n mask[:, kx1, kx2, :] = 0.\n #if kx1 == kx2:\n # mask[:, kx1, kx2, :] = 0.\n e = torch.sum(e*mask, dim=[1,2])\n\n x = torch.cat([h, e], dim=1).view(-1, 1, 
2*self.n_hidden)\n output, g_n = self.G_gru(input=x,\n hx=g_state)\n output = output.view(N, self.n_hidden)\n return output, g_n", "def Gamma01ph(ne,ng):\r\n\r\n return d(ne,ng)**2*domega(ne,ng)**3/(3*np.pi*epsilon_0*hbar*c**3)", "def pollard_rho(g: int, h: int, n: int, order: int = None):\n x = {0: 1}\n a = {0: 0}\n b = {0: 0}\n\n import ressources.multGroup as multGroup\n\n if order is None:\n order = multGroup.multiplicativeOrder(g, n)\n\n # from a, b and c, partitioning the field\n def step_xab(x, a, b, g, h, order, n):\n s = x % 3\n\n # S1\n if s == 1:\n x = x * h % n\n b = (b + 1) % order\n\n # S2\n if s == 0:\n x = square_and_multiply(x, 2, n)\n a = 2 * a % order\n b = 2 * b % order\n\n # S3\n if s == 2:\n x = x * g % n\n a = (a + 1) % order\n\n return x, a, b\n\n # returns x, a, b for a given i using memoization\n def get_xab(i):\n\n if i not in x:\n _x, _a, _b = get_xab(i - 1)\n\n x[i], a[i], b[i] = step_xab(_x, _a, _b, g, h, order, n)\n\n return x[i], a[i], b[i]\n\n def naturals_from(i):\n while True:\n # yield is a keyword that is used like return, except the function will return a generator.\n # https://www.google.com/search?client=firefox-b-d&q=yield+python\n yield i\n i += 1\n\n for i in naturals_from(1):\n\n x_i, a_i, b_i = get_xab(i)\n x_2i, a_2i, b_2i = get_xab(2 * i)\n\n if x_i == x_2i:\n\n r = (b_i - b_2i) % order\n\n if r == 0:\n return False\n\n return multGroup.inv(r, order) * (a_2i - a_i) % order", "def compute_gcd(first, second, n, gcd_factor): \n a = QuantumRegister(n, \"aq\")\n b = QuantumRegister(n, \"bq\")\n anc = QuantumRegister(2, \"ancq\")\n res = QuantumRegister(n, \"resq\")\n zero = QuantumRegister(2, \"zq\")\n anc_cl = ClassicalRegister(2, \"anccl\")\n zero_cl = ClassicalRegister(2, \"zcl\")\n a_cl = ClassicalRegister(n, \"acl\")\n b_cl = ClassicalRegister(n, \"bcl\") \n res_cl = ClassicalRegister(n, \"cl\")\n qc = QuantumCircuit(a, b, anc, res, zero, anc_cl, zero_cl, \n res_cl, a_cl, b_cl, name=\"qc\")\n\n for i in range(0, n):\n if first[i] == \"1\":\n qc.x(a[n-(i+1)])\n \n for i in range(0, n):\n if second[i] == \"1\":\n qc.x(b[n-(i+1)])\n\n for i in range(0, n):\n if gcd_factor[i] == \"1\":\n qc.x(res[n-(i+1)]) \n\n qc.measure(a,res_cl)\n qc.x(zero[0]).c_if(res_cl, 0)\n qc.measure(b, res_cl)\n qc.x(zero[1]).c_if(res_cl, 0)\n qc.measure(zero, zero_cl)\n #Resetting the zero qreg to 0, with zero_cl holding the zero status \n qc.x(zero[0]).c_if(res_cl, 0) \n qc.measure(a,res_cl) \n qc.x(zero[1]).c_if(res_cl, 0) \n #Resetting res_cl to zero again.\n for i in range(n):\n qc.measure(zero[0], res_cl[i]) \n\n #Checking for equality\n args = [a[x] for x in range(n)] + [b[x] for x in range(n)]\n csub = CompositeGate(\"csub\", [math.pi], args, circuit =\"qc\")\n csub = subtract.subtract(qc, n, math.pi, a, b, csub, -1)\n qc.measure(a, res_cl)\n qc.x(zero[0]).c_if(res_cl, 0)\n cadd = CompositeGate(\"csub\", [math.pi], args, circuit =\"qc\")\n cadd = subtract.subtract(qc, n, math.pi, a, b, cadd, 1)\n\n #Checking if either b, a or both are zero - if they are, then swap\n #the 1 in res_q out for a zero\n qc.swap(res[0], zero[0]).c_if(zero_cl, 1)\n qc.swap(res[0], zero[0]).c_if(zero_cl, 2)\n qc.swap(res[0], zero[0]).c_if(zero_cl, 3)\n #flip the 1 in zero_q to make zero_q on the zero state again.\n qc.x(zero[0]).c_if(zero_cl, 1)\n qc.x(zero[0]).c_if(zero_cl, 2)\n qc.x(zero[0]).c_if(zero_cl, 3) \n\n zero_status, gcd_factor, b_value, equality_status, a_value = \\\n gcd_main.gcd(qc, a, b, res, anc, zero, zero_cl, anc_cl, res_cl,\n n, a_cl, b_cl)\n\n return zero_status, 
gcd_factor, b_value, equality_status, a_value", "def exotic(double[:] v, redshifts, in_terms, double H0):\n\n cdef double t = v[0]\n cdef double a = v[1]\n cdef double ombar_m = v[2]\n cdef double ombar_r = v[3]\n cdef double ombar_de = v[4]\n cdef double z = v[5]\n cdef double dl = v[6]\n cdef double gamma = in_terms[0]\n cdef double zeta = in_terms[1]\n\n cdef double Hz = H0 * (ombar_m +ombar_r +ombar_de)**(0.5)\n\n# if ombar_m < 0 or ombar_de < 0:\n# print('exotic')\n# print('z = %s, Hz = %s, gamma = %s, zeta = %s'% (z, Hz, gamma, zeta))\n# print('ombar_m = %s, ombar_r = %s, ombar_de = %s'\n# % (ombar_m, ombar_r, ombar_de))\n\n if math.isnan(Hz):\n print('exotic')\n print('z = %s, Hz = %s, gamma = %s, zeta = %s'% (z, Hz, gamma, zeta))\n print('ombar_m = %s, ombar_r = %s, ombar_de = %s'\n % (ombar_m, ombar_r, ombar_de))\n\n cdef double dtdz = -1.0/((1.0+z) * Hz)\n cdef double dadz = -(1.0+z)**(-2.0)\n cdef double domdz = (3.0*ombar_m +gamma*ombar_m*ombar_r /Hz) /(1.0+z)\n cdef double dordz = (4.0*ombar_r -(gamma*ombar_m*ombar_r -zeta*ombar_r*ombar_de) /Hz ) /(1.0+z)\n cdef double dodedz = -zeta*ombar_r*ombar_de/(1.0+z) /Hz\n cdef double ddldz = 1.0/Hz\n\n # first derivatives of functions I want to find:\n f = [dtdz,# dt/dz (= f.d wrt z of time)\n dadz,# d(a)/dz (= f.d wrt z of scale factor)\n domdz,# dw = 0, (ombar_m)/dz (= f.d wrt z of density_m(t) / crit density(t0))\n dordz,# w = 1/3, d(ombar_r)/dz (= f.d wrt z of density_r(t) / crit density(t0))\n dodedz,# w = -1, d(ombar_de)/dz (= f.d wrt z of density_de(t) / crit density(t0))\n 1.0,# d(z)/dz (= f.d wrt z of redshift)\n ddldz]# d(dl)/dz (= f.d wrt z of luminosty distance) # H + Hdz*(1+z)\n\n return f", "def test_init_hebbian_2(self):\n v_one = [1, -1, -1, -1, 1, -1, -1, -1, 1]\n v_two = [-1, -1, -1, 1, 1, 1, -1, -1, -1]\n network = HopfieldNetwork([v_one, v_two])\n expected = np.array([\n [0, 0, 0, -2, 0, -2, 0, 0, 2],\n [0, 0, 2, 0, -2, 0, 2, 2, 0],\n [0, 2, 0, 0, -2, 0, 2, 2, 0],\n [-2, 0, 0, 0, 0, 2, 0, 0, -2],\n [0, -2, -2, 0, 0, 0, -2, -2, 0],\n [-2, 0, 0, 2, 0, 0, 0, 0, -2],\n [0, 2, 2, 0, -2, 0, 0, 2, 0],\n [0, 2, 2, 0, -2, 0, 2, 0, 0],\n [2, 0, 0, -2, 0, -2, 0, 0, 0]\n ], np.int64)\n npt.assert_equal(network.weight_matrix, expected)", "def hEqn(g, e, h, f, Gescape, Gform, EHdecay, FHloss, stepsize):\n hchange = (g * Gescape\n - h * e * EHdecay\n - h * f * FHloss\n - e * h * Gform)\n return hchange * stepsize", "def zzX_heu_gcd(f, g, **flags):\n if poly_univariate_p(f):\n return zzx_heu_gcd(f, g, **flags)\n\n def interpolate(h, x):\n f = []\n\n while not zzX_zero_p(h):\n g = zzX_zz_trunc(h, x)\n f.insert(0, g)\n h = zzX_sub(h, g)\n h = zzX_quo_const(h, x)\n\n return f\n\n def finalize(h, cff, cfg, gcd):\n if zzX_zz_LC(h) > 0:\n h = zzX_mul_const(h, gcd)\n else:\n h = zzX_mul_const(h, -gcd)\n cff = zzX_neg(cff)\n cfg = zzX_neg(cfg)\n\n return h, cff, cfg\n\n zero_f = zzX_zero_p(f)\n zero_g = zzX_zero_p(g)\n\n l = poly_level(f)\n z = zzX_zero(l)\n\n if zero_f and zero_g:\n return z, z, z\n elif zero_f:\n return g, z, zzX_const(l, 1)\n elif zero_g:\n return f, zzX_const(l, 1), z\n\n df = zzX_degree(f)\n dg = zzX_degree(g)\n\n cf = zzX_zz_content(f)\n cg = zzX_zz_content(g)\n\n gcd = igcd(cf, cg)\n\n f = zzX_quo_const(f, gcd)\n g = zzX_quo_const(g, gcd)\n\n f_norm = zzX_max_norm(f)\n g_norm = zzX_max_norm(g)\n\n B = 2*min(f_norm, g_norm) + 29\n\n x = max(min(B, 99*INT_TYPE(isqrt(B))),\n 2*min(f_norm // abs(zzX_zz_LC(f)),\n g_norm // abs(zzX_zz_LC(g))) + 2)\n\n for i in xrange(0, 6):\n ff = zzX_eval(f, x)\n gg = zzX_eval(g, x)\n\n 
if not (zzX_zero_p(ff) or zzX_zero_p(gg)):\n h, cff, cfg = zzX_heu_gcd(ff, gg, **flags)\n\n h = interpolate(h, x)\n h = zzX_zz_primitive(h)[1]\n\n cff_, r = zzX_div(f, h)\n\n if zzX_zero_p(r):\n cfg_, r = zzX_div(g, h)\n\n if zzX_zero_p(r):\n return finalize(h, cff_, cfg_, gcd)\n\n cff = interpolate(cff, x)\n\n h, r = zzX_div(f, cff)\n\n if zzX_zero_p(r):\n cfg_, r = zzX_div(g, h)\n\n if zzX_zero_p(r):\n return finalize(h, cff, cfg_, gcd)\n\n cfg = interpolate(cfg, x)\n\n h, r = zzX_div(g, cfg)\n\n if zzX_zero_p(r):\n cff_, r = zzX_div(f, h)\n\n if zzX_zero_p(r):\n return finalize(h, cff_, cfg, gcd)\n\n x = INT_TYPE(2.7319*x*isqrt(isqrt(x)))\n\n raise HeuristicGCDFailed('no luck')", "def zzx_cofactors(f, g, **flags):\n return zzx_heu_gcd(f, g, **flags)", "def __pow__(G, H):\n return NotImplemented", "def apply_fhd(self, gfhd):\n for bl in self.data.keys():\n i,j = bl\n p1,p2 = self.pol\n G = gfhd[p1][i]*gfhd[p2][j].conj()\n ind = np.where(G != 0)[0]\n self.data[bl][self.pol][:,ind] /= G[ind]", "def ldos_defect(h,v,e=0.0,delta=0.001,n=1):\n raise # still not finished\n import green\n # number of repetitions\n rep = 2*n +1\n # calculate pristine green function\n g,selfe = green.supercell_selfenergy(h,e=e,delta=delta,nk=100,nsuper=rep)\n # now calculate defected green function \n ez = e + 1j*delta # complex energy\n emat = np.matrix(np.identity(len(g)))*ez # E +i\\delta \n import supercell\n pintra = supercell.intra_super2d(h,n=rep) # pristine\n vintra = supercell.intra_super2d(h,n=rep,central=v) # defective\n selfe = emat - pintra - g.I # dyson euqation, get selfenergy\n gv = (emat - vintra -selfe).I # Green function of a vacancy, with selfener\n return", "def pohlig_hellman(g: int, h: int, n: int):\n\n def group_of_prime_power_order(g, h, n=tuple):\n # n = (p, e) prime factor exponent times he appears\n p, e = n\n n = square_and_multiply(p, e)\n\n x = 0\n # By Lagrange's theorem, this element has order p.\n y = square_and_multiply(g, square_and_multiply(p, e - 1, n), n)\n\n for k in range(e):\n hk = square_and_multiply(\n square_and_multiply(g, -x, n) * h,\n square_and_multiply(p, e - 1 - k, n),\n n,\n )\n dk = pollard_rho(y, hk, n)\n x += dk * square_and_multiply(p, k, n)\n\n return x\n\n pFactors = findPrimeFactors(n, True)\n integers, modulis = [], []\n\n for p, e in pFactors.items():\n ni = square_and_multiply(p, e)\n gi = square_and_multiply(g, (n // ni), n)\n hi = square_and_multiply(h, (n // ni), n)\n\n xi = group_of_prime_power_order(gi, hi, (p, e))\n\n integers.append(xi)\n modulis.append(ni)\n\n return ChineseRemainder(integers, modulis)", "def k_HF(T, n, h=1e-3):\n mu_p, _ = physics_solver_mu(n * (1 + h), T)\n mu_m, _ = physics_solver_mu(n * (1 - h), T)\n dn_dmu = 2 * h / (mu_p - mu_m) # second order diff\n return dn_dmu / n", "def test_loss_hamiltonian_incomplete(self):\n g = nx.lollipop_graph(4, 1).to_directed()\n edge_weight_data = {edge: (i + 1) * 0.5 for i, edge in enumerate(g.edges)}\n for k, v in edge_weight_data.items():\n g[k[0]][k[1]][\"weight\"] = v\n h = loss_hamiltonian(g)\n\n expected_ops = [\n qml.PauliZ(0),\n qml.PauliZ(1),\n qml.PauliZ(2),\n qml.PauliZ(3),\n qml.PauliZ(4),\n qml.PauliZ(5),\n qml.PauliZ(6),\n qml.PauliZ(7),\n qml.PauliZ(8),\n qml.PauliZ(9),\n qml.PauliZ(10),\n qml.PauliZ(11),\n qml.PauliZ(12),\n qml.PauliZ(13),\n ]\n expected_coeffs = [\n np.log(0.5),\n np.log(1),\n np.log(1.5),\n np.log(2),\n np.log(2.5),\n np.log(3),\n np.log(3.5),\n np.log(4),\n np.log(4.5),\n np.log(5),\n np.log(5.5),\n np.log(6),\n np.log(6.5),\n np.log(7),\n ]\n\n assert 
expected_coeffs == h.coeffs\n assert all([op.wires == exp.wires for op, exp in zip(h.ops, expected_ops)])\n assert all([type(op) is type(exp) for op, exp in zip(h.ops, expected_ops)])", "def get_34cohom_dim(v,l, even_e):\n op1 = OrdinaryGraphComplex.ContractEdgesGO.generate_operator(v,l, even_e)\n op2 = OrdinaryGraphComplex.ContractEdgesGO.generate_operator(v+1,l, even_e)\n fullvs = op1.domain\n fullvs2 = op2.domain\n \n vs34 = Valence34Mask(fullvs)\n vs342 = Valence34Mask(fullvs2)\n\n D34rank = 0\n if op1.is_valid():\n D = op1.get_matrix()\n # P34 = vs34.get_P34()\n # D34 = D * P34 \n i34 = vs34.get_34index_list()\n D34 = D[:, i34]\n D34rank = D34.rank()\n\n DD34rank = 0\n DD5rank = 0\n if op2.is_valid():\n DD = op2.get_matrix()\n # PP34 = vs342.get_P34()\n ii34 = vs342.get_34index_list()\n # DD34 = DD * PP34 \n DD34 = DD[:,ii34]\n DD34rank = DD34.rank()\n\n # P5 = vs34.get_P5()\n # DD5 = P5 * DD * PP34\n i5 = vs34.get_5index_list()\n DD5 = DD[i5, ii34]\n DD5rank = DD5.rank()\n\n\n return vs34.get_34dimension() - D34rank -DD34rank + DD5rank", "def test_superposition_ud_cnot():\n program = dedent(\n \"\"\"\\\n register q0[0]\n register q1[1]\n H q1\n CNOT q1 q0\n \"\"\"\n )\n\n result = run(program, run_gate_array, return_distribution=True)\n assert isclose(result, [0.5, 0.0, 0.0, 0.5]).all()", "def lstmcell_grad_h(input, hx, cx, w_ih, w_hh, b_ih, b_hh, dh, dc, target=\"cce\"):\n # things from fwd\n batch, input_size = get_shape(input)\n _, hidden_size = get_shape(hx)\n xh = akg.topi.concatenate((hx, input), 1)\n whl = [w_ih, w_hh]\n W = Concat(whl, 1) # [4*hidden_size, input_size+hidden_size]\n\n gates = dense(input, w_ih, b_ih, True) + dense(hx, w_hh, b_hh, True)\n\n ingate_in, forgetgate_in, cellgate_in, outgate_in = Split(gates, 4, 1)\n\n ingate = sigmoid(ingate_in)\n forgetgate = sigmoid(forgetgate_in)\n cellgate = Tanh(cellgate_in)\n outgate = sigmoid(outgate_in)\n cy = (forgetgate * cx) + (ingate * cellgate)\n tanh_cy = Tanh(cy)\n #hy = outgate * tanh_cy\n\n # starts bwd\n # head * dh/do shape [n,]\n doutgate = dh * tanh_cy\n doutgate_in = outgate * (1 - outgate) * doutgate\n kk = akg.tvm.reduce_axis((0, batch))\n dWo = akg.tvm.compute((hidden_size, hidden_size + input_size), lambda i, j:\n akg.tvm.sum(xh[kk, j] * doutgate_in(kk, i), axis=kk), name=\"dWo\")\n\n dtanh_cy = dh * outgate\n dc = (1 - tanh_cy * tanh_cy) * dtanh_cy\n\n dingate = cellgate * dc\n dingate_in = ingate * (1 - ingate) * dingate\n kk3 = akg.tvm.reduce_axis((0, batch))\n dWi = akg.tvm.compute((hidden_size, hidden_size + input_size), lambda i, j:\n akg.tvm.sum(xh[kk3, j] * dingate_in(kk3, i), axis=kk3), name=\"dWi\")\n\n dforgetgate = dc * cx\n dforgetgate_in = forgetgate * (1 - forgetgate) * dforgetgate\n kk2 = akg.tvm.reduce_axis((0, batch))\n dWf = akg.tvm.compute((hidden_size, hidden_size + input_size), lambda i, j:\n akg.tvm.sum(xh[kk2, j] * dforgetgate_in(kk2, i), axis=kk2), name=\"dWf\")\n\n dcellgate = ingate * dc\n dcellgate_in = (1 - cellgate * cellgate) * dcellgate\n kk4 = akg.tvm.reduce_axis((0, batch))\n dWc = akg.tvm.compute((hidden_size, hidden_size + input_size), lambda i, j:\n akg.tvm.sum(xh[kk4, j] * dcellgate_in(kk4, i), axis=kk4), name=\"dWc\")\n\n dW = akg.topi.concatenate((dWi, dWf, dWc, dWo))\n\n db = akg.topi.concatenate((dingate_in, dforgetgate_in, dcellgate_in, doutgate_in), 1)\n\n kk5 = akg.tvm.reduce_axis((0, 4 * hidden_size))\n dxh = akg.tvm.compute((batch, hidden_size + input_size), lambda i, j:\n akg.tvm.sum(W[kk5, j] * db[i, kk5], axis=kk5), name=\"dxh\")\n dhx = 
akg.tvm.compute((batch, hidden_size), lambda i, j: dxh[i, j], name=\"dhx\")\n dx = akg.tvm.compute((batch, input_size), lambda i, j: dxh[i, j + hidden_size], name=\"dx\")\n\n dcx = forgetgate * dc\n\n dw_ih = akg.tvm.compute(w_ih.shape, lambda i, j: dW[i, j])\n #dw_hh = akg.tvm.compute(w_hh.shape, lambda i, j: dW[i, j + input_size])\n\n bhr = akg.tvm.reduce_axis((0, batch))\n\n db_ih = akg.tvm.compute((4 * hidden_size,), lambda i: akg.tvm.sum(db[i, bhr], axis=bhr), name=\"dbih\")\n\n bir = akg.tvm.reduce_axis((0, batch))\n\n db_hh = akg.tvm.compute((4 * hidden_size,), lambda i: akg.tvm.sum(db[i, bir], axis=bir), name=\"dbhh\")\n\n return dw_ih, w_hh, db_ih, db_hh, dcx, dhx, dx", "def __findUHF(self,samples): \n self.GPU_setArrayToZeros.prepared_call(self.gShapeZero,self.bShapeZero,\n self.GPU_bufDoppSum)\n\n self.GPU_filterMasks.prepared_call(self.gShapeVecMasks,self.bShapeVecMasks,\n self.GPU_bufXcorr,self.GPU_bufSignalFreq,self.GPU_bufBitsMask,self.GPU_bufDoppIdx,\n np.int32(self.Nfft),np.int32(self.doppIdxArrayLen))\n\n if self.FFTNoBatches == 1:\n cufft.cufftExecC2C(self.fftPlanDopplers,int(self.GPU_bufXcorr),int(self.GPU_bufXcorr),cufft.CUFFT_INVERSE)\n\n \n else:\n # It is at the moment faster to call single fft loops than batched fft loops (don't ask me why). This can however be changed in the config filte. Therefore all these loops\n for j in range(self.fftLoops):\n for i in range(self.num_streams):\n if j*self.num_streams + i < self.FFTNoBatches:\n buffAddr = self.buffAddr[j*self.num_streams+i] # compute the address in the large buffer\n cufft.cufftExecC2C(self.fftPlanDopplers[i],buffAddr,buffAddr,cufft.CUFFT_INVERSE)\n \n self.GPU_absSumDoppler.prepared_call(self.gShapeAbsSum,self.bShapeAbsSum,\n self.GPU_bufDoppSum,self.GPU_bufXcorr,np.int32(self.Nfft))\n\n if STORE_BITS_IN_FILE:\n tstore = time.time()\n tmpArr = np.empty((self.doppIdxArrayLen,self.num_masks),dtype=np.float32)\n cuda.memcpy_dtoh(tmpArr,self.GPU_bufDoppSum)\n self.sum_match = np.vstack([self.sum_match,np.sum(tmpArr,axis=1)])\n sumBest = np.argmax(np.sum(tmpArr,axis=1))\n log.debug(f'demodulator store in file 1 (save array) time {time.time()-tstore} s')\n\n self.GPU_estDoppler.prepared_call(self.gShapeDopp,self.bShapeDopp,\n self.GPU_bufDoppResult,self.GPU_bufFindDoppTmp,self.GPU_bufDoppSum,np.int32(self.num_dopplers),np.int32(self.doppIdxArrayOffset))\n\n bestDoppler = np.empty(2,dtype=np.float32) # contains [doppler, standard_dev]\n cuda.memcpy_dtoh(bestDoppler,self.GPU_bufDoppResult) # fetch the result\n # do scaling with the best Doppler before returning\n # Keep the scaling in indices. This is handier for the demodulation\n\n try:\n lowIdx = int(bestDoppler[0])\n highIdx = int(np.ceil(bestDoppler[0]))\n lowVal = self.doppHzLUT[lowIdx]\n highVal = self.doppHzLUT[highIdx]\n # This one is for our stats\n bestDopplerScaled = lowVal + (highVal-lowVal) * (bestDoppler[0] % 1) # scale with decimal offset\n\n # this one is for the demodulator. Contains the index\n self.dopplerIdxlast = np.int32(np.round(self.doppCyperSymNorm[lowIdx] + (self.doppCyperSymNorm[highIdx]-self.doppCyperSymNorm[lowIdx]) * (bestDoppler[0] % 1))) # directly for use in the demodulator\n # print(f'bestDopplerScaled {self.dopplerIdxlast}')\n SNR = self.computeSNR(lowIdx,highIdx,5)\n # SNR = 20.\n freqOffset = bestDopplerScaled - self.centreFreqOffset # we only want the detected frequency offset. 
Not the IF offset\n sdev_Hz = bestDoppler[1]/self.Nfft * self.sampleRate # not used when SUM_ALL_MASKS = 1\n\n except ValueError as e:\n log.error(f'Error occurred during find_UHF -- skipping block. Message: {e}')\n self.dopplerIdxlast = 0\n freqOffset = 0.\n sdev_Hz = 0.\n SNR = 0.\n \n return freqOffset, sdev_Hz, self.clippedPeakIPure, SNR", "def directed_Hausdorff_hyperbox(b1,b2): \n return max(0,np.max(np.hstack((b1.u-b2.u,b2.l-b1.l))))", "def getEG(n,int_method,func) :\n m = np.asarray([0.5,0.6,0.7,0.8,0.9,1,1.1,1.2,1.3,1.4,1.5,2,2.5,3,3.5,4,\n 4.5,5,5.5,6,6.5,7,7.5,8,8.5,9,9.5,10])\n bes = np.asarray([0.5,0.47768,0.44879,0.39831,0.25858,0,0.15502,0.25699,\n 0.30896,0.35245,0.39119,0.51822,0.53678,0.54984,0.55847,\n 0.56395,0.57054,0.57950,0.58402,0.58765,0.59512,0.60214,\n 0.60469,0.61143,0.61789,0.62443,0.63097,0.63694])\n p = np.asarray([1,0.85417,0.94685,1.04467,2.55052,0,1.59086,\n 1.00670,0.88866,0.83763,0.81030,0.76108,0.83093,0.86863,\n 0.89233,0.90909,0.92097,0.93007,0.93735,0.94332,0.94813,\n 0.95193,0.95557,0.95864,0.96107,0.96360,0.96570,\n 0.96788])\n h0 = np.asarray([0,-0.03567,-0.04808,-0.04315,-0.01879,0,0.00041,0.00069,\n 0.00639,0.01405,0.02294,0.07814,0.13994,0.19278,0.23793,\n 0.27678,0.31039,0.33974,0.36585,0.38917,0.41003,0.42891,\n 0.44621,0.46195,0.47644,0.48982,0.50223,0.51379])\n h1 = np.asarray([0,0.26899, 0.10571,0.01763,-0.39382,0,0.15211,0.05665,\n 0.00933,-0.02791,-0.05876,-0.16720,-0.13033,-0.10455 ,\n -0.08618,-0.07208,-0.06179,-0.05369,-0.04715,-0.04176,\n -0.03742,-0.03408,-0.03081,-0.02808,-0.02599,-0.02375,\n -0.02194,-0.02004])\n h2 = np.asarray([0,-0.09016,-0.06893,-0.04971,-0.08828,0,-0.03341,\n -0.03964,-0.04456,-0.04775,-0.04984,-0.05381,-0.03570,\n -0.02476,-0.01789,-0.01333,-0.01028,-0.00812,-0.00653,\n -0.00534,-0.00444,-0.00376,-0.00319,-0.00274,-0.00238,\n -0.00207,-0.00182,-0.00160])\n h3 = np.asarray([0,0.03993,0.03363,0.02216,-0.00797,0,0.00899,0.01172,\n 0.01150,0.01026,0.00860,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,\n 0])\n \n if (func == True) :\n int_bes = splev(n,splrep(m, bes))\n int_p = splev(n,splrep(m, p))\n int_h0 = splev(n,splrep(m, h0))\n int_h1 = splev(n,splrep(m, h1))\n int_h2 = splev(n,splrep(m, h2))\n int_h3 = splev(n,splrep(m, h3))\n else :\n int_bes = griddata(m, bes, n, method=int_method)\n int_p = griddata(m, p, n, method=int_method)\n int_h0 = griddata(m, h0, n, method=int_method)\n int_h1 = griddata(m, h1, n, method=int_method)\n int_h2 = griddata(m, h2, n, method=int_method)\n int_h3 = griddata(m, h3, n, method=int_method)\n \n return np.asarray([int_bes,int_p,int_h0,int_h1,int_h2,int_h3])", "def glueEmH( Ja, Jf, truncNum = scipy.inf ):\n w, v = truncBasisH( Ja, truncNum )\n sPlus, sMinus, sZ = sPlusAndMinusAndZ( v )\n \n H1 = scipy.zeros( ( len(w)**4, len(w)**4 ) )\n \n for n in range( len(w)**4 ):\n # Diagonal previous generation contributions\n o = oct(n)[-4:].zfill(4)\n o = [int(char) for char in o]\n o_A, o_B, o_C, o_D = o\n \n H1[n, n] += scipy.sum( [ w[ i ] for i in o ] )\n \n # Edge terms\n for np in range( n, len(w)**4 ):\n op = oct(np)[-4:].zfill(4)\n op = [int(char) for char in op]\n op_A, op_B, op_C, op_D = op\n \n x = 0.\n if ( (o_B == op_B) and (o_C == op_C) ):\n x += -Jf * ( .5 * ( sPlus[0][o_A, op_A] * sMinus[0][o_D, op_D] + sMinus[0][o_A, op_A] * sPlus[0][o_D,op_D] ) + sZ[0][o_A, op_A] * sZ[0][o_D, op_D] )\n if ( (o_C == op_C) and (o_A == op_A) ):\n x += -Jf * ( .5 * ( sPlus[1][o_B, op_B] * sMinus[1][o_D, op_D] + sMinus[1][o_B, op_B] * sPlus[1][o_D,op_D] ) + sZ[1][o_B, op_B] * sZ[1][o_D, op_D] )\n if ( 
(o_A == op_A) and (o_B == op_B) ):\n x += -Jf * ( .5 * ( sPlus[2][o_C, op_C] * sMinus[2][o_D, op_D] + sMinus[2][o_C, op_C] * sPlus[1][o_D,op_D] ) + sZ[1][o_C, op_C] * sZ[2][o_D, op_D] )\n \n H1[n, np] = x\n H1[np, n] = x\n \n return H1", "def ecfp(mol,radius):\n #mol=Chem.AddHs(mol)\n bitInfo={}\n atoms_dict=invariants(mol)\n \n for idxs,i in atoms_dict.items():\n bitInfo[i]=bitInfo.get(i,())+((idxs,0),)\n \n neighborhoods=[]\n atom_neighborhoods=[len(mol.GetBonds())*bitarray('0') for a in mol.GetAtoms()]\n dead_atoms=len(mol.GetAtoms())*bitarray('0')\n \n for r in range(1,radius+1):\n round_ids={} #new bit ID this iteration\n round_atom_neighborhoods=copy.deepcopy(atom_neighborhoods) #bond to include under this r\n neighborhoods_this_round=[] #(round_atom_neighborhoods,round_ids,idxs)\n \n for idxs,a in enumerate(mol.GetAtoms()):\n if dead_atoms[idxs]:\n continue\n nbsr=[] #list to hash this iteration\n o_bond=bond(mol,idxs)\n for b in o_bond:\n round_atom_neighborhoods[idxs][b[2]] = True\n round_atom_neighborhoods[idxs] |= atom_neighborhoods[b[1]]\n nbsr.append((b[0],atoms_dict[b[1]]))\n nbsr=sorted(nbsr)\n nbsr=[item for sublist in nbsr for item in sublist]\n nbsr.insert(0,atoms_dict[idxs])\n nbsr.insert(0,r)\n \n round_ids[idxs]=get_hash(nbsr)\n neighborhoods_this_round.append((round_atom_neighborhoods[idxs],round_ids[idxs],idxs))\n for lst in neighborhoods_this_round:\n if lst[0] not in neighborhoods:\n bitInfo[lst[1]] = bitInfo.get(lst[1],())+((lst[2],r),)\n neighborhoods.append(lst[0])\n else:\n dead_atoms[lst[2]]=True\n atoms_dict=round_ids\n atom_neighborhoods=copy.deepcopy(round_atom_neighborhoods)\n return bitInfo", "def ehermite(a, b):\n [c, d, g] = extgcd(a, b)\n if g:\n E = np.array([[c, d], [-b/g, a/g]])\n else:\n E = np.array([[1, 0], [0, 1]])\n\n return E\n #Leila: check this \"http://www.ece.northwestern.edu/local-apps/matlabhelp/techdoc/ref/gcd.html\"", "def check_correctness_sigmoid_channelwise(f):\n\n rng = np.random.RandomState([2012, 7, 19])\n batch_size = 5\n pool_size = 1\n n = 3 * pool_size\n zv = rng.randn(batch_size, n).astype(config.floatX) * 1. 
- 1.5\n top_down_v = rng.randn(batch_size, n / pool_size).astype(config.floatX)\n\n z_th = T.matrix()\n z_th.name = 'z_th'\n\n top_down_th = T.matrix()\n top_down_th.name = 'top_down_th'\n\n p_th, h_th = f(z_th, pool_size, top_down_th)\n h_s = T.nnet.sigmoid(z_th + top_down_th)\n\n func = function([z_th, top_down_th], [p_th, h_th, h_s])\n\n pv, hv, h_s = func(zv, top_down_v)\n p_s = h_s\n\n assert p_s.shape == pv.shape\n assert h_s.shape == hv.shape\n if not np.allclose(h_s, hv):\n print((h_s.min(), h_s.max()))\n print((hv.min(), hv.max()))\n assert False\n if not np.allclose(p_s, pv):\n diff = abs(p_s - pv)\n print('max diff ', diff.max())\n print('min diff ', diff.min())\n print('ave diff ', diff.mean())\n assert False", "def simulation_OFC(self,ncmE,ncmC,f,g,Cfun,h,dt,tf,x0,z0=None,dscale=10.0,\\\n xnames=\"num\",Ncol=1,FigSize=(20,10),FontSize=20,phis=None):\n \"\"\"\n \n \n 1) SIMULATION\n \n \n \"\"\"\n if len(sig(f).parameters) == 1:\n fun1 = f\n f = lambda x,p: fun1(x)\n if len(sig(g).parameters) == 1:\n fun2 = g\n g = lambda x,p: fun2(x)\n if len(sig(Cfun).parameters) == 1:\n fun3 = Cfun\n Cfun = lambda x,p: fun3(x)\n if len(sig(h).parameters) == 1:\n fun4 = h\n h = lambda x,p: fun4(x)\n print(\"========================================================\")\n print(\"====================== SIMULATIOM ======================\")\n print(\"========================================================\")\n if dt <= self.dt_rk:\n self.dt_rk = dt\n self.Nrk = int(dt/self.dt_rk)\n Nsim = int(tf/dt)\n np.set_printoptions(precision=1)\n print(\"time step =\",dt)\n print(\"terminal time =\",tf)\n print(\"initial state =\",x0)\n print(\"estimated initial state =\",z0)\n funx = lambda x,p,dEf: f(x,p)+dEf(x,p)\n z = z0\n zhis = np.zeros((Nsim+1,self.n))\n zhis[0,:] = z\n x = x0\n xhis = np.zeros((Nsim+1,self.n))\n xhis[0,:] = x\n tit1 = \"Performance of NCM-based Output Feedback (1)\"\n tit2 = \"Performance of NCM-based Output Feedback (2)\"\n tit3 = \"Performance of NCM-based Output Feedback (3)\"\n tit4 = \"Performance of NCM-based Output Feedback (4)\"\n ly = r\"estimation error: $\\|x-\\hat{x}\\|_2$\"\n l1 = r\"estimation error\"\n lyb = r\"tracking error: $\\|x-x_d\\|_2$\"\n l1b = r\"tracking error\"\n bNam1 = \"=================== ESTIMATION ERROR ===================\"\n bNam2 = \"============ ESTIMATION ERROR OF EACH STATE ============\"\n bNam3 = \"==================== Tracking ERROR ====================\"\n bNam4 = \"============= Tracking ERROR OF EACH STATE =============\"\n l2 = r\"optimal steady-state upper bound\"\n if phis == None:\n phis = np.linspace(self.plims[0,:],self.plims[1,:],Nsim)\n for k in range(Nsim):\n p = phis[k,:]\n Mc = ncmC.ncm(z,p)\n u = -g(z,p).T@Mc@z\n dEfC = lambda x,p: g(x,p)@u\n d1 = self.unifrand2(ncmC.d1_over,np.size(ncmC.Bw(x,p),1))*dscale\n x = self.rk4(x,p,dEfC,funx)+ncmC.Bw(x,p)@d1*dt\n xhis[k+1,:] = x\n Me = ncmE.ncm(z,p)\n Cx = Cfun(z,p)\n Lx = Me@Cx.T\n #Lx = K.T\n d2 = self.unifrand2(ncmE.d2_over,np.size(ncmE.Gw(x,p),1))*dscale\n y = h(x,u,p)+ncmE.Gw(x,p)@d2\n funz = lambda z,p,dEf: f(z,p)+g(z,p)@u+dEf(z,p)\n dEfE = lambda z,p: Lx@(y-h(z,u,p))\n z = self.rk4(z,p,dEfE,funz)\n zhis[k+1,:] = z\n this = np.linspace(0,tf,Nsim+1)\n \"\"\"\n \n \n 2) FIGURE GENERATION\n \n \n \"\"\"\n print(\"========================================================\")\n print(bNam1)\n print(\"========================================================\")\n matplotlib.rcParams.update({\"font.size\": 15})\n matplotlib.rc(\"text\",usetex=True)\n plt.figure()\n 
plt.plot(this,np.sqrt(np.sum((xhis-zhis)**2,1)))\n plt.plot(this,np.ones(np.size(this))*ncmE.Jcv_opt)\n plt.xlabel(r\"time\",fontsize=FontSize)\n plt.ylabel(ly,fontsize=FontSize)\n plt.legend([l1,l2],loc=\"best\")\n plt.title(tit1,fontsize=FontSize)\n plt.show()\n print(\"========================================================\")\n print(bNam2)\n print(\"========================================================\")\n Nrow = int(self.n/Ncol)+np.remainder(self.n,Ncol)\n fig,ax = plt.subplots(Nrow,Ncol,figsize=FigSize)\n plt.subplots_adjust(wspace=0.25,hspace=0.25)\n if Ncol == 1:\n ax = np.reshape(ax,(self.n,1))\n elif Nrow == 1:\n ax = np.reshape(ax,(1,self.n))\n if xnames == \"num\":\n xnames = []\n for i in range(self.n):\n xnames += [r\"state \"+str(i+1)]\n for row in range(Nrow):\n for col in range(Ncol):\n i = Ncol*row+col\n if i+1 <= self.n:\n ax[row,col].plot(this,xhis[:,i]-zhis[:,i])\n ax[row,col].set_xlabel(r\"time\",fontsize=FontSize)\n LabelName = r\"estimation error: \"+xnames[i]\n ax[row,col].set_ylabel(LabelName,fontsize=FontSize)\n fig.suptitle(tit2,fontsize=FontSize)\n plt.show()\n print(\"========================================================\")\n print(bNam3)\n print(\"========================================================\")\n matplotlib.rcParams.update({\"font.size\": 15})\n matplotlib.rc(\"text\",usetex=True)\n plt.figure()\n plt.plot(this,np.sqrt(np.sum((xhis)**2,1)))\n plt.plot(this,np.ones(np.size(this))*ncmC.Jcv_opt)\n plt.xlabel(r\"time\",fontsize=FontSize)\n plt.ylabel(lyb,fontsize=FontSize)\n plt.legend([l1b,l2],loc=\"best\")\n plt.title(tit3,fontsize=FontSize)\n plt.show()\n print(\"========================================================\")\n print(bNam4)\n print(\"========================================================\")\n Nrow = int(self.n/Ncol)+np.remainder(self.n,Ncol)\n fig,ax = plt.subplots(Nrow,Ncol,figsize=FigSize)\n plt.subplots_adjust(wspace=0.25,hspace=0.25)\n if Ncol == 1:\n ax = np.reshape(ax,(self.n,1))\n elif Nrow == 1:\n ax = np.reshape(ax,(1,self.n))\n if xnames == \"num\":\n xnames = []\n for i in range(self.n):\n xnames += [r\"state \"+str(i+1)]\n for row in range(Nrow):\n for col in range(Ncol):\n i = Ncol*row+col\n if i+1 <= self.n:\n ax[row,col].plot(this,xhis[:,i])\n ax[row,col].set_xlabel(r\"time\",fontsize=FontSize)\n LabelName = r\"tracking error: \"+xnames[i]\n ax[row,col].set_ylabel(LabelName,fontsize=FontSize)\n fig.suptitle(tit4,fontsize=FontSize)\n plt.show()\n print(\"========================================================\")\n print(\"==================== SIMULATIOM END ====================\")\n print(\"========================================================\")\n return this,xhis,zhis", "def edgeHam( H, Jf, repNum, repString, adjacencyList ):\n \n flipVal = -.5 * Jf\n opVal = .25 * Jf\n sameVal = -.25 * Jf\n \n # Cycle throught the adjacencies\n for (i,j) in adjacencyList:\n \n si,sj = repString[i], repString[j]\n \n if si != sj:\n # Raising / Lowering terms will flip the two and spit out -1/2 Jf\n # We have to do some wonky stuff because you can't do element \n # assignment with strings\n flipString = [c for c in repString]\n flipString[i] = sj\n flipString[j] = si\n flipString = ''.join( flipString )\n flipNum = int( flipString, base = 2 )\n \n H[(repNum, flipNum)] += flipVal\n \n # Opposite spins will get a 1/4 from the s_z terms\n H[(repNum, repNum)] += opVal\n \n else:\n # Same spins will get a -1/4 from the s_z terms\n H[(repNum, repNum)] += sameVal", "def check_correctness_channelwise(f):\n\n rng = 
np.random.RandomState([2012, 7, 19])\n batch_size = 5\n pool_size = 4\n n = 3 * pool_size\n zv = rng.randn(batch_size, n).astype(config.floatX) * 1. - 1.5\n top_down_v = rng.randn(batch_size, n / pool_size).astype(config.floatX)\n\n p_np, h_np = max_pool_channels_python(zv, pool_size, top_down_v)\n\n z_th = T.matrix()\n z_th.name = 'z_th'\n\n top_down_th = T.matrix()\n top_down_th.name = 'top_down_th'\n\n p_th, h_th = f(z_th, pool_size, top_down_th)\n\n func = function([z_th, top_down_th], [p_th, h_th])\n\n pv, hv = func(zv, top_down_v)\n\n assert p_np.shape == pv.shape\n assert h_np.shape == hv.shape\n if not np.allclose(h_np, hv):\n print((h_np.min(), h_np.max()))\n print((hv.min(), hv.max()))\n assert False\n if not np.allclose(p_np, pv):\n diff = abs(p_np - pv)\n print('max diff ', diff.max())\n print('min diff ', diff.min())\n print('ave diff ', diff.mean())\n assert False", "def mat24_complete_heptad(p_io):\n ## uint_fast32_t err, s1, s5, s015, s3, s4, s8, s01234; \n ## uint_fast32_t s567, s67, s9AB, s9CD, s9, s6GH, s6;\n ## uint_fast32_t sACE, sD, sFGI, sG, sFJK, sJLM, sALN;\n\n err = ((p_io[0] + 8) | (p_io[1] + 8) | (p_io[2] + 8) | \n (p_io[3] + 8) | (p_io[4] + 8) | (p_io[5] + 8) | (p_io[8] + 8));\n err &= (0 - 0x20);\n s1 = 1 << p_io[1]; \n s5 = 1 << p_io[5];\n s015 = (1 << p_io[0]) ^ s1 ^ s5;\n s3 = 1 << p_io[3]; \n s4 = 1 << p_io[4]; \n s8 = 1 << p_io[8];\n s01234 = s015 ^ s5 ^ (1 << p_io[2]) ^ s3 ^ s4;\n # if err == 0 then 0 <= s01234 < 0x1000000 has odd parity\n # octad = [0, 1, 2, 3, 4, 5, 6, 7]\n s567 = odd_syn(s01234);\n err |= (s01234) & s567;\n # if err == 0 then entries [0,1,2,3,4,6] are in an octad\n err |= (s01234 | s567) & s8;\n # if err == 0 then entry 8 is not in that octad\n err |= s5 ^ (s5 & s567);\n s67 = s567 & ~s5;\n # octad = [0, 1, 2, 3, 8, 9, 10, 11]\n s9AB = odd_syn(s01234 ^ s4 ^ s8);\n # octad = [0, 1, 4, 5, 8, 9, 12, 13]\n s9CD = odd_syn(s015 ^ s4 ^ s8);\n s9 = s9AB & s9CD;\n p_io[9] = lsb24(s9);\n # octad [1, 3, 5, 6, 8, 9, 16, 17]\n s6GH = odd_syn(s1 ^ s3 ^ s5 ^ s8 ^ s9);\n s6 = s67 & s6GH;\n p_io[6] = lsb24(s6);\n p_io[7] = lsb24(s67 & ~s6GH);\n # still needed: \n # err, s1, s015, s3, s8, s01234, s9AB, s9CD, s9, s6GH, s6\n # octad [0, 2, 4, 6, 8, 10, 12, 14]\n sACE = odd_syn(s01234 ^ s1 ^ s3 ^ s6 ^ s8);\n p_io[10] = lsb24(s9AB & sACE);\n p_io[11] = lsb24(s9AB & ~sACE & ~s9);\n p_io[12] = lsb24(s9CD & sACE);\n sD = s9CD & ~sACE & ~s9;\n p_io[13] = lsb24(sD);\n p_io[14] = lsb24(sACE & ~s9AB & ~s9CD);\n # still needed: \n # err, s1, s015, s3, s8, s6GH, s6, sACE, sD\n # octad [0, 1, 5, 6, 13, 15, 16, 18]\n sFGI = odd_syn(s015 ^ s6 ^ sD);\n sG = s6GH & sFGI;\n p_io[16] = lsb24(sG);\n p_io[17] = lsb24(s6GH & ~s6 & ~sFGI);\n # octad [0, 1, 3, 5, 8, 15, 19, 20]\n sFJK = odd_syn(s015 ^ s3 ^ s8);\n p_io[15] = lsb24(sFGI & sFJK);\n p_io[18] = lsb24(sFGI & ~sG & ~sFJK);\n # octad [0, 3, 5, 6, 16, 19, 21, 22]\n sJLM = odd_syn(s015 ^ s1 ^ s3 ^ s6 ^ sG);\n p_io[19] = lsb24(sFJK & sJLM);\n p_io[20] = lsb24(sFJK & ~sFGI & ~sJLM);\n # octad [0, 1, 5, 6, 8, 10, 21, 23]\n sALN = odd_syn(s015 ^ s6 ^ s8); \n p_io[21] = lsb24(sALN & sJLM);\n p_io[22] = lsb24(sJLM & ~sALN & ~sFJK);\n p_io[23] = lsb24(sALN & ~sACE & ~sJLM);\n return err;", "def homothick():\n return se2hmt(binary([[1,1,1],\n [0,0,0],\n [0,0,0]]),\n binary([[0,0,0],\n [0,1,0],\n [1,1,1]]))", "def fedorenko(tau, h, grid):\n relation = tau / h\n new_grid = np.zeros(len(grid) - 1)\n for m in range(1, len(grid) - 1):\n new_grid[m-1] = grid[m] - np.dot(relation, grid[m] - grid[m-1]) - np.dot(relation / 2 * (relation 
- relation ** 2), grid[m-1] - 2 * grid[m] + grid[m+1])\n new_grid = np.insert(new_grid, 0, grid[0])\n return new_grid", "def optimize_fore(self):\n self.u = np.random.uniform(-1, 1, (32, 288, 1, 1))\n \n self.l2 = torch.from_numpy(self.u).float()\n print('self u shape',self.l2.shape)\n self.n = torch.randn(32, 1, 28, 28)\n self.l1 = self.enc(self.input + self.n)\n self.del1=self.dec(self.l1)\n self.del2=self.dec(self.l2)\n self.update_netc()\n self.update_netd()\n\n self.update_l2()\n self.update_netg()\n print('sssssssssssssssssssssss')", "def pingjiazhibiao(result):\n import math\n list_ed_normal = []\n list_es_normal = []\n list_ed_true = []\n list_es_true = []\n # these definations are for statistic\n ed_pred_all, es_pred_all,ed_true_all,es_true_all,ed_match,es_match,ed_normal,es_normal,ed_nomiss,es_nomiss= 0,0,0,0,0,0,0,0,0,0\n total_error_ed,total_error_es = 0,0\n sample_missimg_num = 0\n a4cdDict = {}\n a4csDict = {}\n for i in range(-5,7):\n a4cdDict[i] = 0\n a4csDict[i] = 0\n for i in result:\n pred = i[0]\n ed_pred = pred[0]\n es_pred = pred[1]\n if ed_pred == [] or es_pred == []:\n sample_missimg_num += 1\n true = i[1]\n ed_true = true[0]\n es_true = true[1]\n\n # avoid many to one\n ed_pred.sort()\n es_pred.sort()\n deleteAmong10frames(ed_pred)\n deleteAmong10frames(es_pred)\n \n for j in ed_pred:\n ed_pred_all += 1\n for t in ed_true:\n if math.fabs(j - t) < 6:\n ed_normal += 1\n total_error_ed += math.fabs(t - j)\n a4cdDict[j-t]+=1\n break\n # all - normal = FP\n # normal is TP\n a4cdDict[6] = ed_pred_all-ed_normal\n\n for j in es_pred:\n es_pred_all += 1\n for t in es_true:\n if math.fabs(j - t) < 6:\n es_normal += 1\n total_error_es += math.fabs(t - j)\n a4csDict[j-t]+=1\n break\n a4csDict[6] = es_pred_all-es_normal\n for j in ed_true:\n ed_true_all += 1\n for t in ed_pred:\n if math.fabs(t - j) < 6:\n ed_nomiss += 1\n break\n\n for j in es_true:\n es_true_all += 1\n for t in es_pred:\n if math.fabs(t - j) < 6:\n es_nomiss += 1\n break\n # aFD precision recall \n ed_result = total_error_ed / ed_normal,(ed_normal / ed_pred_all),(ed_nomiss / ed_true_all)\n es_result = total_error_es / es_normal,(es_normal / es_pred_all),(es_nomiss / es_true_all)\n return ed_result,a4cdDict, es_result,a4csDict, sample_missimg_num / len(result)", "def wol(noi, noft, times,\r\n M, NE, NI,\r\n Omee, Omei, Omie, Omii, F):\r\n FF=np.zeros((NE,1))\r\n for h in range(noi):\r\n ip=np.random.rand(M,1)\r\n ip/=np.linalg.norm(ip)\r\n cn=np.zeros((NE,1))\r\n for i in range(noft):\r\n c = np.zeros((M,1))\r\n VE, VI=np.zeros((NE, times)), np.zeros((NI, times))\r\n oe, oi=np.zeros((NE, times)), np.zeros((NI, times))\r\n re, ri=np.zeros((NE, times)), np.zeros((NI, times))\r\n for j in range(times):\r\n epev, epiv = sigv * np.random.randn(1,1), sigv * np.random.randn(1,1)\r\n epet, epit = sigt * np.random.randn(1,1), sigt * np.random.randn(1,1)\r\n c[:,0] = + 1 * ip[:,0]\r\n VE[:,j]=(1 - lam * dt) * VE[:,j-1] + dt* F[:,:] @ c[:,0] + Omee[:,:] @ oe[:,j-1] + Omie[:,:] @ oi[:,j-1] + epev[0,0]\r\n if VE[ne,j]>TE and RE[ne,0] < 0:\r\n oe[ne,j] = 1\r\n re[:,j]=(1 - lam * dt) * re[:,j-1]+oe[:,j-1]\r\n VI[:,j]=(1 - lam * dt) * VI[:,j-1] + Omei[:,:] @ oe[:,j-1] + Omii[:,:] @ oi[:,j-1] + epiv[0,0]\r\n ni=np.argmax(VI[:,j] - TI - epit[0,0])\r\n if VI[ni,j]>TI and RI[ni,0] < 0:\r\n oi[ni,j] = 1\r\n ri[:,j]=(1 - lam * dt) * ri[:,j-1]+oi[:,j-1]\r\n np.hstack((cn,np.sum(oe, axis=1, keepdims=True)))\r\n np.hstack((FF, np.var(cn[:,1:], axis=1)/np.mean(cn[:,1:], axis=1, keepdims=True)))\r\n return np.nanmean(FF[:,1:])", "def 
doublehgm_same_evaluation(input_generator,branches,iden_method,Plot,reference=None):\n input_signal = input_generator.GetOutput()\n filter_spec_tofind = nlsp.log_bpfilter(branches=branches,input=input_signal)\n ref_nlsystem = nlsp.HammersteinGroup_Series(input_signal=input_signal,\n nonlinear_functions=(nlsp.nl_branches(nlsp.function_factory.power_series,branches),nlsp.nl_branches(nlsp.function_factory.power_series,branches)),\n filter_irs=(filter_spec_tofind,filter_spec_tofind),\n max_harmonics=(range(1,branches+1),range(1,branches+1)),\n hgm_type=(nlsp.HammersteinGroupModel_up,nlsp.HammersteinGroupModel_up))\n\n found_filter_spec, nl_functions = iden_method(input_generator,ref_nlsystem.GetOutput(2),branches)\n iden_nlsystem = nlsp.HammersteinGroupModel_up(input_signal=input_signal,\n nonlinear_functions=nl_functions,\n filter_irs=found_filter_spec,\n max_harmonics=range(1,branches+1))\n if reference is not None:\n reference = nlsp.change_length_signal(reference,length=len(input_signal))\n ref_nlsystem = nlsp.HammersteinGroup_Series(input_signal=reference,\n nonlinear_functions=(nlsp.nl_branches(nlsp.function_factory.power_series,branches),nlsp.nl_branches(nlsp.function_factory.power_series,branches)),\n filter_irs=(filter_spec_tofind,filter_spec_tofind),\n max_harmonics=(range(1,branches+1),range(1,branches+1)),\n hgm_type=(nlsp.HammersteinGroupModel_up,nlsp.HammersteinGroupModel_up))\n iden_nlsystem.SetInput(reference)\n if Plot is True:\n plot.relabelandplotphase(sumpf.modules.FourierTransform(ref_nlsystem.GetOutput(2)).GetSpectrum(),\"Reference System\",show=False)\n plot.relabelandplotphase(sumpf.modules.FourierTransform(iden_nlsystem.GetOutput()).GetSpectrum(),\"Identified System\",show=True)\n print \"SNR between Reference and Identified output for double hgm all same: %r\" %nlsp.snr(ref_nlsystem.GetOutput(2),\n iden_nlsystem.GetOutput())", "def extendedGcd(a, b):\n x0 = 1\n x1 = 0\n y0 = 0\n y1 = 1\n\n while b != 0:\n p = a // b\n z = a % b\n a = b\n b = z\n\n w = x1\n x1 = x0 - p * x1\n x0 = w\n \n v = y1\n y1 = y0 - p * y1\n y0 = v\n print(\"returns: gcd, si, ti\")\n return (gcd(a, b), x0, y0)", "def _h(W):\r\n # E = slin.expm(W * W)\r\n # h = np.trace(E) - d\r\n M = np.eye(d) + W * W / d\r\n E = np.linalg.matrix_power(M, d - 1)\r\n h = (E.T * M).sum() - d\r\n G_h = E.T * W * 2\r\n return h, G_h", "def _dipole_gof(uu, sing, vv, B, B2):\n ncomp = 3 if sing[2] / sing[0] > 0.2 else 2\n one = np.dot(vv[:ncomp], B)\n Bm2 = np.sum(one ** 2)\n return Bm2 / B2, one", "def test_init_hebbian_3(self):\n v_one = [1, -1, -1, -1, 1, -1, -1, -1, 1]\n v_two = [-1, -1, -1, 1, 1, 1, -1, -1, -1]\n v_three = [-1, -1, 1, -1, -1, 1, -1, -1, 1]\n network = HopfieldNetwork([v_one, v_two, v_three])\n expected = np.array([\n [0, 1, -1, -1, 1, -3, 1, 1, 1],\n [1, 0, 1, 1, -1, -1, 3, 3, -1],\n [-1, 1, 0, -1, -3, 1, 1, 1, 1],\n [-1, 1, -1, 0, 1, 1, 1, 1, -3],\n [1, -1, -3, 1, 0, -1, -1, -1, -1],\n [-3, -1, 1, 1, -1, 0, -1, -1, -1],\n [1, 3, 1, 1, -1, -1, 0, 3, -1],\n [1, 3, 1, 1, -1, -1, 3, 0, -1],\n [1, -1, 1, -3, -1, -1, -1, -1, 0]\n ])\n npt.assert_equal(network.weight_matrix, expected)", "def ohms(self):\n # Rwb = Rwiper + Rtotal * (counts / 256)\n # Rwa = Rwiper + Rtotal * ((256 - counts) / 256)\n g = 0\n rtotal=0.0\n reach=[]\n for chan in self.get_channel_list(self.nchans):\n self.rwa[chan] = float( 256 - self.vals[chan] ) / 256.0\n self.rwb[chan] = float( self.vals[chan] ) / 256.0\n self.rwa[chan] *= self.Rtotal\n self.rwb[chan] *= self.Rtotal \n self.rwa[chan] += self.Rwiper\n self.rwb[chan] += 
self.Rwiper", "def inverse_gc(g):\n i = g\n j = 1\n while j<N:\n i = i ^ (g >> j)\n j = j + 1\n return i", "def toffoli(ha, hb, hc):\n\n return controlled_U(ha, cnot(hb, hc))", "def grover(self) -> None:\n\n size = self.circ_size\n random.seed(self.seed)\n\n # Split the single qreg into the computational register and a \"work\" register to store the oracle results\n qr = self.qr[:-1]\n wr = self.qr[-1:]\n\n if self.truth_value is None:\n raise ValueError(f'This circuit type requires a truth value be set before it can be generated. i.e. with'\n f'\"my_Premade.truth_value = (some_int)\"')\n\n tv = self.truth_value\n\n self.h(qr)\n\n loop = math.floor(math.sqrt(2 ** size))\n logger.debug(f'loop: {loop}')\n for i in range(loop):\n # NOT the wires where the magic state bit should be 0\n for j in range(len(qr)):\n if not tv & (1 << j): # i.e. tharr be zeroes in this here bitstring position\n self.x(qr[j])\n\n self.x(wr[0])\n self.mcrz(math.pi, qr, wr[0])\n self.x(wr[0])\n\n for j in range(len(qr)):\n if not tv & (1 << j): # i.e. tharr be zeroes in this here bitstring position\n self.x(qr[j])\n\n # Inversion about the average\n self.h(qr)\n self.x(qr)\n\n self.mcrz(math.pi, qr[:-1], qr[-1])\n\n self.x(qr)\n self.h(qr)\n\n if self.meas: self.measure(self.qr, self.cr)", "def test_superposition_cnot():\n\n program = dedent(\n \"\"\"\\\n register q0[0]\n register q1[1]\n H q0\n CNOT q0 q1\n \"\"\"\n )\n\n result = run(program, run_gate_array, return_distribution=True)\n assert isclose(result, [0.5, 0.0, 0.0, 0.5]).all()", "def cell_cnc_tracker(Out, U, V, W, t, cell0, cellG, cellD, SHP, cryoconite_locations):\n\n Cells = np.random.rand(len(t),SHP[0],SHP[1],SHP[2]) * cell0\n CellD = np.zeros(shape=(SHP[0],SHP[1],SHP[2])) + cellD\n CellG = np.zeros(shape=(SHP[0],SHP[1],SHP[2])) + cellG\n \n \n for i in np.arange(0,SHP[0],1):\n CellG[i,:,:] = np.where(cryoconite_locations==True, CellG[i,:,:]*1000, CellG[i,:,:])\n \n for t in np.arange(0,len(Out.Qz[:,0,0,0]),1):\n \n for layer in np.arange(0,len(Out.Qz[0,:,0,0]),1):\n\n # normalise lateral flow so that -ve= flow out of cells towards edges\n # and positive flow is towards centre line. In Qx -ve = leftwards flow\n # and +ve = rightwards flow. 
This leads to a rightwards drift in cell\n # fluxes if not normalised in this way.\n U[t,layer,:,0:int(U.shape[2]/2)] = 0-U[t,layer,:,0:int(U.shape[2]/2)]\n\n # nabla is the inverted delta operator used to denote the divergence \n # of a vector field, here applied to hydrological flow in m3/d \n # calculated as dx/dt + dy/dt + dz/dt\n\n nabla = (U[t,layer,:,:] + V[t,layer,:,:] + W[t,layer,:,:])\n \n # divergence gives net in/outflow in m3/t\n # cells/m3 = cells/mL *1000\n\n delC = Out.Q[t,layer,:,:] * (1+CellG[layer,:,:] - CellD[layer,:,:])\n\n Cells[t,layer,:,:] = Cells[t,layer,:,:] + (delC * 1000) \n\n Cells[Cells<0] = 0\n \n CellColumnTot = Cells.sum(axis=1)\n \n\n return Cells, CellColumnTot", "def dnde_photon_spectrum_fns(\n self,\n) -> Dict[str, Callable[[Union[float, npt.NDArray[np.float64]], float], float]]:\n\n def dnde_zero(e, _: float):\n return np.zeros_like(e)\n\n def wrap(f):\n @functools.wraps(f)\n def fnew(*args, **kwargs):\n return f(self, *args, **kwargs)\n\n return fnew\n\n return {\n \"e e\": wrap(dnde_photon_e_e),\n \"mu mu\": wrap(dnde_photon_mu_mu),\n \"ve ve\": dnde_zero,\n \"vt vt\": dnde_zero,\n \"vm vm\": dnde_zero,\n \"pi pi\": wrap(dnde_photon_pi_pi),\n \"k0 k0\": wrap(dnde_photon_k0_k0),\n \"k k\": wrap(dnde_photon_k_k),\n \"pi0 gamma\": wrap(dnde_photon_pi0_gamma),\n \"eta gamma\": wrap(dnde_photon_eta_gamma),\n \"pi0 phi\": wrap(dnde_photon_pi0_phi),\n \"eta phi\": wrap(dnde_photon_eta_phi),\n \"eta omega\": wrap(dnde_photon_eta_omega),\n \"pi0 pi0 gamma\": wrap(dnde_photon_pi0_pi0_gamma),\n \"pi pi pi0\": wrap(dnde_photon_pi_pi_pi0),\n \"pi pi eta\": wrap(dnde_photon_pi_pi_eta),\n \"pi pi etap\": wrap(dnde_photon_pi_pi_etap),\n \"pi pi omega\": wrap(dnde_photon_pi_pi_omega),\n \"pi0 pi0 omega\": wrap(dnde_photon_pi0_pi0_omega),\n \"pi0 k0 k0\": wrap(dnde_photon_pi0_k0_k0),\n \"pi0 k k\": wrap(dnde_photon_pi0_k_k),\n \"pi k k0\": wrap(dnde_photon_pi_k_k0),\n \"pi pi pi pi\": wrap(dnde_photon_pi_pi_pi_pi),\n \"pi pi pi0 pi0\": wrap(dnde_photon_pi_pi_pi0_pi0),\n \"v v\": wrap(dnde_photon_v_v),\n }", "def le_func(rn, g, h):\n le = np.copy(rn)\n le -= g\n le -= h\n return le", "def Heralded_HOM_exp_simulator(squeezing_parameter, bs_reflectivity, number_resolving_det=True, coher_ampl=0., old_PNR_func=False,\n cutoff=6, parallelized=False):\n\n #####################\n ### INITALISATION ###\n #####################\n\n nmodes = 4\n\n ## Initialises the initial covariance matrix of the Gaussian state to be the identity,\n ## meaning we start with vacuum in all modes.\n cov_mat = np.identity(2 * nmodes)\n\n ##############################\n ### DEFINE COHERENT STATES ###\n ##############################\n\n ## Defines the coherent state amplitudes in the input modes.\n ## Because no coherent state is present here, they are all zeros.\n ampls = np.ones(nmodes) * coher_ampl\n\n ###########################\n ### DEFINE SPDC SOURCES ###\n ###########################\n\n ## Defines the first SPDC source as a TMS between modes 0 and 1, as a symplectic transformation\n TMS1 = get_tms_sym(squeezing_parameter, phi=0, Mode1=0, Mode2=1, NumModes=nmodes)\n\n ## Defines the second SPDC source as a TMS between modes 2 and 3, as a symplectic transformation\n TMS2 = get_tms_sym(squeezing_parameter, phi=0, Mode1=2, Mode2=3, NumModes=nmodes)\n\n ############################\n ### DEFINE LINEAR OPTICS ###\n ############################\n\n ## Defines the linear-optical unitary (in this case a simple tunable beam-splitter),\n tunable_bs_matrix = np.identity(nmodes)\n tunable_bs_matrix[1, 
1] = np.sqrt(1 - bs_reflectivity)\n tunable_bs_matrix[1, 2] = np.sqrt(bs_reflectivity)\n tunable_bs_matrix[2, 1] = - np.sqrt(bs_reflectivity)\n tunable_bs_matrix[2, 2] = np.sqrt(1 - bs_reflectivity)\n LO_unitary = tunable_bs_matrix\n\n ## Gets the linear-optical transformation in the symplectic form.\n LO_unitary_sym = get_unitary_sym(LO_unitary)\n\n ###############################################\n ### CALCULATE TOTAL GAUSSIAN TRANSFORMATION ###\n ###############################################\n\n ## Obtains the total Gaussian transformation matrix in the symplectic formalism\n sym_transf = LO_unitary_sym @ TMS2 @ TMS1\n\n #############################\n ### EVOLVE GAUSSIAN STATE ###\n #############################\n\n ## Obtains the covariance matrix of the output Gaussian state\n cov_mat = symplectic_evolution(cov_mat, sym_transf)\n\n ## Obtains the amplitudes of the output Gaussian state\n ampls = LO_unitary @ ampls\n\n ###############################\n ### SINGLE PHOTON DETECTION ###\n ###############################\n\n ## Map state into the StrawberryField package, which has fast functions for\n ## photon counting (i.e. non-Gaussian measurements) from Gaussian states.\n gauss_state = map_into_StrawberryFields(ampls, cov_mat, nmodes)\n\n ## Define in which modes we want t observe a coincidence detection.\n ## In this case we are looking for a 4-fold coincidence detection with a photon in all four modes.\n ## Repeated elements would represent multiple photons in the same mode. I.e. [0, 0, 2]\n ## would indicate two photons in mode 0 and one in mode 2.\n output_configuration = [0, 1, 2, 3]\n\n ## convert the detection configuration into a Fock state\n output_Fock = conf_to_Fock(output_configuration, nmodes)\n\n if number_resolving_det:\n ## Calculates the detection probability considering number-resolving detectors.\n det_prob = gauss_state.fock_prob(output_Fock)\n else:\n if old_PNR_func:\n ## Calculates the detection probability considering threshold detectors.\n det_prob = threshold_detection_prob_old(gauss_state, output_Fock, cutoff=cutoff)\n else:\n ## Calculates the detection probability considering threshold detectors.\n if parallelized:\n det_prob = threshold_detection_prob_parallel(gauss_state.cov(), gauss_state.means(), output_Fock)\n else:\n det_prob = threshold_detection_prob(gauss_state.cov(), gauss_state.means(), output_Fock)\n\n return det_prob", "def next_heegner_disc(self, n=0, sgn=-1, fd=False):\n for D in range(n + 1, 2 * n + 100):\n DD = D * sgn\n if(self.is_Heegner_disc(DD)):\n return DD\n raise ArithmeticError(\" COuld not find any Heegner discriminat > {0} !\".format(n))", "def test_h_gate_nondeterministic_minimal_basis_gates(self):\n shots = 2000\n circuits = ref_1q_clifford.h_gate_circuits_nondeterministic(final_measure=True)\n targets = ref_1q_clifford.h_gate_counts_nondeterministic(shots)\n job = execute(circuits, QasmSimulator(), shots=shots, basis_gates='U,CX')\n result = job.result()\n self.is_completed(result)\n self.compare_counts(result, circuits, targets, delta=0.05 * shots)", "def test_tanh_con():\n c=14\n assert {'diff':EF.tanh(c).der, 'value': EF.tanh(c).val}=={'diff':0, 'value': ((math.exp(c)-math.exp(-c))/2)/((math.exp(c)+math.exp(-c))/2)}", "def rnn_tanh_cell_grad(input, hidden, w_ih, w_hh, b_ih, b_hh, grad, target=\"cce\"):\n batch, input_size = get_shape(input)\n _, hidden_size = get_shape(hidden)\n igates = akg.topi.nn.dense(input, w_ih, b_ih)\n hgates = akg.topi.nn.dense(hidden, w_hh, b_hh)\n h = Tanh(igates + hgates)\n\n dh = (1 - h * h) * 
grad\n kk = akg.tvm.reduce_axis((0, batch))\n dWih = akg.tvm.compute((hidden_size, input_size), lambda i, j:\n akg.tvm.sum(input[kk, j] * dh(kk, i), axis=kk), name=\"dWih\")\n kk2 = akg.tvm.reduce_axis((0, batch))\n dWhh = akg.tvm.compute((hidden_size, hidden_size), lambda i, j:\n akg.tvm.sum(hidden[kk2, j] * dh(kk2, i), axis=kk2), name=\"dWhh\")\n kk3 = akg.tvm.reduce_axis((0, hidden_size))\n di = akg.tvm.compute((batch, input_size), lambda i, j: akg.tvm.sum(w_ih[kk3, j] * dh[i, kk3], axis=kk3), name=\"di\")\n kk4 = akg.tvm.reduce_axis((0, hidden_size))\n dhid = akg.tvm.compute((batch, hidden_size), lambda i, j: akg.tvm.sum(w_hh[kk4, j] * dh[i, kk4], axis=kk4), name=\"dhid\")\n db = akg.topi.sum(dh, 0)\n return di, dhid, dWih, dWhh, db\n # dbih/dbhh are the same and returning it twice causes CCEbuild to fail due to some SSA error\n # return di, dhid, dWih, dWhh, db, db", "def HaarGOB(treeG):\n # number of level of the chain (or tree)\n Ntr = len(treeG)\n # reorder chain (optional) \n # reordering each level so that in each level the nodes are in the\n # descent order of degrees\n # compute u_l^c for level J_0 (top level)\n clusterJ0 = treeG[Ntr-1]['clusters']\n N0 = len(clusterJ0)\n # generate indicator function on G^c\n chic = np.identity(N0)\n uc = [None]*N0\n uc[0] = 1/np.sqrt(N0,dtype=np.float64)*np.ones(N0)\n for l in np.arange(1, N0):\n uc[l] = np.sqrt((N0-l)/(N0-l+1))*(chic[l-1,:]-1/(N0-l)*np.sum(chic[l:,:], axis=0))\n# u = copy.deepcopy(uc)\n treeG[Ntr-1]['u'] = uc\n # compute the next level orthonormal basis ulk and stored into u\n for j_tr in np.arange(0, Ntr-1)[::-1]:\n N1 = len(treeG[j_tr]['clusters'])\n u = [None]*N1\n i = N0\n for l in range(N0):\n clusterl = treeG[j_tr+1]['clusters'][l]\n kl = len(clusterl)\n #for k==1\n ucl = uc[l]\n ul1 = np.zeros(N1)\n for j in range(N0):\n idxj = treeG[j_tr+1]['clusters'][j]\n ul1[idxj] = ucl[j]/np.sqrt(len(idxj))\n u[l] = ul1\n if kl>1:\n chil = np.zeros((kl,N1))\n for k in range(kl):\n idxl = treeG[j_tr+1]['clusters'][l]\n chil[k,idxl[k]] = 1;\n \n for k in np.arange(1,kl):\n i = i+1\n ulk = np.sqrt((kl-k)/(kl-k+1))*(chil[k-1,:]-1/(kl-k)*np.sum(chil[k:,:],axis=0))\n u[i-1] = ulk\n treeG[j_tr]['u'] = u\n # update uc and N0\n# uc = copy.deepcopy(u)\n uc = u\n N0 = N1\n return treeG", "def ftlan_E1c(hop, v0, T, m=50, Min_b=10e-10, Min_m=5, kB=1, norm = np.linalg.norm):\n# def Tri_diag(a1, b1):\n# mat = np.diag(b1, -1) + np.diag(a1, 0) + np.diag(b1, 1)\n# e, w = np.linalg.eigh(mat)\n# return e, w\n\n beta = 1./(T * kB)\n E = 0.\n a, b = [], []\n v0 = v0/norm(v0)\n Hv = hop(v0)\n a.append(v0.dot(Hv))\n v1 = Hv - a[0] * v0\n b.append(norm(v1))\n if b[0] < Min_b:\n return 0\n\n v1 = v1/b[0]\n Hv = hop(v1)\n a.append(v1.dot(Hv))\n\n for i in range(1, m - 1):\n v2 = Hv - b[i - 1] * v0 - a[i] * v1\n b.append(norm(v2))\n if abs(b[i]) < Min_b:\n b.pop()\n break\n\n v2 = v2/b[i]\n Hv = hop(v2)\n a.append(v2.dot(Hv))\n v0 = v1.copy()\n v1 = v2.copy()\n \n a = np.asarray(a)\n b = np.asarray(b)\n\n eps, phi = Tri_diag(a, b)\n l = len(eps)\n# Eo = eps[0]\n# eps = eps-Eo\n exp_eps = np.exp(-beta * eps)\n E = np.sum(exp_eps * eps * phi[0, :]**2.)\n Z = np.sum(exp_eps * phi[0, :]**2.)\n# for i in range(len(eps)):\n# E += exp_eps[i] * eps[i] * phi[0, i]**2\n\n# E = E + Eo\n# de = eps[:, np.newaxis] - eps\n# for i in range(l):\n# E += eps[i] * phi[0, i]**2./np.sum(np.exp(-beta*de[:l, i])*(phi[0, :l]**2.))\n return E, Z", "def real_extract(og, fg):\n\n oe = sg2plato(og)\n ae = sg2plato(fg)\n q = 0.22 + 0.001 * oe\n return (q * oe + ae) / (1 + q)", "def 
test6(self):\n smis = [ 'CCC(O)C(=O)O','c1ccccc1','C1CCCCC1','C1NCCCC1','CNCNCNC']\n for smi in smis:\n m = Chem.MolFromSmiles(smi)\n fp1 = Chem.RDKFingerprint(m,2,7,4096)\n fp2 = DataStructs.FoldFingerprint(fp1,2)\n fp3 = Chem.RDKFingerprint(m,2,7,2048)\n assert tuple(fp2.GetOnBits())==tuple(fp3.GetOnBits())\n fp2 = DataStructs.FoldFingerprint(fp2,2)\n fp3 = Chem.RDKFingerprint(m,2,7,1024)\n assert tuple(fp2.GetOnBits())==tuple(fp3.GetOnBits())\n fp2 = DataStructs.FoldFingerprint(fp1,4)\n assert tuple(fp2.GetOnBits())==tuple(fp3.GetOnBits())", "def hgmwithfilter_evaluation(input_generator,branches,nlfuntion,iden_method,Plot,reference=None):\n input_signal = input_generator.GetOutput()\n # filter_spec_tofind = nlsp.create_bpfilter([2000,8000,30000],input_signal)\n filter_spec_tofind = nlsp.log_bpfilter(branches=branches,input=input_signal)\n # filter_spec_tofind = [i for i in reversed(filter_spec_tofind)]\n length_kernel = len(filter_spec_tofind[0])\n # filter_spec_tofind = nlsp.log_chebyfilter(branches=branches,input=input_signal)\n ref_nlsystem = nlsp.HammersteinGroupModel_up(input_signal=input_signal,\n nonlinear_functions=nlsp.nl_branches(nlfuntion,branches),\n filter_irs=filter_spec_tofind,\n max_harmonics=range(1,branches+1))\n found_filter_spec, nl_functions = iden_method(input_generator,ref_nlsystem.GetOutput(),branches)\n found_filter_spec = nlsp.change_length_filterkernels(found_filter_spec,length=length_kernel)\n iden_nlsystem = nlsp.HammersteinGroupModel_up(input_signal=input_signal,\n nonlinear_functions=nl_functions,\n filter_irs=found_filter_spec,\n max_harmonics=range(1,branches+1))\n # nlsp.filterkernel_evaluation_plot(filter_spec_tofind,found_filter_spec)\n # nlsp.filterkernel_evaluation_sum(filter_spec_tofind,found_filter_spec)\n if reference is not None:\n reference = nlsp.change_length_signal(reference,length=len(input_signal))\n ref_nlsystem.SetInput(reference)\n iden_nlsystem.SetInput(reference)\n if Plot is True:\n plot.relabelandplot(ref_nlsystem.GetOutput(),\"Reference Output\",show=False)\n plot.relabelandplot(iden_nlsystem.GetOutput(),\"Identified Output\",show=True)\n # nlsp.plot_array([sumpf.modules.FourierTransform(s).GetSpectrum() for s in filter_spec_tofind],label_array=[\"reference%d\" %i for i in range(len(filter_spec_tofind))],Show=False)\n # nlsp.plot_array([sumpf.modules.FourierTransform(s).GetSpectrum() for s in found_filter_spec],label_array=[\"identified%d\" %i for i in range(len(found_filter_spec))],Show=True)\n print \"SNR between Reference and Identified output without overlapping filters: %r\" %nlsp.snr(ref_nlsystem.GetOutput(),\n iden_nlsystem.GetOutput())\n sumpf.modules.SignalFile(filename=\"C:/Users/diplomand.8/Desktop/linearHGM_explannation/cheby/noise/input\", signal=reference,format=sumpf.modules.SignalFile.WAV_FLOAT)\n sumpf.modules.SignalFile(filename=\"C:/Users/diplomand.8/Desktop/linearHGM_explannation/cheby/noise/%s\" %iden_method.__name__,signal=iden_nlsystem.GetOutput(),format=sumpf.modules.SignalFile.WAV_FLOAT)\n sumpf.modules.SignalFile(filename=\"C:/Users/diplomand.8/Desktop/linearHGM_explannation/cheby/noise/reference\",signal=ref_nlsystem.GetOutput(),format=sumpf.modules.SignalFile.WAV_FLOAT)", "def cantor() -> bigger.MCG[Edge]: # pylint: disable=too-many-statements\n\n POS, EQ, NEG = +1, 0, -1\n\n def edges() -> Iterable[Edge]:\n for x in naturals():\n for y in [POS, EQ, NEG]:\n yield x, y\n\n def negate(X: Edge) -> Edge:\n return X[0], -X[1]\n\n def invert(sign: int, X: tuple[Edge, bool, Edge, bool, Edge, bool, Edge, bool]) 
-> tuple[Edge, bool, Edge, bool, Edge, bool, Edge, bool]:\n return X if sign == POS else (negate(X[6]), not X[7], negate(X[4]), not X[5], negate(X[2]), not X[3], negate(X[0]), not X[1])\n\n def link(edge: Edge) -> tuple[Edge, bool, Edge, bool, Edge, bool, Edge, bool]:\n n, k = edge\n if k == EQ: # Equator\n if n == 0:\n return ((0, NEG), False, (1, NEG), True, (1, POS), False, (0, POS), True)\n elif n == 1:\n return ((2, POS), False, (0, POS), False, (0, NEG), True, (2, NEG), True)\n else: # n > 1\n return ((3 * n - 3, NEG), False, (3 * n - 1, NEG), True, (3 * n - 1, POS), False, (3 * n - 3, POS), True)\n\n # Northern / Southern hemisphere.\n if n == 0:\n return invert(k, ((0, EQ), False, (1, POS), False, (1, EQ), True, (2, POS), False))\n elif n == 1:\n return invert(k, ((4, POS), False, (3, POS), False, (0, POS), True, (0, EQ), False))\n elif n == 2:\n return invert(k, ((7, POS), False, (6, POS), False, (0, POS), False, (1, EQ), True))\n N, r = n // 3 + 1, n % 3\n incoming = 3 * (N // 2) - (1 if N % 2 else 2)\n if r == 0:\n return invert(k, ((N, EQ), False, (n + 2, POS), False, (incoming, POS), True, (n + 1, POS), False))\n elif r == 1:\n return invert(k, ((6 * N - 2, POS), False, (6 * N - 3, POS), False, (n - 1, POS), False, (incoming, POS), True))\n else: # r == 2:\n return invert(k, ((6 * N + 1, POS), False, (6 * N + 0, POS), False, (n - 2, POS), True, (N, EQ), False))\n\n T = bigger.Triangulation.from_pos(edges, link)\n\n def generator(name: str) -> bigger.Encoding[Edge]: # pylint: disable=too-many-branches\n twist_match = re.match(r\"(?P<curve>[ab])_(?P<n>-?\\d+)$\", name)\n rotate_match = re.match(r\"r$\", name)\n\n if twist_match is not None:\n parameters = twist_match.groupdict()\n curve_name = parameters[\"curve\"]\n N = int(parameters[\"n\"])\n if curve_name == \"a\":\n if N == 1:\n cut_sequence = [(0, EQ), (0, POS), (1, EQ)]\n else:\n cut_sequence = [(0, EQ), (N, EQ), (3 * N - 3, POS)]\n while N > 1:\n low_N = N // 2\n cut_sequence.append((3 * low_N - (1 if N % 2 else 2), POS))\n if N % 2:\n cut_sequence.append((3 * low_N - 3, POS))\n N = low_N\n elif curve_name == \"b\":\n if N <= 3:\n cut_sequence = [(0, EQ), (0, POS), (1, EQ)]\n else:\n extend_left = N % 2\n N = N // 2\n cut_sequence = [(N, EQ), (3 * N - 3, POS)]\n while N > 1:\n N_low = N // 2\n cut_sequence.append((3 * N_low - (1 if N % 2 else 2), POS))\n if extend_left:\n cut_sequence.append((3 * N_low - 3, POS))\n if N % 2 != extend_left:\n cut_sequence.append((N_low, EQ))\n break\n N = N_low\n else:\n cut_sequence.append((0, EQ))\n\n curve = T(dict(((x, y * s), 1) for x, y in cut_sequence for s in [+1, -1]))\n return curve.twist()\n elif rotate_match is not None:\n\n def isom(edge: Edge) -> Edge:\n n, k = edge\n if k == EQ:\n if n == 0:\n return (1, EQ)\n elif n == 1:\n return (0, EQ)\n return (n ^ (1 << n.bit_length() - 2), k)\n\n if n == 0:\n return (0, k)\n elif n == 1:\n return (2, k)\n elif n == 2:\n return (1, k)\n N, r = n // 3 + 1, n % 3\n return (3 * (N ^ (1 << N.bit_length() - 2)) - 3 + r, k)\n\n return T.encode([(-1, isom, isom)])\n\n raise ValueError(f\"Unknown mapping class {name}\")\n\n return bigger.MCG(T, generator)", "def a5_homelien(clk, i, o):\n\n\tmajority = Signal(bool(0))\n\tr1, r1_reg, r1_clk, r1_out, r1_bit = lfsr_hw(clk, i, majority, 19, [13, 16, 17, 18], 8, \"R1\")\n\tr2, r2_reg, r2_clk, r2_out, r2_bit = lfsr_hw(clk, i, majority, 22, [20, 21], 10, \"R2\")\n\tr3, r3_reg, r3_clk, r3_out, r3_bit = lfsr_hw(clk, i, majority, 23, [7, 20, 21, 22], 10, \"R3\")\n\t@always_comb\n\tdef xors():\n\t\t# 
this should be done automatically with the chain function from the lists above\n\t\t# but perhaps need to complain on myhdl list to get it working again\n\t\tr1_bit.next = i[0] ^ r1_reg[13] ^ r1_reg[16] ^ r1_reg[17] ^ r1_reg[18]\n\t\tr2_bit.next = i[0] ^ r2_reg[20] ^ r2_reg[21]\n\t\tr3_bit.next = i[0] ^ r3_reg[7] ^ r3_reg[20] ^ r3_reg[21] ^ r3_reg[22]\n\n\t@always_comb\n\tdef calc_stuff():\n\t\tmajority.next = (r1_clk + r2_clk + r3_clk) >= 2\n\t\to.next = r1_out ^ r2_out ^ r3_out\n\n\treturn (r1, r2, r3, calc_stuff, xors)", "def chkiapws09table6(printresult=True,chktol=_CHKTOL):\n from teospy.tests.tester import Tester\n args1 = [(273.15,101325.), (273.15,1e8), (313.15,101325.)]\n DERS2 = ((0,0),(1,0),(0,1),(2,0),(1,1),(0,2))\n \n funs = liq_g\n fargs = [(der+args) for args in args1 for der in DERS2]\n refs = [\n 0.101342743e3,0.147644587,0.100015695e-2,-0.154472324e2,\n -0.677459513e-7,-0.508915308e-12,\n 0.977303868e5,0.851506346e1,0.956683354e-3,-0.142970174e2,\n 0.199088060e-6,-0.371527164e-12,\n -0.116198898e5,-0.572365181e3,0.100784471e-2,-0.133463968e2,\n 0.388499694e-6,-0.445841077e-12\n ]\n fnames = 'liq_g'\n argfmt = '({0:1g},{1:1g},{2:6.2f},{3:6g})'\n header = 'F03 liq_g derivatives'\n testder = Tester(funs,fargs,refs,fnames,argfmt,header=header)\n \n funs = [cp,density,soundspeed,\n enthalpy,entropy,helmholtzenergy,internalenergy]\n fargs = args1\n refs = [\n [0.421941153e4,0.390523030e4,0.417942416e4],\n [0.999843071e3,0.104527793e4,0.992216354e3],\n [0.140240099e4,0.157543089e4,0.152891242e4],\n [0.610136242e2,0.954044973e5,0.167616267e6],\n [-0.147644587,-0.851506346e1,0.572365181e3],\n [0.183980891e-2,0.206205140e4,-0.117220097e5],\n [-0.403272791e2,-0.263838183e3,0.167514147e6]\n ]\n fnames = ['cp','density','soundspeed',\n 'enthalpy','entropy','helmholtzenergy','internalenergy']\n argfmt = '({0:6.2f},{1:6g})'\n header = 'F03 thermodynamic properties'\n testprop = Tester(funs,fargs,refs,fnames,argfmt,header=header)\n \n # Run Tester instances and print results\n testder.run()\n testprop.run()\n if printresult:\n testder.printresults(chktol=chktol)\n testprop.printresults(chktol=chktol)\n return (testder, testprop)", "def test():\n Z = func.evaluate_circuit(F, e_x, e_y, e_xor)\n if Z == d[0]:\n return 0\n elif Z == d[1]:\n return 1", "def test_mannext(self):\n self.chck_triple('mannext')", "def FigA7(case):\n \n #set the parameter, arrays\n \n n_array=np.array([1,2,3])\n\n #set the result arrays\n if case==0:\n class_number=5\n elif case==1:\n class_number=6\n fate=np.zeros([class_number])#number of evolutionary fate\n fate_matrix=np.zeros([np.size(n_array),np.size(fate)])\n \n time=np.linspace(0,100000, 1000000)\n loop=10**6\n \"\"\"\n 0 Co and/or Ch cannot survive in mono-culture\n 1 Co cannot invade\n 2 Only equilibrium of exclusion is stable\n 3 Only equilibrium of coexistence is stable\n 4 Two equilibria are UNstable\n 5 two Equilibrium are stable (which may occur only when sCO vs rCh)\n \"\"\"\n for tri in range(np.size(n_array)):\n counter=0\n n=n_array[tri]\n print(str(\"Hill coefficient is %d\" %(n)))\n fate=np.zeros([class_number])#number of evolutionary fate should be reset\n if case==0 or case==1:\n fname=str('parameter-sweep-MC-n%d-case%d' %(n, case))\n else:\n print(\"Error in case\")\n return 1\n \n for i in range(loop):\n if(i+1)%10000==0:\n print(i+1)\n Ks,cd,T0, alpha,=np.random.uniform(0,1,4)\n Kr,cr=np.random.uniform([Ks,0],[1,1],2)#Kr>Ks and cr.cd\n #check whether r is positive or not\n if case==0:\n r1=rmax*(1-cr-cd)#rCO\n r2=rmax#sCH\n 
W0Co=r1-dmax*T0**n/(T0**n+Kr**n)-alpha#initial growth of Cooperator\n W0Ch=r2-dmax*T0**n/(T0**n+Ks**n)-alpha#initial growth of Cheater\n elif case==1:\n r1=rmax*(1-cd)#sCo\n r2=rmax*(1-cr)#rCh\n W0Co=r1-dmax*T0**n/(T0**n+Ks**n)-alpha\n W0Ch=r2-dmax*T0**n/(T0**n+Kr**n)-alpha\n stab_e=0#initialize the falgs of stability\n stab_c=0\n if W0Co<0 or W0Ch<0:\n fate[0]+=1\n res=0\n else:\n #succeed in mono-culture \n init=np.array([T0,10**(-6)])\n if case==0: \n solCo=odeint(DyCoop, init, time, args=(T0, r1, Kr, alpha, n))\n Ts=solCo[-1,0]\n #x1s=solCo[-1,1]\n solCh=odeint(DyCheat, init, time, args=(T0, r2, Ks, alpha, n))\n x2s=solCh[-1,1]\n else:\n solCo=odeint(DyCoop, init, time, args=(T0, r1, Ks, alpha, n))\n Ts=solCo[-1,0]\n #x1s=solCo[-1,1]\n solCh=odeint(DyCheat, init, time, args=(T0, r2, Kr, alpha, n))\n x2s=solCh[-1,1]\n \n #Evolutionary dynamics \n if case==0:\n K=Kr\n else:\n K=Ks\n if r1*(1-x2s)-dmax*T0**n/(T0**n+K**n)<alpha:\n #Co cannot invade\n fate[1]+=1\n res=1\n else:\n #Co can invade\n #calculate Tdagger Td and check whether coexist or exclude\n if case==0:\n #rCo vs sCh\n #in this case, at most one equilbrium is stable\n tau=Quad(case,alpha,cr+cd,0,Kr, Ks, n)\n Td=tau**(1/n)\n if Td<Ts:\n #Co exclude Ch\n fate[2]+=1\n res=2\n else:\n x1d=alpha*Kd*(T0-Td)/(fmax*Td-alpha*(T0-Td))\n x2d=1-x1d-(dmax*Td**n/(Td**n+K**n)+alpha)/r1\n #check the stability condition\n stab=Stab_cond(alpha, T0, Td,x1d,x2d, r1,r2,n, K)\n if stab==0:\n #stable coexistence\n fate[3]+=1\n res=3\n else:\n #unstable coexistence nor exclusion\n fate[4]+=1\n res=4\n print(Td, x1d, x2d)\n else:\n #sCo vs rCh\n # in this case two equilibria can be stable at the same time\n [tau_p,tau_m]=Quad(case,alpha,cd,cr,Ks, Kr, n)\n if tau_m>Ts**n or tau_p<Ts**n:\n # cexclusion is stable\n stab_e=1\n # stability in coexistence \n if tau_p<0:\n stab_c=0\n else:\n Td=tau_p**(1/n)\n x1d=alpha*Kd*(T0-Td)/(fmax*Td-alpha*(T0-Td))\n x2d=1-x1d-(dmax*Td**n/(Td**n+K**n)+alpha)/r1\n #check the stability condition\n stab=Stab_cond(alpha, T0, Td,x1d,x2d, r1,r2,n, K)\n if stab==0:\n #stable coexistence\n stab_c=1\n #classify\n if stab_e==1 and stab_c==1:\n # two stable equilbria\n fate[5]+=1\n res=5\n elif stab_e==1 and stab_c==0:\n #only stable cexclusion\n fate[2]+=1\n res=2\n elif stab_e==0 and stab_c==1:\n #stable coexistence\n fate[3]+=1\n res=3\n else:\n #both unstable\n fate[4]+=1\n res=4\n \n #save the results\n if counter==0:\n result=np.array([[Ks, Kr, cr, cd, alpha, T0,res]])\n #save the result with parameter values\n \n else:\n #add array of results\n R=np.array([[Ks, Kr, cr, cd, alpha, T0,res]])\n result=np.concatenate((result, R), axis=0)\n counter+=1\n \n #save csv file and graph\n np.savetxt(fname+'.csv',result, delimiter=',', header='Ks, Kr, cr, cd, alpha, T0, class', fmt='%.6f') \n print(fate)\n fate_matrix[tri,:]=fate \n if case==0: \n np.savetxt('parameter_sweep_MC_total_case0.csv',fate_matrix, delimiter=',', header='cl0,l1,cl2,cl3,cl4', fmt='%d')\n else:\n np.savetxt('parameter_sweep_MC_total_case1.csv',fate_matrix, delimiter=',', header='cl0,l1,cl2,cl3,cl4,cl5', fmt='%d')\n Plot(case)", "def pollard(N) :\n\tleft = right = seed\n\td = 1\n\twhile d == 1: \n\t\tleft = pseudo(left,N)\n\t\tright = pseudo(pseudo(right,N),N)\n\t\td = gcd(abs(left-right),N)\n\t\tif PRINT_TRACE : \n\t\t\tprint(left,right,d)\n\tif d == N : \n\t\tprint(\"Failure\")\n\telse :\n\t\tprint(\"non trivial factor is %d\"%d)", "def CFHX(m_dot, p_nominal, epsilon, p_HP_in, T_HP_in, p_LP_in, T_LP_in):\r\n\r\n\r\n # Data from Aleks:\r\n # Length CFHX = 22 
cm\r\n # D_in = 23.6 mm, D_out 40.5 mm\r\n # T range: 40 K - 5 K\r\n # m_dot = 0.5 g/s\r\n # p = 1 bar\r\n # Effectiveness: 97.4 %\r\n # dp_HP = 4.8 mbar (= dp23)\r\n # dp_LP = 5 mbar (= dp78)\r\n\r\n # Geometry of the CFHX\r\n A_HP = 0.25 * np.pi * 0.0236**2 #m²\r\n A_LP = 0.25 * np.pi * (0.0405**2 - 0.0236**2) #m²\r\n\r\n\r\n ## Calculation of the outgoing pressure with the scaled pressure drops\r\n\r\n # Scaling of the pressure drop with the Darcy--Weisbach equation\r\n # dp = f *L/D_i * 0.5 * Rho * u**2\r\n dp_HP_Aleks = 4.8e2 #Pa\r\n dp_LP_Aleks = 5.0e2 #Pa\r\n # Mean density with the arithmetic mean of the temperature range values\r\n Rho_Aleks = hp.HeCalc(3, 0, 1, 1e5, 2, 0.5*(40+5), 1) #kg/m³\r\n u_HP_Aleks = 0.5e-3/(A_HP*Rho_Aleks) #m/s\r\n u_LP_Aleks = 0.5e-3/(A_LP*Rho_Aleks) #m/s\r\n\r\n # Mean density of the two inlet temperatures and the nominal pressure to be able to compare the dp\r\n Rho = hp.HeCalc(3, 0, 1, p_nominal, 2, 0.5*(T_HP_in + T_LP_in), 1) #kg/m³\r\n u_HP = m_dot/(A_HP*Rho) #m/s\r\n u_LP = m_dot/(A_LP*Rho) #m/s\r\n\r\n # Actual scaling\r\n dp_HP = Rho/Rho_Aleks * u_HP**2/u_HP_Aleks**2 * dp_HP_Aleks #Pa\r\n dp_LP = Rho/Rho_Aleks * u_LP**2/u_LP_Aleks**2 * dp_LP_Aleks #Pa\r\n\r\n # Calculation of the outgoing pressure with the scaled pressure drops\r\n p_HP_out = p_HP_in - dp_HP #Pa\r\n p_LP_out = p_LP_in - dp_LP #Pa\r\n\r\n\r\n ## Calculation of the outgoing temperatures using the effectiveness\r\n # Asumming that the effectiveness is the same for both the HP and the LP side!\r\n\r\n # Check which stream restricts the heat exchange -> Pinch point\r\n # See \"Compact heat exchangers\" by Kays, London : Chapter 7\r\n dh_HP_max = hp.HeCalc(9, 0, 1, p_HP_in, 2, T_HP_in, 1) - hp.HeCalc(9, 0, 1, p_HP_out, 2, T_LP_in, 1)\r\n dh_LP_max = hp.HeCalc(9, 0, 1, p_LP_out, 2, T_HP_in, 1) - hp.HeCalc(9, 0, 1, p_LP_in, 2, T_LP_in, 1)\r\n\r\n # The maximum possible heat transfer corresponds to the restricting one\r\n dh_max = min(dh_HP_max, dh_LP_max)\r\n\r\n # Calculating the specific enthalpy with all known pressures and temperatures\r\n h_HP_in = hp.HeCalc(9, 0, 1, p_HP_in, 2, T_HP_in, 1) #J/kg\r\n h_LP_in = hp.HeCalc(9, 0, 1, p_LP_in, 2, T_LP_in, 1) #J/kg\r\n\r\n # Calculating the outgoing enthalpies\r\n h_HP_out = h_HP_in - epsilon * dh_max #J/kg\r\n h_LP_out = h_LP_in + epsilon * dh_max #J/kg\r\n\r\n # Calculation of the temperatures dependend on the specific enthalpy and the pressure\r\n T_HP_out = hp.HeCalc(2, 0, 1, p_HP_out, 9, h_HP_out, 1) #K\r\n T_LP_out = hp.HeCalc(2, 0, 1, p_LP_out, 9, h_LP_out, 1) #K\r\n\r\n # Cross check the dp scaling\r\n # print(\"u_HP_Aleks\", u_HP_Aleks)\r\n # print(\"u_HP\", u_HP)\r\n # print(\"Rho_Aleks\", Rho_Aleks)\r\n # print(\"Rho\", Rho)\r\n # print(\"dp_HP\", dp_HP)\r\n # print(\"dp_HP/dp_HP_Aleks \", dp_HP/dp_HP_Aleks)\r\n # print(\"dp_LP/dp_LP_Aleks \", dp_LP/dp_LP_Aleks)\r\n\r\n # Output of the results\r\n state_out = {\"h_HP\": h_HP_out, \"T_HP\": T_HP_out, \"p_HP\": p_HP_out,\r\n \"h_LP\": h_LP_out, \"T_LP\": T_LP_out, \"p_LP\": p_LP_out}\r\n return state_out", "def softclippingHGMevaluation(input_generator,branches,iden_method,Plot,reference=None):\n for t in range(8,11):\n t = t / 10.0\n p = 1.0 - t\n input_signal = input_generator.GetOutput()\n nl_functions = [nlsp.function_factory.softclip(power=p),]*branches\n filter_spec_tofind = nlsp.log_bpfilter(branches=branches, input=input_signal)\n ref_nlsystem = nlsp.HammersteinGroupModel_up(input_signal=input_signal,\n nonlinear_functions=nl_functions,\n filter_irs=filter_spec_tofind,\n 
max_harmonics=range(1,branches+1))\n\n found_filter_spec, nl_functions = iden_method(input_generator,ref_nlsystem.GetOutput(),branches)\n iden_nlsystem = nlsp.HammersteinGroupModel_up(input_signal=input_signal,\n nonlinear_functions=nl_functions,\n filter_irs=found_filter_spec,\n max_harmonics=range(1,branches+1))\n # sine = sumpf.modules.SineWaveGenerator(frequency=5000.0,phase=0.0,samplingrate=input_signal.GetSamplingRate(),length=len(input_signal)).GetSignal()\n sine = sumpf.modules.SweepGenerator(samplingrate=input_signal.GetSamplingRate(),length=len(input_signal)).GetSignal()\n ref_nlsystem.SetInput(sine)\n iden_nlsystem.SetInput(sine)\n if reference is not None:\n reference = nlsp.change_length_signal(reference,length=len(input_signal))\n ref_nlsystem.SetInput(reference)\n iden_nlsystem.SetInput(reference)\n\n if Plot is True:\n plot.relabelandplot(sumpf.modules.FourierTransform(ref_nlsystem.GetOutput()).GetSpectrum(),\"Reference System\",show=False)\n plot.relabelandplot(sumpf.modules.FourierTransform(iden_nlsystem.GetOutput()).GetSpectrum(),\"Identified System\",show=False)\n print \"SNR between Reference and Identified output for symmetric hardclipping HGM(threshold:%r): %r\" %(t,nlsp.snr(ref_nlsystem.GetOutput(),\n iden_nlsystem.GetOutput()))", "def hilfe(self):\n sDreieck_hilfe(3)", "def test_bit_driver_output(self):\n\n H = qaoa.bit_driver(range(3), 1)\n hamiltonian = qml.Hamiltonian([1, 1, 1], [qml.PauliZ(0), qml.PauliZ(1), qml.PauliZ(2)])\n\n assert decompose_hamiltonian(H) == decompose_hamiltonian(hamiltonian)", "def test_helpers():\n\n print \"GCD of 8 and 12 is %0d\" % fractions.gcd(8, 12)\n\n print \"%0d and %0d are co-prime\" % (2, coPrime(2))\n print \"%0d and %0d are co-prime\" % (6, coPrime(6))\n\n mod_inverse(11, 60)\n\n modulo(2, 3, 4)\n\n totient(24)", "def dice(hm, hf):\n return 2 * np.count_nonzero(hm & hf) / float(np.count_nonzero(hm) + np.count_nonzero(hf))", "def needleman_wunsch(x, y, lodict={}, gop=-2.5, gep=-1.75, local=False, indel=''):\n n, m = len(x), len(y)\n dp = np.zeros((n + 1, m + 1))\n pointers = np.zeros((n + 1, m + 1), np.int32)\n if not local:\n for i1, c1 in enumerate(x):\n if gop is None:\n dp[i1 + 1, 0] = lodict.get((c1, indel), gep)\n else:\n dp[i1 + 1, 0] = dp[i1, 0]+(gep if i1 + 1 > 1 else gop)\n pointers[i1 + 1, 0] = 1\n for i2, c2 in enumerate(y):\n if gop is None:\n dp[0, i2 + 1] = lodict.get((indel, c2), gep)\n else:\n dp[0, i2 + 1] = dp[0, i2]+(gep if i2 + 1 > 1 else gop)\n pointers[0, i2 + 1] = 2\n for i1, c1 in enumerate(x):\n for i2, c2 in enumerate(y):\n match = dp[i1, i2] + lodict.get(\n (c1, c2),\n 1 if c1 == c2 else -1)\n insert = dp[i1, i2 + 1] + (\n lodict.get((c1, indel), gep) if gop is None else\n gep if pointers[i1, i2 + 1] == 1 else gop)\n delet = dp[i1 + 1, i2] + (\n lodict.get((indel, c2), gep) if gop is None else\n gep if pointers[i1 + 1, i2] == 2 else gop)\n pointers[i1 + 1, i2 + 1] = p = np.argmax([match, insert, delet])\n max_score = [match, insert, delet][p]\n if local and max_score < 0:\n max_score = 0\n dp[i1 + 1, i2 + 1] = max_score\n alg = []\n if local:\n i, j = np.unravel_index(dp.argmax(), dp.shape)\n else:\n i, j = n, m\n score = dp[i, j]\n while (i > 0 or j > 0):\n pt = pointers[i, j]\n if pt == 0:\n i -= 1\n j -= 1\n alg = [(x[i], y[j])] + alg\n if pt == 1:\n i -= 1\n alg = [(x[i], indel)] + alg\n if pt == 2:\n j -= 1\n alg = [(indel, y[j])] + alg\n if local and dp[i, j] == 0:\n break\n return score, alg", "def hilfe(self):\n sZweieck_hilfe(3)", "def make_Gr(mlat, *J):\n\n if (len(J)!=3):\n print(\"Number 
of paramaters are not right, must be 5!\")\n \n NN = 2*mlat # # of sites in one super unitcell\n tau = -np.zeros((NN, NN),dtype=complex)\n h = np.zeros((NN,NN), dtype=complex)\n\n # translational cell's Hamiltonian\n for i in range(mlat-1):\n if (i%2==0):\n h[i,i+1] = J[0]\n h[mlat+i,mlat+i+1] = J[1]\n h[i,mlat+i] = J[2] # horizoltal connection\n elif (i%2==1):\n h[i,i+1] = J[1]\n h[mlat+i,mlat+i+1] = J[0]\n # longitudinal connection of the last sites\n if (mlat-1)%2 == 0:\n h[mlat-1,2*mlat-1] = J[2]\n \n h = h + h.conj().T # make it hermitian\n\n # Hopping matrix\n for i in range(1,mlat,2):\n tau[i+mlat,i] = J[2]\n\n return h, tau", "def tanh(a):", "def TR_algo3(h, vd=2):\n ve = 0\n vd = 2\n p = [0]*N\n for i in range(M-1, -1, -1):\n w = [bit_component(h, i*N+ii) for ii in range(N)]\n #print(i, w)\n w = sum( [wx*2**j for j, wx in enumerate(w)] )\n #print(i, w, gc(w))\n l = gc(w)\n l = T_inv(ve, vd, l)\n for j in range(N):\n p[j] += bit_component(l, j) << i\n ve = ve ^ rotate_left(e(w), vd+1)\n vd = (vd + d(w) + 1) % N\n return p", "def enthalpy(temp,pres):\n g = liq_g(0,0,temp,pres)\n g_t = liq_g(1,0,temp,pres)\n h = g - temp*g_t\n return h", "def detect_edges_hed(self):\n inp = cv2.dnn.blobFromImage(self.input_image, scalefactor=1.0,\n size=(self.WIDTH, self.HEIGHT),\n mean=(104.00698793, 116.66876762, 122.67891434),\n swapRB=False, crop=False)\n self.net.setInput(inp)\n out = self.net.forward()\n hed = out[0, 0]\n hed = (255 * hed).astype(\"uint8\")\n hed = cv2.fastNlMeansDenoising(hed, SMOOTH_FACTOR, SMOOTH_FACTOR, 21)\n save_image_opencv(hed, 'hed')\n hed_skel = skeletonize(hed).astype(\"uint8\")\n save_image_opencv(hed_skel, 'hed_skel')\n thresheld_hed = cv2.threshold(hed_skel, 0, 255,\n cv2.THRESH_BINARY | cv2.THRESH_OTSU)[1]\n save_image_opencv(thresheld_hed, 'thresheld_hed')\n return hed, thresheld_hed", "def test_hof(a, b):\n def f(g, x):\n return g(x) * g(x + 10.0)\n\n def g(x):\n return x * b\n\n return f(g, a) + f(g, b)", "def hilfe(self):\n sKreis_hilfe(3)", "def current_operator(h0):\n h = h0.copy()\n h = h0.get_multicell() # multicell Hamiltonian\n if h.dimensionality != 1: raise # only for 1d\n if not h.is_multicell: # no multicell\n def fj(k):\n phik = np.exp(1j*2.*np.pi*k) # complex phase\n jk = 1j*(h.inter*phik - h.inter.H*np.conjugate(phik)) \n return jk\n else: # multicell Hamiltonian\n def fj(k):\n jk = h.intra*0. # initialize\n for t in h.hopping:\n phik = np.exp(1j*2.*np.pi*k*t.dir[0]) # complex phase\n jk = jk + 1j*t.m*phik*t.dir[0]\n return jk\n return fj", "def Mo96(self,dc,nu):\n return 1. 
+ (nu**2.-1.)/dc", "def magma_zheevd_m(ngpu, jobz, uplo, n, A, lda, w, work, lwork,\n rwork, lrwork, iwork, liwork):\n\n uplo = _uplo_conversion[uplo]\n info = c_int_type()\n status = _libmagma.magma_zheevd_m(ngpu, jobz, uplo, n, int(A), lda,\n int(w), int(work), lwork, int(rwork),\n lrwork, int(iwork), liwork, ctypes.byref(info))\n magmaCheckStatus(status)", "def fix_COO_and_NH(pdb):\n M = Molecule(pdb)\n ds = M.ds\n g = M.g\n m = Chem.MolFromPDBFile(pdb, removeHs=True) # don't use True, otherwise Error\n mc = Chem.EditableMol(m)\n q = Chem.MolFromSmarts( 'C(~[O])~[O]' )\n matches = m.GetSubstructMatches(q)\n #bom = Chem.GetAdjacencyMatrix(m,useBO=True)\n for (i,j,k) in matches:\n d1, d2 = ds[i,j], ds[i,k]\n b1 = m.GetBondBetweenAtoms(i,j)\n b2 = m.GetBondBetweenAtoms(i,k)\n if d1 < d2:\n bij = 2.0; bik = 1.0\n else:\n bij = 1.0; bik = 2.0\n mc.RemoveBond(i,j)\n mc.RemoveBond(i,k)\n mc.AddBond(i,j,bo2bt['%.1f'%bij])\n mc.AddBond(i,k,bo2bt['%.1f'%bik])\n mu = mc.GetMol()\n return mu\n # NX4, e.g., >[N+]<, >[NH+]-, >[NH2+], -[NH3+]\n #q = Chem.MolFromSmarts( '[NX4]' )\n\n # [*]~NX2, e.g., >C=[NH2+]\n\n # [*]~NX, e.g., >C=[NH+]-", "def test_hyperbolic_functions(self):\r\n inputs = ['0', '0.5', '1', '2', '1+j']\r\n neg_inputs = ['0', '-0.5', '-1', '-2', '-1-j']\r\n negate = lambda x: [-k for k in x]\r\n\r\n # sinh is odd\r\n sinh_vals = [0, 0.521, 1.175, 3.627, 0.635 + 1.298j]\r\n self.assert_function_values('sinh', inputs, sinh_vals)\r\n self.assert_function_values('sinh', neg_inputs, negate(sinh_vals))\r\n\r\n # cosh is even - do not negate\r\n cosh_vals = [1, 1.128, 1.543, 3.762, 0.834 + 0.989j]\r\n self.assert_function_values('cosh', inputs, cosh_vals)\r\n self.assert_function_values('cosh', neg_inputs, cosh_vals)\r\n\r\n # tanh is odd\r\n tanh_vals = [0, 0.462, 0.762, 0.964, 1.084 + 0.272j]\r\n self.assert_function_values('tanh', inputs, tanh_vals)\r\n self.assert_function_values('tanh', neg_inputs, negate(tanh_vals))\r\n\r\n # sech is even - do not negate\r\n sech_vals = [1, 0.887, 0.648, 0.266, 0.498 - 0.591j]\r\n self.assert_function_values('sech', inputs, sech_vals)\r\n self.assert_function_values('sech', neg_inputs, sech_vals)\r\n\r\n # the following functions do not have 0 in their domain\r\n inputs = inputs[1:]\r\n neg_inputs = neg_inputs[1:]\r\n\r\n # csch is odd\r\n csch_vals = [1.919, 0.851, 0.276, 0.304 - 0.622j]\r\n self.assert_function_values('csch', inputs, csch_vals)\r\n self.assert_function_values('csch', neg_inputs, negate(csch_vals))\r\n\r\n # coth is odd\r\n coth_vals = [2.164, 1.313, 1.037, 0.868 - 0.218j]\r\n self.assert_function_values('coth', inputs, coth_vals)\r\n self.assert_function_values('coth', neg_inputs, negate(coth_vals))", "def mobius(decomp): #fix 1 value\n return 0 if any([decomp[p] >= 2 for p in decomp]) else (-1) ** (breadth(decomp) % 2)", "def zernike_diff(Nstar=1,seeing=[0.9,0.,0.],npix=npix,zenith=0,filter='g', theta=0., corrector='corrector',x=None,y=None,z=None,zernike_max_order=20,regular=False):\n hdu = genImgVallCCD(Nstar=Nstar,seeing=seeing,npix=npix,zenith=zenith,filter=filter, theta=theta, corrector=corrector,x=x,y=y,z=z,regular=regular)\n nn = len(hdu)\n data = []\n colnames = ['x','y','M20','M22','M31','M33']\n for hdui in hdu[1:]:\n Nobj = hdui.data.shape[0]\n M20=np.zeros(Nobj)\n M22=np.zeros(Nobj).astype(complex)\n M31=np.zeros(Nobj).astype(complex)\n M33=np.zeros(Nobj).astype(complex)\n #sigma = 1.1/0.27\n sigma = 1.08/0.27\n for i in range(Nobj):\n 
M20[i],M22[i],M31[i],M33[i]=complexMoments(data=hdui.data[i][4:].reshape(npix,npix),sigma=sigma)\n x=hdui.header['ccdXcen']\n y=hdui.header['ccdYcen']\n data.append([x,y,np.median(M20), np.median(M22), np.median(M31), np.median(M33)])\n data=np.array(data)\n betaAll=[]\n betaErrAll=[]\n R2adjAll=[]\n beta,betaErr,R2_adj = zernikeFit(data[:,0].real,data[:,1].real,data[:,2].real,max_order=zernike_max_order)\n betaAll.append(beta)\n betaErrAll.append(betaErr)\n R2adjAll.append(R2_adj)\n for i in range(3,6):\n beta,betaErr,R2_adj = zernikeFit(data[:,0].real,data[:,1].real,data[:,i].real,max_order=zernike_max_order)\n betaAll.append(beta)\n betaErrAll.append(betaErr)\n R2adjAll.append(R2_adj)\n beta,betaErr,R2_adj = zernikeFit(data[:,0].real,data[:,1].real,data[:,i].imag,max_order=zernike_max_order)\n betaAll.append(beta)\n betaErrAll.append(betaErr)\n R2adjAll.append(R2_adj)\n betaAll = np.array(betaAll)\n betaErrAll = np.array(betaErrAll)\n R2adjAll = np.array(R2adjAll)\n return betaAll,betaErrAll, R2adjAll", "def half_gcdex(f, g):\n lev, dom, per, F, G = f.unify(g)\n s, h = dmp_half_gcdex(F, G, dom)\n return per(s), per(h)", "def test_2_layer():\r\n # angular frequency in radians * THz\r\n w = 100 * nu.THz\r\n # Relative permittivity of metal and dielectric\r\n em = -4.56 + 0.12j\r\n ed = 1.23 + 0.01j\r\n ex_list = ez_list = [ed, em]\r\n # Relative permeabilities\r\n mu_list = [1,1]\r\n # Dictionary of input parameters\r\n input_params = {'w': w, 'd_list': [inf,inf], 'ex_list': ex_list,\r\n 'ez_list': ez_list, 'mu_list': mu_list}\r\n \r\n # Calculate the theoretical kx\r\n theo_kx = (w / nu.c0) * cmath.sqrt((em * ed) / (em + ed))\r\n if theo_kx.imag < 0:\r\n theo_kx *= -1\r\n print('Theoretical kx:',\r\n '(%.7g+%.7gj) rad/um' % (theo_kx.real / nu.um**-1, theo_kx.imag / nu.um**-1))\r\n \r\n # If I use the theoretical kx value, the mode should be correct and\r\n # all my tests should pass.\r\n params = deepcopy(input_params)\r\n params['kx'] = theo_kx\r\n params = find_all_params_from_kx(params)\r\n kzd, kzm = params['kz_list']\r\n # check that kz_list is correct\r\n assert_floats_are_equal(kzd**2, (w**2 / nu.c0**2) * ed**2 / (em + ed))\r\n assert_floats_are_equal(kzm**2, (w**2 / nu.c0**2) * em**2 / (em + ed))\r\n # check that layer_bottom_list is correct\r\n assert params['layer_bottom_list'][0] == -inf\r\n assert params['layer_bottom_list'][1] == 0\r\n # Check that the boundary condition matrix agrees with hand-calculation\r\n bc_mat = bc_matrix(params)\r\n # ...top-left is Ex0down / H0down\r\n assert_floats_are_equal(bc_mat[0,0], -kzd / (w * ed * nu.eps0))\r\n # ...top-right is -Ex1up / H1up\r\n assert_floats_are_equal(bc_mat[0,1], -kzm / (w * em * nu.eps0))\r\n # ...bottom-left is eps0 * Ez0down / H0down\r\n assert_floats_are_equal(bc_mat[1,0], ed * -theo_kx / (w * ed * nu.eps0))\r\n # ...bottom-right is -eps1 * Ez1up / H1up\r\n assert_floats_are_equal(bc_mat[1,1], -em * -theo_kx / (w * em * nu.eps0))\r\n # Check that one of the eigenvalues is almost zero (compared to the size\r\n # of the matrix elements).\r\n eigenvalues = np.linalg.eig(bc_mat)[0]\r\n assert abs(eigenvalues).min() / abs(bc_mat).max() < 1e-6\r\n # Check that the mode passes all tests.\r\n assert check_mode(params, thorough=True) is True\r\n # Check that I can scale the fields and it still passes all tests.\r\n params_scaled = rescale_fields(1.23+4.56j, params)\r\n assert check_mode(params_scaled, thorough=True) is True\r\n \r\n # Now try my kx-finding algorithm, to see if it finds the right value.\r\n kx_list = 
find_kx(input_params)\r\n print('kx_list:',\r\n ['(%.7g+%.7gj) rad/um' % (kx.real / nu.um**-1, kx.imag / nu.um**-1)\r\n for kx in kx_list])\r\n kx = kx_list[0]\r\n assert_floats_are_equal(theo_kx, kx)\r\n \r\n plot_mode(params)\r\n \r\n print('If you see this message, all the tests succeeded!!')", "def hemianopsia_soft(hemi='left',slope=5.0,dprime_fnc=dprime_basic):\n def hemianopsia_fnc(distance):\n if (hemi == 'left'):\n return dprime_fnc(distance) * sigmoid(distance[0],slope)\n elif (hemi == 'right'):\n return dprime_fnc(distance) * sigmoid(-distance[0],slope)\n else:\n assert False, \"Unimplemented branch for the argument bling\"\n return hemianopsia_fnc", "def test_hill_formula(self):\n # make sure smiles match reference\n molecule_smiles = create_ethanol()\n assert molecule_smiles.hill_formula == \"C2H6O\"\n # make sure is not order dependent\n molecule_smiles_reverse = create_reversed_ethanol()\n assert molecule_smiles.hill_formula == molecule_smiles_reverse.hill_formula\n # make sure single element names are put first\n order_mol = Molecule.from_smiles(\"C(Br)CB\")\n assert order_mol.hill_formula == \"C2H6BBr\"\n # test molecule with no carbon\n no_carb_mol = Molecule.from_smiles(\"OS(=O)(=O)O\")\n assert no_carb_mol.hill_formula == \"H2O4S\"\n # test no carbon and hydrogen\n br_i = Molecule.from_smiles(\"BrI\")\n assert br_i.hill_formula == \"BrI\"\n # make sure files and smiles match\n molecule_file = Molecule.from_file(get_data_file_path(\"molecules/ethanol.sdf\"))\n assert molecule_smiles.hill_formula == molecule_file.hill_formula\n # make sure the topology molecule gives the same formula\n from openforcefield.topology.topology import Topology, TopologyMolecule\n\n topology = Topology.from_molecules(molecule_smiles)\n topmol = TopologyMolecule(molecule_smiles, topology)\n assert molecule_smiles.hill_formula == Molecule.to_hill_formula(topmol)\n # make sure the networkx matches\n assert molecule_smiles.hill_formula == Molecule.to_hill_formula(\n molecule_smiles.to_networkx()\n )" ]
[ "0.57660997", "0.56802636", "0.5665918", "0.5624264", "0.559748", "0.55954516", "0.5568438", "0.5507126", "0.5500543", "0.5387872", "0.53738797", "0.534831", "0.5345321", "0.53414637", "0.5333701", "0.5332214", "0.53233445", "0.53016967", "0.5296686", "0.5290382", "0.5285452", "0.52381265", "0.5234824", "0.5226292", "0.522", "0.52175117", "0.5210483", "0.52064", "0.5205207", "0.519419", "0.5191068", "0.5178392", "0.5163471", "0.51458675", "0.51453906", "0.5144496", "0.51374555", "0.51339066", "0.5133479", "0.5116599", "0.51035094", "0.5101471", "0.50941586", "0.50930697", "0.50891495", "0.50846404", "0.50655556", "0.50628453", "0.50620574", "0.5059643", "0.5058443", "0.50520486", "0.50460535", "0.5037342", "0.5029017", "0.50126594", "0.5011933", "0.5008556", "0.5007174", "0.49954998", "0.4989143", "0.49869806", "0.4978726", "0.49771848", "0.4971292", "0.49688584", "0.4967936", "0.49635145", "0.49633166", "0.49604857", "0.4957939", "0.49571395", "0.49565798", "0.49529856", "0.49529114", "0.49424136", "0.4942343", "0.49376395", "0.4935935", "0.49339616", "0.49305564", "0.492473", "0.491807", "0.49156398", "0.49126974", "0.4910364", "0.490546", "0.49048918", "0.49043804", "0.49042785", "0.49038616", "0.4901323", "0.49010888", "0.49004564", "0.48991325", "0.48983994", "0.48964095", "0.48948187", "0.48940328", "0.48776788", "0.487531" ]
0.0
-1
see cmd_calls/TestGotoField.txt for command list.
def test_goto_field_boss_calibs(self): sopTester.updateModel('mcp', TestHelper.mcpState['all_off']) cmdState = self.actorState.gotoField cmdState.reinitialize(self.cmd) cmdState.doSlew = False cmdState.doHartmann = False cmdState.doGuider = False self._goto_field_boss(10, 57, 0, 0, cmdState)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_goto_field_apogee(self):\n cmdState = self.actorState.gotoField\n cmdState.reinitialize(self.cmd)\n self._goto_feld_apogee(13, 46, 0, 0, cmdState)", "def test_goto_field_apogee_no_guider(self):\n cmdState = self.actorState.gotoField\n cmdState.reinitialize(self.cmd)\n cmdState.doGuider = False\n self._goto_feld_apogee(3, 11, 0, 0, cmdState)", "def test_goto_field_apogee_no_slew(self):\n cmdState = self.actorState.gotoField\n cmdState.reinitialize(self.cmd)\n cmdState.doSlew = False\n self._goto_feld_apogee(9, 37, 0, 0, cmdState)", "def test_select_field():", "def test_goto_field_apogee_bypass_gangToCart(self):\n self._prep_bypass('gangToCart', clear=True)\n cmdState = self.actorState.gotoField\n cmdState.reinitialize(self.cmd)\n self._goto_feld_apogee(13, 44, 4, 0, cmdState)", "def test_goto_field_cartridge_mismatch(self):\n\n sopTester.updateModel('guider', TestHelper.guiderState['bossLoaded'])\n\n mcpState = TestHelper.mcpState['boss_science']\n mcpState.update({'instrumentNum': [15]})\n sopTester.updateModel('mcp', mcpState)\n\n cmdState = self.actorState.gotoField\n cmdState.reinitialize(self.cmd)\n\n masterThread.goto_field(self.cmd, cmdState, myGlobals.actorState)\n self._check_cmd(0, 14, 0, 0, finish=True, didFail=True)", "def navigate_to_field(self, field):\n raise NotImplementedError()", "def goto(self, item):\n command = 'goto ' + str(item)\n self.run_command(command)", "def goto(self, offset):\n self._vim.command('goto {}'.format(offset))", "def test_goto_field_apogee_no_slew_shutter_open(self):\n sopTester.updateModel('apogee', TestHelper.apogeeState['B_open'])\n cmdState = self.actorState.gotoField\n cmdState.reinitialize(self.cmd)\n cmdState.doSlew = False\n self._goto_feld_apogee(10, 37, 0, 0, cmdState)", "def test_goto_field_apogee_no_slew_decenter_off(self):\n sopTester.updateModel('mcp', TestHelper.mcpState['all_off'])\n sopTester.updateModel('guider', TestHelper.guiderState['guiderOnDecenter'])\n cmdState = self.actorState.gotoField\n cmdState.reinitialize(self.cmd)\n cmdState.doSlew = False\n self._goto_feld_apogee(9, 37, 0, 0, cmdState)", "def getLoc(field, *args):\n\tpath = cFuncs.fixPath(cmds.fileDialog2(fileMode = 2, ds=1)[0])\n\tcmds.textFieldButtonGrp(widgets[field], e=True, tx = path)", "def cg_goto(self, cmd):\n label = self.makeLabel(cmd)\n self.asm(unindent(f\"\"\"\n @{label}\n 0;JMP\n \"\"\"))", "def test_entities__Entity__addField__2(entities, entity, field):\n entity.addField(field)\n assert IDummy == entities[u'Field-1'].interface", "def test_30_navigate(self):\n break_bar = {\"gdb\": \"break Bar\\n\", \"lldb\": \"breakpoint set --fullname Bar\\n\"}\n for backend, spec in subtests.items():\n with self.subTest(backend=backend):\n e.Ty(spec['launch'], delay=1)\n e.Ty(break_bar[backend])\n e.In(\"<esc>:wincmd k<cr>\")\n e.In(\":e src/test.cpp\\n\")\n e.In(\":10<cr>\")\n e.In(\"<f8>\")\n\n cur, breaks = e.GetSigns()\n self.assertFalse(cur)\n self.assertEqual([5, 10], breaks)\n\n # Go to another file\n e.Ty(\":e src/lib.hpp\\n\")\n cur, breaks = e.GetSigns()\n self.assertFalse(cur)\n self.assertEqual([], breaks)\n e.Ty(\":8\\n\")\n e.In(\"<f8>\")\n cur, breaks = e.GetSigns()\n self.assertFalse(cur)\n self.assertEqual([8], breaks)\n\n # Return to the first file\n e.Ty(\":e src/test.cpp\\n\")\n cur, breaks = e.GetSigns()\n self.assertFalse(cur)\n self.assertEqual([5, 10], breaks)\n\n e.In('ZZ')", "def test_cmd_field():\n FNAME = 'xonsh-SESSIONID.json'\n FNAME += '.cmdfield'\n hist = History(filename=FNAME, here='yup', **HIST_TEST_KWARGS)\n # 
in-memory\n with mock_xonsh_env({'HISTCONTROL': set()}):\n hf = hist.append({'rtn': 1})\n yield assert_is_none, hf\n yield assert_equal, 1, hist.rtns[0]\n yield assert_equal, 1, hist.rtns[-1]\n yield assert_equal, None, hist.outs[-1]\n # slice\n yield assert_equal, [1], hist.rtns[:]\n # on disk\n hf = hist.flush()\n yield assert_is_not_none, hf\n yield assert_equal, 1, hist.rtns[0]\n yield assert_equal, 1, hist.rtns[-1]\n yield assert_equal, None, hist.outs[-1]\n os.remove(FNAME)", "def cmdScrollFieldReporter(*args, annotation: Union[AnyStr, bool]=\"\", backgroundColor:\n Union[List[float, float, float], bool]=None, clear: bool=True,\n copySelection: bool=True, cutSelection: bool=True, defineTemplate:\n AnyStr=\"\", docTag: Union[AnyStr, bool]=\"\", dragCallback: Script=None,\n dropCallback: Script=None, echoAllCommands: bool=True, enable:\n bool=True, enableBackground: bool=True, enableKeyboardFocus:\n bool=True, exists: bool=True, filterSourceType: Union[AnyStr,\n bool]=\"\", fullPathName: bool=True, hasFocus: bool=True, height:\n Union[int, bool]=0, highlightColor: Union[List[float, float, float],\n bool]=None, isObscured: bool=True, lineNumbers: bool=True, manage:\n bool=True, noBackground: bool=True, numberOfPopupMenus: bool=True,\n parent: Union[AnyStr, bool]=\"\", pasteSelection: bool=True,\n popupMenuArray: bool=True, preventOverride: bool=True,\n receiveFocusCommand: Script=None, saveSelection: AnyStr=\"\",\n saveSelectionToShelf: bool=True, select: List[int, int]=None,\n selectAll: bool=True, stackTrace: bool=True, statusBarMessage:\n AnyStr=\"\", suppressErrors: bool=True, suppressInfo: bool=True,\n suppressResults: bool=True, suppressStackTrace: bool=True,\n suppressWarnings: bool=True, text: Union[AnyStr, bool]=\"\",\n textLength: bool=True, useTemplate: AnyStr=\"\", visible: bool=True,\n visibleChangeCommand: Union[Script, bool]=None, width: Union[int,\n bool]=0, q=True, query=True, e=True, edit=True,\n **kwargs)->Union[AnyStr, Any]:\n pass", "def goto(n):\n n = int('{}'.format(n))\n get_controller().step_to(n)", "def cmdScrollFieldExecuter(*args, annotation: Union[AnyStr, bool]=\"\", appendText: AnyStr=\"\",\n autoCloseBraces: bool=True, backgroundColor: Union[List[float, float,\n float], bool]=None, clear: bool=True, commandCompletion: bool=True,\n copySelection: bool=True, currentLine: Union[int, bool]=0,\n cutSelection: bool=True, defineTemplate: AnyStr=\"\", docTag:\n Union[AnyStr, bool]=\"\", dragCallback: Script=None, dropCallback:\n Script=None, enable: bool=True, enableBackground: bool=True,\n enableKeyboardFocus: bool=True, execute: bool=True, executeAll:\n bool=True, exists: bool=True, fileChangedCommand: Script=None,\n filename: bool=True, filterKeyPress: Union[Script, bool]=None,\n fullPathName: bool=True, hasFocus: bool=True, hasSelection:\n bool=True, height: Union[int, bool]=0, highlightColor:\n Union[List[float, float, float], bool]=None, insertText: AnyStr=\"\",\n isObscured: bool=True, load: bool=True, loadContents: AnyStr=\"\",\n loadFile: AnyStr=\"\", manage: bool=True, modificationChangedCommand:\n Script=None, modified: bool=True, noBackground: bool=True,\n numberOfLines: Union[int, bool]=0, numberOfPopupMenus: bool=True,\n objectPathCompletion: bool=True, parent: Union[AnyStr, bool]=\"\",\n pasteSelection: bool=True, popupMenuArray: bool=True,\n preventOverride: bool=True, redo: bool=True, removeStoredContents:\n AnyStr=\"\", replaceAll: List[AnyStr, AnyStr]=None, saveFile:\n AnyStr=\"\", saveSelection: AnyStr=\"\", saveSelectionToShelf: bool=True,\n 
searchAndSelect: bool=True, searchDown: bool=True, searchMatchCase:\n bool=True, searchString: Union[AnyStr, bool]=\"\", searchWraps:\n bool=True, select: List[int, int]=None, selectAll: bool=True,\n selectedText: bool=True, showLineNumbers: bool=True,\n showTooltipHelp: bool=True, source: bool=True, sourceType:\n Union[AnyStr, bool]=\"\", spacesPerTab: Union[int, bool]=4,\n statusBarMessage: AnyStr=\"\", storeContents: AnyStr=\"\",\n tabsForIndent: bool=True, text: Union[AnyStr, bool]=\"\", textLength:\n bool=True, undo: bool=True, useTemplate: AnyStr=\"\", visible:\n bool=True, visibleChangeCommand: Union[Script, bool]=None, width:\n Union[int, bool]=0, q=True, query=True, e=True, edit=True,\n **kwargs)->Union[AnyStr, Any]:\n pass", "def _is_goto(self, words):\n if words[0] == 'goto':\n if len(words) != 2:\n raise SyntaxError(\"File line {}: Invalid number of arguments for C_GOTO command.\".format(self._file_line))\n return True\n else:\n return False", "def test_back_to_home_at_start_of_line(self):\n before_b = \"\"\"\\\n if a:\n b = 'xyz'\n \"\"\"\n after_b = \"\"\"\\\n if a:\n b = 'xyz'\n \"\"\"\n self.run_test(\n before_b=before_b,\n after_b=after_b,\n before_sel=(\"2.0\", \"2.0\"),\n after_sel=(\"2.4\", \"2.4\"),\n command_name=\"back-to-home\",\n )", "def test_select_detail_returns_the_correct_menu(self):\n # make some DB entries\n dataset = self.create_mixed_test_data()\n test_log_entries = dataset['test_log_entries']\n self.menu.records = test_log_entries\n # choose an index\n selected_index = 1\n # execute the method\n user_input = str(selected_index + 1)\n with patch('builtins.input', side_effect=user_input):\n result = self.menu.select_detail()\n\n # assert that the correct menu is retured\n expected_result = self.menu.present_next_result\n\n self.assertEqual(result, expected_result)", "def test_02_visit_again(self):", "def test_goto_field_boss_ffs_open_fails(self):\n sopTester.updateModel('mcp', TestHelper.mcpState['all_off'])\n cmdState = self.actorState.gotoField\n cmdState.reinitialize(self.cmd)\n self.cmd.failOn = 'mcp ffs.open'\n self._goto_field_boss(21, 102, 1, 1, cmdState, didFail=True, finish=True)", "def test_forward_char(self):\n before_b = \"\"\"\\\n first line\n line 1\n line a\n line b\n line c\n last line\n \"\"\"\n after_b = \"\"\"\\\n first line\n line 1\n line a\n line b\n line c\n last line\n \"\"\"\n self.run_test(\n before_b=before_b,\n after_b=after_b,\n before_sel=(\"1.2\", \"1.2\"),\n after_sel=(\"1.3\", \"1.3\"),\n command_name=\"forward-char\",\n )", "def test_models_edx_ui_textbook_pdf_search_navigated_next_with_valid_statement(\n statement,\n):\n assert statement.event_type == \"textbook.pdf.search.navigatednext\"\n assert statement.name == \"textbook.pdf.search.navigatednext\"", "def __goto(self):\n from QScintilla.GotoDialog import GotoDialog\n \n aw = self.activeWindow()\n lines = aw.lines()\n curLine = aw.getCursorPosition()[0] + 1\n dlg = GotoDialog(lines, curLine, self.ui, None, True)\n if dlg.exec_() == QDialog.Accepted:\n aw.gotoLine(dlg.getLinenumber(), expand=True)", "def test_01_visit(self):", "def go_to_line(self, value=None):\n self.searcher = Toplevel()\n self.searcher.wm_title(\"Go To Line\")\n self.line_number = Entry(self.searcher)\n on_clicker = Button(self.searcher, command=self.go_to, text=\"Go\")\n self.line_number.pack()\n on_clicker.pack()", "def test_goto_field_boss_guider(self):\n sopTester.updateModel('mcp', TestHelper.mcpState['all_off'])\n sopTester.updateModel('guider', TestHelper.guiderState['guiderOnDecenter'])\n 
cmdState = self.actorState.gotoField\n cmdState.reinitialize(self.cmd)\n cmdState.doSlew = False\n cmdState.doHartmann = False\n cmdState.doCalibs = False\n cmdState.arcTime = 0\n cmdState.flatTime = 0\n self._goto_field_boss(9, 37, 0, 0, cmdState)", "def test_entities__Entity__addField__1(entities, entity, field):\n entity.addField(field)\n assert ['Field-1'] == list(entities.keys())\n assert field is entities[u'Field-1']", "def test_field_code(self):\n inv_search = \"subject:p\"\n spi_search = \"f f p\"\n self._compare_searches(inv_search, spi_search)", "def goto(self, index):\n raise NotImplementedError", "def test_make_form_field():", "def test_field_keyword_and_message_passing_test():\n with temporary_dir() as output_dir:\n copyfile(\n TEST_ROBOT_OUTPUT_FILES / \"robot_with_failures.xml\",\n Path(output_dir) / \"output.xml\",\n )\n robot_importer.import_robot_test_results(FlowTaskFactory(), output_dir)\n\n test_result = models.TestResult.objects.get(method__name=\"Passing test\")\n assert test_result.message == \"Life is good, yo.\"\n assert test_result.robot_keyword is None", "def test_ref_and_ptr(self):\n self.build()\n (self.target, process, _, bkpt) = lldbutil.run_to_source_breakpoint(\n self, \"Stop here to check by ref and ptr.\",\n lldb.SBFileSpec(\"main.cpp\", False))\n # The reference should print just like the value:\n self.check_ii(\"ref\")\n\n self.expect(\"frame variable ptr\",\n substrs=[\"ptr =\", \"size=7\"])\n self.expect(\"expr ptr\",\n substrs=[\"size=7\"])", "def processJumpTable(jt_ea):", "def test_entities__Entity__getField__2(entity):\n assert IDummy['dummy2'] == entity.getField('dummy2')", "def test_docfields(self):\n with sphinx_build('pyexample'):\n with open('_build/text/docfx_yaml/example.example.Foo.yml') as yml_file:\n data = yaml.safe_load(yml_file)\n for item in data['items']:\n if item['uid'] == 'example.example.Foo.method_okay':\n self.assertEqual(\n item['syntax']['return'],\n {'type': ['boolean'], 'description': 'That the method is okay'},\n )\n self.assertEqual(\n item['syntax']['parameters'][1]['defaultValue'],\n 'None',\n )\n self.assertEqual(\n item['syntax']['parameters'][1]['description'],\n 'The foo param',\n )\n self.assertEqual(\n item['syntax']['content'],\n 'method_okay(self, foo=None, bar=None)',\n )", "def test_remove_previous_focus(self):\n target_1 = 'generic_field'\n result_1 = self.form.assign_focus_field(target_1)\n focused_1 = self.find_focus_field()\n\n target_2 = 'another_field'\n result_2 = self.form.assign_focus_field(target_2)\n focused_2 = self.find_focus_field()\n\n self.assertNotEqual(target_1, target_2)\n self.assertIn(target_1, self.form.fields)\n self.assertEqual(1, len(focused_1))\n self.assertEqual(target_1, focused_1[0])\n self.assertEqual(target_1, result_1)\n self.assertIn(target_2, self.form.fields)\n self.assertEqual(1, len(focused_2))\n self.assertEqual(target_2, focused_2[0])\n self.assertEqual(target_2, result_2)", "def test_get_field(test_client):\n test_dict = {\"kfc\": {\"item\": \"chickens\"}}\n assert test_client.get_field(\"kfc.item\", doc=test_dict) == \"chickens\"", "def test_issue_edit_label(self):\n pass", "def step(self, action):", "def jump(self):\n print(\"Inside ElfRider.jump\")", "def goto(cls, quad):\n\t\treturn quad.result", "def test_back_to_home_at_end_of_line(self):\n before_b = \"\"\"\\\n if a:\n b = 'xyz'\n \"\"\"\n after_b = \"\"\"\\\n if a:\n b = 'xyz'\n \"\"\"\n self.run_test(\n before_b=before_b,\n after_b=after_b,\n before_sel=(\"2.12\", \"2.12\"),\n after_sel=(\"2.4\", \"2.4\"),\n 
command_name=\"back-to-home\",\n )", "def test_search_table_with_field(self) -> None:\n responses.add(responses.GET, local_app.config['SEARCHSERVICE_BASE'] + SEARCH_ENDPOINT,\n json={}, status=HTTPStatus.OK)\n\n with local_app.test_client() as test:\n response = test.get('/api/search/field/'\n 'tag_names/field_val/test', query_string=dict(query_term='test',\n page_index='0'))\n self.assertEqual(response.status_code, HTTPStatus.OK)", "def test(self):\n self.build()\n exe = self.getBuildArtifact(\"a.out\")\n self.runCmd(\"file \" + exe, CURRENT_EXECUTABLE_SET)\n\n lldbutil.run_to_source_breakpoint(\n self, '// Breakpoint for bitfield', lldb.SBFileSpec(\"main.c\"))\n\n self.expect(\"fr var a\", DATA_TYPES_DISPLAYED_CORRECTLY,\n patterns=[' = A$'])\n self.expect(\"fr var b\", DATA_TYPES_DISPLAYED_CORRECTLY,\n patterns=[' = B$'])\n self.expect(\"fr var c\", DATA_TYPES_DISPLAYED_CORRECTLY,\n patterns=[' = C$'])\n self.expect(\"fr var ab\", DATA_TYPES_DISPLAYED_CORRECTLY,\n patterns=[' = AB$'])\n self.expect(\"fr var ac\", DATA_TYPES_DISPLAYED_CORRECTLY,\n patterns=[' = A | C$'])\n self.expect(\"fr var all\", DATA_TYPES_DISPLAYED_CORRECTLY,\n patterns=[' = ALL$'])\n # Test that an enum that doesn't match the heuristic we use in\n # ClangASTContext::DumpEnumValue, gets printed as a raw integer.\n self.expect(\"fr var omega\", DATA_TYPES_DISPLAYED_CORRECTLY,\n patterns=[' = 7$'])\n # Test the behavior in case have a variable of a type considered\n # 'bitfield' by the heuristic, but the value isn't actually fully\n # covered by the enumerators.\n self.expect(\"p (enum bitfield)nonsense\", DATA_TYPES_DISPLAYED_CORRECTLY,\n patterns=[' = B | C | 0x10$'])\n\n # Break inside the main.\n bkpt_id = lldbutil.run_break_set_by_file_and_line(\n self, \"main.c\", self.line, num_expected_locations=1, loc_exact=True)\n self.runCmd(\"c\", RUN_SUCCEEDED)\n\n # The stop reason of the thread should be breakpoint.\n self.expect(\"thread list\", STOPPED_DUE_TO_BREAKPOINT,\n substrs=['stopped',\n 'stop reason = breakpoint'])\n\n # The breakpoint should have a hit count of 1.\n self.expect(\"breakpoint list -f\", BREAKPOINT_HIT_ONCE,\n substrs=[' resolved, hit count = 1'])\n\n # Look up information about the 'days' enum type.\n # Check for correct display.\n self.expect(\"image lookup -t days\", DATA_TYPES_DISPLAYED_CORRECTLY,\n substrs=['enum days {',\n 'Monday',\n 'Tuesday',\n 'Wednesday',\n 'Thursday',\n 'Friday',\n 'Saturday',\n 'Sunday',\n 'kNumDays',\n '}'])\n\n enum_values = ['-4',\n 'Monday',\n 'Tuesday',\n 'Wednesday',\n 'Thursday',\n 'Friday',\n 'Saturday',\n 'Sunday',\n 'kNumDays',\n '5']\n\n # Make sure a pointer to an anonymous enum type does crash LLDB and displays correctly using\n # frame variable and expression commands\n self.expect(\n 'frame variable f.op',\n DATA_TYPES_DISPLAYED_CORRECTLY,\n substrs=[\n 'ops *',\n 'f.op'],\n patterns=['0x0+$'])\n self.expect(\n 'frame variable *f.op',\n DATA_TYPES_DISPLAYED_CORRECTLY,\n substrs=[\n 'ops',\n '*f.op',\n '<parent is NULL>'])\n self.expect(\n 'expr f.op',\n DATA_TYPES_DISPLAYED_CORRECTLY,\n substrs=[\n 'ops *',\n '$'],\n patterns=['0x0+$'])\n self.expect(\n 'expr *f.op',\n DATA_TYPES_DISPLAYED_CORRECTLY,\n substrs=['error:'],\n error=True)\n\n bkpt = self.target().FindBreakpointByID(bkpt_id)\n for enum_value in enum_values:\n self.expect(\n \"frame variable day\",\n 'check for valid enumeration value',\n substrs=[enum_value])\n lldbutil.continue_to_breakpoint(self.process(), bkpt)", "def move_to_target():\n keyboard.send('f')", "def 
write_goto(output_file, command, label, curr_function):\n if command == \"if-goto\":\n output_file.write(\"@SP\" + \"\\n\" +\n \"AM = M - 1\" + \"\\n\" +\n \"D = M\" + \"\\n\" +\n \"@\" + curr_function[0] + \"$\" + label + \"\\n\" +\n \"D; JNE\" + \"\\n\")\n else:\n output_file.write(\"@\" + curr_function[0] + \"$\" + label + \"\\n\" +\n \"0; JMP\" + \"\\n\")", "def test_text_field():", "def test_get_explore_fields_gets_fields(\n fc: fetcher.Fetcher, test_model, test_explores_stats\n):\n test_explore = test_explores_stats[0]\n explore = fc.get_explores(model=test_model[\"name\"], explore=test_explore[\"name\"])\n assert isinstance(explore, list)\n explore = explore[0]\n assert isinstance(explore, models.LookmlModelExplore)\n assert explore.model_name == test_model[\"name\"]\n assert explore.name == test_explore[\"name\"]\n fields = fc.get_explore_fields(explore)\n assert isinstance(fields, list)\n assert fields == test_explore[\"all_fields\"]", "def __le__(self, *args):\n return _ida_hexrays.cgoto_t___le__(self, *args)", "def test_select_detail_displays_the_correct_record(self):\n # make some DB entries\n dataset = self.create_mixed_test_data()\n test_log_entries = dataset['test_log_entries']\n self.menu.records = test_log_entries\n # choose an index\n selected_index = 1\n # execute the method\n user_input = str(selected_index + 1)\n with patch('builtins.input', side_effect=user_input):\n self.menu.select_detail()\n\n # assert that the db entry that current_record looks up is the same\n # as the one chosen.\n self.assertEqual(test_log_entries[selected_index],\n self.menu.records[self.menu.current_record])", "def navigate_to_field(self, field):\n if self.field:\n raise AssertionError(u'Already at a field, cannot nest fields: {}'.format(self))\n return Location(self.query_path, field=field, visit_counter=self.visit_counter)", "def go_to(self, value=None):\n self.go_to_this_line = self.line_number.get()\n self.my_text.mark_set(INSERT, str(float(self.go_to_this_line)))\n self.current_area()\n self.my_text.see(INSERT)\n self.searcher.destroy()", "def test_entities__Entity__getField__1(entity):\n with pytest.raises(KeyError):\n entity.getField('asdf')", "def test_first_page_passes(self):\n\n self.page.open_site(PageLocators.PREVIOUS_LINK)\n self.page.fill_all_fields()\n self.page.send_the_data()", "def test_hidden_field():", "def TestOneStep(self):\n pass", "def _guider_start(self, nCall, nInfo, nWarn, nErr, finish=False, didFail=False):\n cmdState = self.actorState.gotoField\n cmdState.reinitialize(self.cmd)\n result = masterThread.guider_start(self.cmd, cmdState, myGlobals.actorState, 'gotoField')\n self.assertEqual(result, not didFail)\n self._check_cmd(nCall, nInfo, nWarn, nErr, finish, didFail=didFail)", "def jump(act_line: int, offset: str):\n return act_line + int(offset)", "def getfield(value, arg):\n #import pdb; pdb.set_trace()\n if hasattr(value, \"fields\"):\n fields = getattr(value, \"fields\")\n if str(arg) in fields:\n return str(fields[str(arg)])", "def handle_next_breakpoint(self, bit):\n self.check_required()\n self.options.shift_breakpoint()\n self.arguments = self.options.get_arguments()\n self.current_argument = self.arguments.pop(0)", "def __ge__(self, *args):\n return _ida_hexrays.cgoto_t___ge__(self, *args)", "def test_focus(self, name=None):\n focus_func = getattr(self.form, 'assign_focus_field', None)\n fields = self.get_current_fields()\n if focus_func and issubclass(self.__class__, FocusMixIn):\n name = name or getattr(self.form, 'named_focus', None)\n expected = 
focus_func(name, fields)\n else:\n expected = 'username' if 'username' in fields else None\n expected = name or expected or None\n if not expected:\n self.assertTrue(True)\n return\n focus_list = self.find_focus_field()\n self.assertEqual(1, len(focus_list))\n self.assertEqual(expected, focus_list[0])", "def test_goto_field_boss_flat_on_fails(self):\n sopTester.updateModel('mcp', TestHelper.mcpState['all_off'])\n cmdState = self.actorState.gotoField\n cmdState.reinitialize(self.cmd)\n self.cmd.failOn = 'mcp ff.on'\n self._goto_field_boss(16, 71, 0, 1, cmdState, didFail=True, finish=True)", "def scrollField(*args, annotation: Union[AnyStr, bool]=\"\", backgroundColor: Union[List[float,\n float, float], bool]=None, changeCommand: Script=None, clear: bool=True,\n command: AnyStr=\"\", defineTemplate: AnyStr=\"\", docTag: Union[AnyStr, bool]=\"\",\n dragCallback: Script=None, dropCallback: Script=None, editable: bool=True,\n enable: bool=True, enableBackground: bool=True, enableKeyboardFocus: bool=True,\n enterCommand: Script=None, exists: bool=True, font: Union[AnyStr, bool]=\"\",\n fontPointSize: Union[int, bool]=0, fullPathName: bool=True, height: Union[int,\n bool]=0, highlightColor: Union[List[float, float, float], bool]=None,\n insertText: AnyStr=\"\", insertionPosition: Union[int, bool]=0, isObscured:\n bool=True, keyPressCommand: Script=None, manage: bool=True, noBackground:\n bool=True, numberOfLines: Union[int, bool]=0, numberOfPopupMenus: bool=True,\n parent: Union[AnyStr, bool]=\"\", popupMenuArray: bool=True, preventOverride:\n bool=True, qtFont: Union[AnyStr, bool]=\"\", selection: bool=True,\n statusBarMessage: AnyStr=\"\", text: Union[AnyStr, bool]=\"\", useTemplate:\n AnyStr=\"\", visible: bool=True, visibleChangeCommand: Union[Script, bool]=None,\n width: Union[int, bool]=0, wordWrap: bool=True, q=True, query=True, e=True,\n edit=True, **kwargs)->Union[AnyStr, Any]:\n pass", "def other_search(self):\n test = self.ask_zoekarg.text()\n if test:\n self.parent().search_arg = test\n self.parent().do_select()", "def test(self):\n # 0x13 is nop\n self.gdb.command(\"p *((int*) 0x%x)=0x13\" % self.target.ram)\n self.gdb.command(\"p *((int*) 0x%x)=0x13\" % (self.target.ram + 4))\n self.gdb.command(\"p *((int*) 0x%x)=0x13\" % (self.target.ram + 8))\n self.gdb.p(\"$pc=0x%x\" % self.target.ram)\n self.gdb.stepi()\n assertEqual((self.target.ram + 4), self.gdb.p(\"$pc\"))\n self.gdb.stepi()\n assertEqual((self.target.ram + 8), self.gdb.p(\"$pc\"))", "def get_jump(self):\n return self.jump", "def test_goto_definition_at_zero(Script):\n assert Script(\"a\").infer(1, 1) == []\n s = Script(\"str\").infer(1, 1)\n assert len(s) == 1\n assert list(s)[0].description == 'class str'\n assert Script(\"\").infer(1, 0) == []", "def goto(action, value, error_handle):\n print_info(\"failed: failure action= goto %s\" % value)\n error_handle['action'] = 'GOTO'\n error_handle['value'] = value\n return error_handle", "def test_case_main(self, case_results):\n\n log_test_case(self.name, 'drag up and down')\n drag_by_param(95, 50, 95, 85, 1)\n drag_by_param(95, 85, 95, 50, 1)\n sleep(1)\n drag_by_param(95, 50, 95, 85, 1)\n drag_by_param(95, 85, 95, 50, 1)\n sleep(1)\n self.ime = IME()\n click_imageview_by_id('menu_search')\n #entertext_edittext_on_focused('Mi')\n #clear_edittext_by_index(0)\n '''\n PRIVATE_del = ['del']\n self.ime.IME_input(1, PRIVATE_del)\n self.ime.IME_input(1, PRIVATE_del)\n self.ime.IME_input(1, PRIVATE_del)\n '''\n\n self.ime.IME_input_english(1, 'tes')\n sleep(2)\n if 
search_text(SC.PRIVATE_CONTACT_NAME):\n log_test_case(self.name, 'search name ok...')\n click_textview_by_text(SC.PRIVATE_CONTACT_NAME)\n #click_in_list_by_index(1)\n else:\n log_test_case(self.name, 'search failed...')\n return\n\n self.call_from_contact()\n\n #send sms to mike\n click_textview_by_id('secondary_action_button')\n self.ime.IME_input_english(1, 'send')\n #click_imageview_by_desc(\"send\")\n click_button_by_id('send_button_sms')\n sleep(3)\n goback()\n\n #click_textview_by_text('binz')\n # show a moment\n sleep(1)\n\n #send EMAIL\n if search_text('jubao@qunar.com'):\n click_textview_by_text('jubao@qunar.com')\n\n #\n if search_text('Complete action using'):\n click_textview_by_text('Email')\n click_button_by_id('button_always')\n entertext_edittext_on_focused('email title')\n\n click_imageview_by_id(\"send\")\n\n else:\n goback()\n\n # go address map\n scroll_to_bottom()\n if search_text('ADDRESS'):\n click_textview_by_text('pudong,shanghai,china')\n else:\n goback()", "def test_get_goal(self):\n pass", "def test_focus_on_limited_fields(self):\n original_named_focus = self.form.named_focus\n original_fields_focus = self.form.fields_focus\n original_given_focus = self.form.given_focus\n original_fields = self.form.fields\n self.form.named_focus = None\n self.form.given_focus = None\n allowed = [name for name, field in self.form.fields.items()\n if not field.disabled and not isinstance(field.widget, (HiddenInput, MultipleHiddenInput))]\n self.assertGreater(len(allowed), 1)\n fields_focus = allowed[1:]\n self.form.fields_focus = fields_focus\n expected = fields_focus[0]\n actual = self.form.assign_focus_field(None, fields=self.form.fields_focus)\n\n self.assertEqual(expected, actual)\n self.assertEqual(self.form.given_focus, actual)\n\n self.form.name_focus = original_named_focus\n self.form.fields_focus = original_fields_focus\n self.form.given_focus = original_given_focus\n self.form.fields = original_fields", "def step(self, action):\n pass", "def step(self, action):\n pass", "def fooFunction(item_index):\n print(\"item %d pressed\" % (item_index))", "def _is_ifgoto(self, words):\n if words[0] == 'if-goto':\n if len(words) != 2:\n raise SyntaxError(\"File line {}: Invalid number of arguments for C_IFGOTO command.\".format(self._file_line))\n return True\n else:\n return False", "def help_select(self):\n print(SELECT)", "def test_010(self):\n caller = self.get_caller([SingleMethod])\n self.assertEqual(\"I have very little to say.\", caller())", "def test_parse_fields(pawprint_default_tracker_db):\n\n tracker = pawprint_default_tracker_db\n\n # SELECT * FROM table\n args = ()\n assert tracker._parse_fields(*args) == \"*\"\n\n # SELECT event FROM table\n args = (\"event\",)\n assert tracker._parse_fields(*args) == \"event\"\n\n # SELECT user_id, timestamp FROM table\n args = (\"user_id\", \"timestamp\")\n assert tracker._parse_fields(*args) == \"user_id, timestamp\"\n\n # SELECT metadata #>> '{a, b}' FROM table\n args = (\"metadata__a__b\",)\n assert tracker._parse_fields(*args) == \"metadata #> '{a, b}' AS json_field\"", "def _examineOrSelect(self):\n d = getattr(self.client, self.method)('foobox')\n self.assertEqual(\n self.transport.value(), b'0001 ' + self.command + b' foobox\\r\\n')\n return d", "def do_step(self) -> None:", "def write_goto_in_func(self, label):\n label = self.label_by_scope(label)\n self.write(\"@\" + label + \"\\n0;JMP\\n\")\n # @label\n # 0;JMP", "def step():\n \n step = models.Step(action=u\"goto\", target=u\"http://www.joesfunerals.com\")", "def 
test_03_visit_special(self):", "def test_int_field():", "def jumpto(self, item, offset):\n try:\n self.ret = idc.jumpto(offset)\n except:\n self.ret = False\n\n return self.ret", "def click_header_field_link(self, label):\n locator = lex_locators[\"record\"][\"header\"][\"field_value_link\"].format(label)\n self._jsclick(locator)", "def test_code010(self, *test_code_args, **test_code_kwargs):\n print(\"This is test_code010 from TestDefinition #\", self.ID, \", test case #\", self.test_case_ID, sep='')", "def write_go_to(self, label):\n self._write_line('goto ' + label)", "def test_goto_field_boss_hartmann(self):\n sopTester.updateModel('mcp', TestHelper.mcpState['all_off'])\n cmdState = self.actorState.gotoField\n cmdState.reinitialize(self.cmd)\n cmdState.doSlew = False\n cmdState.doCalibs = False\n cmdState.arcTime = 0\n cmdState.flatTime = 0\n cmdState.doGuider = False\n self._goto_field_boss(5, 29, 0, 0, cmdState)", "def test_get_genome_7(self):\n self.tkt1.data_add = set([\"retrieve_record\"])\n gnm = tickets.get_genome(self.tkt1, gnm_type=\"add\")\n with self.subTest():\n self.assertEqual(gnm.host_genus, \"\")\n with self.subTest():\n self.assertEqual(gnm.retrieve_record, 1)", "def test_goto_field_boss_slew(self):\n sopTester.updateModel('mcp', TestHelper.mcpState['all_off'])\n cmdState = self.actorState.gotoField\n cmdState.reinitialize(self.cmd)\n cmdState.doGuider = False\n cmdState.doHartmann = False\n cmdState.doCalibs = False\n cmdState.arcTime = 0\n cmdState.flatTime = 0\n self._goto_field_boss(3, 26, 0, 0, cmdState)", "def test_forward_char_extend_selection(self):\n before_b = \"\"\"\\\n first line\n line 1\n line a\n line b\n line c\n last line\n \"\"\"\n after_b = \"\"\"\\\n first line\n line 1\n line a\n line b\n line c\n last line\n \"\"\"\n self.run_test(\n before_b=before_b,\n after_b=after_b,\n before_sel=(\"1.1\", \"1.1\"),\n after_sel=(\"1.1\", \"1.2\"),\n command_name=\"forward-char-extend-selection\",\n )", "def _MocaCtlGetField(self, outfcn, field):\n\n m_re = re.compile(field + r'\\s*:\\s+(\\S+)')\n for line in outfcn():\n mr = m_re.search(line)\n if mr is not None:\n return mr.group(1)\n return None", "def probe_at_single_point(self, field, *args, **kwargs):\n raise NotImplementedError", "def test(self):\n # -- Test --\n\n # (1)\n\n # (2)\n\n # (3)\n\n # (4)\n # -- Test --" ]
[ "0.7691792", "0.64816624", "0.628449", "0.5856036", "0.58257854", "0.5754098", "0.56664455", "0.56335163", "0.5571063", "0.5469148", "0.54335827", "0.53827053", "0.52839625", "0.51585394", "0.5143092", "0.5104858", "0.5102512", "0.5086537", "0.50655967", "0.50533384", "0.5010544", "0.49709177", "0.49680653", "0.49628344", "0.49567345", "0.49561286", "0.4955987", "0.49268192", "0.4919463", "0.49160644", "0.4909401", "0.49054995", "0.48988038", "0.4870134", "0.48598042", "0.484847", "0.48315704", "0.4827044", "0.48177752", "0.4817093", "0.48133612", "0.48062047", "0.47985446", "0.47976562", "0.47937015", "0.47868142", "0.47815123", "0.47748837", "0.47668114", "0.47656122", "0.4750558", "0.47476444", "0.4736779", "0.47367582", "0.4735321", "0.47338954", "0.47263035", "0.47256926", "0.47215664", "0.47182435", "0.471734", "0.47099608", "0.47069192", "0.4706346", "0.46981135", "0.46807685", "0.46784687", "0.46742418", "0.4673319", "0.46702877", "0.46510392", "0.46487918", "0.46407804", "0.46379107", "0.46332577", "0.46323773", "0.4629916", "0.4629916", "0.46280533", "0.462642", "0.46259758", "0.4622155", "0.4617772", "0.46128127", "0.4610434", "0.46103817", "0.4608422", "0.46082267", "0.46067852", "0.4604498", "0.4601525", "0.45991513", "0.45991045", "0.45989862", "0.45982143", "0.45882806", "0.45881847", "0.45742372", "0.45695114", "0.45679146" ]
0.47256958
57
Start with decentered guiding on, to check that we clear it. ff on, guider flat, ff off, ffs open 3xguider axes off, decenter off, guider on
def test_goto_field_boss_guider(self): sopTester.updateModel('mcp', TestHelper.mcpState['all_off']) sopTester.updateModel('guider', TestHelper.guiderState['guiderOnDecenter']) cmdState = self.actorState.gotoField cmdState.reinitialize(self.cmd) cmdState.doSlew = False cmdState.doHartmann = False cmdState.doCalibs = False cmdState.arcTime = 0 cmdState.flatTime = 0 self._goto_field_boss(9, 37, 0, 0, cmdState)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def f_fixed(self):\n self.fx_free = self.fy_free = self.fz_free = False\n return self", "def align(): # open EH and fast shutter\n\t#marAuxiliary.closeMarShield()\n\td2in()\n\td3in()\n\tsh('o')", "def test_DFT_rect(centering='FFTRECT', outdir=None, outname='DFT1R_', npix=None, sampling=10., nlamd=None):\n\n print \"Testing DFT, style = \"+centering\n\n\n npupil = 156\n pctr = int(npupil/2)\n s = (npupil,npupil)\n\n\n # make things rectangular:\n if nlamd is None and npix is None:\n nlamd = (10,20)\n npix = [val*sampling for val in nlamd] #(100, 200) \n elif npix is None:\n npix = [val*sampling for val in nlamd] #(100, 200) \n elif nlamd is None:\n nlamd = [val/sampling for val in npix]\n u = nlamd\n print u\n #(u, float(u)/npix[0]*npix[1])\n #npix = (npix, 2*npix)\n\n\n # FFT style\n mft1 = matrixDFT.MatrixFourierTransform(centering=centering)\n\n #ctr = (float(npupil)/2.0 + mft1.offset(), float(npupil)/2.0 + mft1.offset())\n ctr = (float(npupil)/2.0 , float(npupil)/2.0)\n #print ctr\n pupil = makedisk(s=s, c=ctr, r=float(npupil)/2.0001, t=np.float64, grey=0)\n\n pupil[0:60, 0:60] = 0\n pupil[0:10] = 0\n\n pupil /= np.sqrt(pupil.sum())\n\n plt.clf()\n plt.subplots_adjust(left=0.02, right=0.98)\n plt.subplot(141)\n\n pmx = pupil.max()\n plt.imshow(pupil, vmin=0, vmax=pmx*1.5)\n\n\n if outdir is not None:\n fits.PrimaryHDU(pupil.astype(np.float32)).writeto(outdir+os.sep+outname+\"pupil.fits\", clobber=True)\n\n a = mft1.perform(pupil, u, npix)\n\n pre = (abs(pupil)**2).sum() \n post = (abs(a)**2).sum() \n ratio = post / pre\n calcr = 1./(1.0*u[0]*u[1] *npix[0]*npix[1]) # multiply post by this to make them equal\n print \"Pre-FFT total: \"+str( pre)\n print \"Post-FFT total: \"+str( post )\n print \"Ratio: \"+str( ratio)\n #print \"Calc ratio : \"+str( calcr)\n #print \"uncorrected: \"+str( ratio/calcr)\n\n\n complexinfo(a, str=\",ft1 asf\")\n asf = a.real.copy()\n if outdir is not None:\n fits.PrimaryHDU(asf.astype(np.float32)).writeto(outdir+os.sep+outname+\"asf.fits\", clobber=True)\n cpsf = a * a.conjugate()\n psf = cpsf.real.copy()\n if outdir is not None:\n fits.PrimaryHDU(psf.astype(np.float32)).writeto(outdir+os.sep+outname+\"psf.fits\", clobber=True)\n\n ax=plt.subplot(142)\n plt.imshow(asf, norm=matplotlib.colors.LogNorm(1e-8, 1.0))\n ax.set_title='ASF'\n\n ax=plt.subplot(143)\n plt.imshow(psf, norm=matplotlib.colors.LogNorm(1e-8, 1.0))\n ax.set_title='PSF'\n\n plt.subplot(144)\n\n pupil2 = mft1.inverse(a, u, npupil)\n pupil2r = (pupil2 * pupil2.conjugate()).real\n plt.imshow( pupil2r, vmin=0,vmax=pmx*1.5*0.01) # FIXME flux normalization is not right?? 
I think this has to do with squaring the pupil here, that's all.\n plt.gca().set_title='back to pupil'\n plt.draw()\n print \"Post-inverse FFT total: \"+str( abs(pupil2r).sum() )\n print \"Post-inverse pupil max: \"+str(pupil2r.max())\n\n plt.suptitle('Matrix DFT with rectangular arrays using centering={0}'.format(centering))\n\n plt.savefig('test_DFT_rectangular_results_{0}.pdf'.format(centering))", "def proz_manually ():\r\n Check_180turn(left_boundary,right_boundary)\r\n ABS() # Baseline correction \r\n XCMD(\"closeall\",WAIT_TILL_DONE)", "def clear(self):\n self.np.fill(OFF)\n self.np.show()\n return True", "def full_reset(self):\n self.at_cmd('CFUN=1')", "def zguider():\n gzero.gxoff = camera.status.guider[0] + gzero.gxoff\n gzero.gyoff = camera.status.guider[1] + gzero.gyoff\n guider(0,0)\n f = open('/data/guidezero','w')\n cPickle.dump(gzero,f)\n f.close()", "def setNoZeroColor():\n dislin.nobgd()", "def terminatePlane3D():\n dislin.grffin()", "def reset(self):\n super(PolygonTool, self).reset()\n # self.__nsides = None\n # self.__increment = None\n # self.__external = False # make this adjustable?\n self.__center = None\n for _i in range(self.__nsides):\n self.__xpts[_i] = 0.0\n self.__ypts[_i] = 0.0", "def test_offcenter(self):\n actual = cm.ring_mask((5, 5), 1, 2, center=(2, 3))\n expected = np.array([[False, False, False, True, False],\n [False, False, True, False, True],\n [False, True, False, False, False],\n [False, False, True, False, True],\n [False, False, False, True, False]])\n self.assertIsNone(np.testing.assert_array_equal(actual, expected))", "def fill_single_world():\n if not front_is_clear():\n if not right_is_clear():\n if not left_is_clear():\n put_beeper()", "def cerrar(self):\n self.x0 = np.array(self.x0, dtype=float)\n self.x = np.array(self.x, dtype=float)\n self.tipos = np.array(self.tipos, dtype=int)\n self.mask_fr = self.tipos == 1\n self.mask_in = self.tipos == 2\n self.num_fr = np.sum(self.mask_fr)\n self.num_in = np.sum(self.mask_in)\n self.open = False", "def resetAlignmentCenter(self):\n cent = self.TiltSeries_._TiltAlignmentParas.cent\n imdimX = self.TiltSeries_._imdimX\n imdimY = self.TiltSeries_._imdimY\n print(imdimX, imdimY)\n if cent[0] != imdimX//2+1 or cent[1] != imdimY//2+1:\n #rint \"Centers do not match: cent=\"+str(cent)+\", imdim=\"+str(imdim)\n self.TiltSeries_._TiltAlignmentParas.cent = [imdimX//2+1, imdimY//2+1]", "def reset_mask(self):\n\n self.mask = np.ones(self.dispersion.shape, dtype=bool)", "def ToggleAllGizmoLocalMode( self ):\n\n value = self.gizmoMgr.GetGizmoLocal( 'pos' )\n self.gizmoMgr.SetGizmoLocal( 'pos', not value )\n self.gizmoMgr.SetGizmoLocal( 'rot', not value )\n self.gizmoMgr.SetGizmoLocal( 'scl', not value )", "def test_offcenter(self):\n actual = cm.circle_mask((5, 5), 2, center=(2, 3))\n expected = np.array([[False, False, False, True, False],\n [False, False, True, True, True],\n [False, True, True, True, True],\n [False, False, True, True, True],\n [False, False, False, True, False]])\n self.assertIsNone(np.testing.assert_array_equal(actual, expected))", "def Face_Cycle_L(self,event):\n t=event.widget.find_closest(event.x, event.y)[0]\n u=int(self.canvas.itemcget(t,\"tags\").split()[1])\n if u not in [4,13,22,31,40,49]:\n v=self.colours_face[self.ocol[self.canvas.itemcget(t,\"fill\")][0]][0][0]\n self.cubestring[int(u)]=v\n self.cube.cube = self.cubestring#Cube.set(self.cubestring)\n self.map_face()\n else:\n print(\"Cant Change Center Cubit\")", "def modes_off(self):\n bm = self.fitsimage.get_bindmap()\n 
bm.reset_mode(self.fitsimage)", "def ClearDisplay():\n display.fill(0)", "def m_fixed(self):\n self.mx_free = self.my_free = self.mz_free = False\n return self", "def update_flags(self):\n # view mode, filled vs wirefrom\n if self.view['wireframe']:\n gl.glPolygonMode(gl.GL_FRONT_AND_BACK, gl.GL_LINE)\n else:\n gl.glPolygonMode(gl.GL_FRONT_AND_BACK, gl.GL_FILL)\n\n # set fullscreen or windowed\n self.set_fullscreen(fullscreen=self.view['fullscreen'])\n\n # backface culling on or off\n if self.view['cull']:\n gl.glEnable(gl.GL_CULL_FACE)\n else:\n gl.glDisable(gl.GL_CULL_FACE)\n\n # case where we WANT an axis and NO vertexlist\n # is stored internally\n if self.view['axis'] and self._axis is None:\n from .. import creation\n # create an axis marker sized relative to the scene\n axis = creation.axis(origin_size=self.scene.scale / 100)\n # create ordered args for a vertex list\n args = rendering.mesh_to_vertexlist(axis)\n # store the axis as a reference\n self._axis = self.batch.add_indexed(*args)\n\n # case where we DON'T want an axis but a vertexlist\n # IS stored internally\n elif not self.view['axis'] and self._axis is not None:\n # remove the axis from the rendering batch\n self._axis.delete()\n # set the reference to None\n self._axis = None", "def remove_spurious_landmarks(self):\r\n \r\n remove = np.argwhere(self.lm_counter < 0)\r\n self.lm = np.delete(self.lm, remove, axis=0)\r\n self.lm_cvar = np.delete(self.lm_cvar, remove, axis=0)\r\n self.lm_counter = np.delete(self.lm_counter, remove)\r\n \r\n return # Replace this.\r", "def _mask_mode(self):\r\n self._mode_select(0)", "def plot_clear():\n plt.cla()", "def correct(self):\n self.parent.copyCurrentWinState(self.pltw)\n self.pltw.blklst[self.blkno][self.ypos] = self.data[1] - self.data[2]\n self.pltw.updatePlot()\n self.pltw.dirty = True\n self.pltw.activecurv = self.cpos\n self.parent.updateUI()\n self.hide()", "def reset(self):\n self.posXY = (0,0)\n self.magXY = (1.0,1.0)\n self.rot = 0.0\n self.trans = 255\n self.isDone = False\n self.isFirst = True\n\n self.kill()\n self.Group = pyglet.graphics.OrderedGroup(self.order)\n self.isReady = self.check()", "def test_DFT_rect_adj():\n test_DFT_rect(centering='ADJUSTIBLE', outname='DFT1Radj_')", "def reset_energizer_flag(self): \r\n self.energizer_flag = False", "def front_is_clear(): #py:front_is_clear\n return RUR._front_is_clear_()", "def update_center(self): \r\n \r\n self.grfx[0].center = self.center\r\n\r\n self.update_bbox()", "def free_curvature(self) -> None:\n self.n1.free = True\n self.n2.free = True", "def clear_crossfilter1(self):\n print ('Trigger clear')\n self.query_dict = {}\n self.plot_data = None\n self.create_figure_new()\n layout_doc.children[4].children[0] = self.p", "def enableRemoveDesign(self):\n if self.mode.selectedShipHull.aftQuadInfo == None:\n self.isDroneDesign = 1\n self.enableRemoveDroneDesign()\n else:\n self.isDroneDesign = 0\n self.enableRemoveShipDesign()", "def pixel_space(self):\n self.drawer.settransform()\n self.coordspace_bbox = [0, 0, self.width, self.height]\n self.coordspace_transform = (1, 0, 0,\n 0, 1, 0)", "def resetFrame(self):\n with self.lock:\n hbin = self.hbin\n vbin = self.vbin\n if hbin != 1:\n self.setHBin(1)\n if vbin != 1:\n self.setVBin(1)\n with self.lock:\n self.expArea = self.defaultExpArea\n x1, y1, x2, y2 = self.expArea\n self.xsize = x2 - x1\n self.ysize = y2 - y1", "def _set_up_acq_opt_direct(self):\n raise NotImplementedError('Not implemented DiRect yet.')", "def reset(self):\n if self.num == 1:\n self.rect.centerx = 
320\n elif self.num == 2:\n self.rect.centerx = 341\n elif self.num == 3:\n self.rect.centerx = 362\n elif self.num == 4:\n self.rect.centerx = 383\n self.rect.centery = 371\n self.centerx = self.rect.centerx\n self.centery = self.rect.centery\n\n self.moving_right = False\n self.moving_left = False\n self.moving_up = True\n self.moving_down = False", "def clear_crossfilter2(self):\n print ('Trigger clear')\n self.query_dict = {}\n self.plot_data = None\n self.create_figure_new()\n layout_doc.children[4].children[1] = self.p", "def test_inverse( centering='SYMMETRIC'):\n\n\n npupil = 300 #156\n pctr = int(npupil/2)\n npix = 100 #1024\n u = 20 #100 # of lam/D\n\n npix, u = 2000, 200\n s = (npupil,npupil)\n\n\n\n\n mft1 = matrixDFT.MatrixFourierTransform(centering=centering)\n\n ctr = (float(npupil)/2.0, float(npupil)/2.0 )\n #print ctr\n pupil = makedisk(s=s, c=ctr, r=float(npupil)/2.0001, t=np.float64, grey=0)\n pupil /= np.sqrt(pupil.sum())\n\n pupil[100:200, 30:50] = 0\n pupil[0:50, 140:160] = 0\n\n plt.subplot(141)\n plt.imshow(pupil)\n\n print \"Pupil 1 total:\", pupil.sum() \n\n a = mft1.perform(pupil, u, npix)\n\n asf = a.real.copy()\n cpsf = a * a.conjugate()\n psf = cpsf.real.copy()\n print \"PSF total\", psf.sum()\n \n plt.subplot(142)\n plt.imshow(psf, norm=matplotlib.colors.LogNorm(1e-8, 1.0))\n\n plt.subplot(143)\n\n pupil2 = mft1.inverse(a, u, npupil)\n pupil2r = (pupil2 * pupil2.conjugate()).real\n plt.imshow( pupil2r)\n\n print \"Pupil 2 total:\", pupil2r.sum() \n\n\n\n a2 = mft1.perform(pupil2r, u, npix)\n psf2 = (a2*a2.conjugate()).real.copy()\n print \"PSF total\", psf2.sum()\n plt.subplot(144)\n plt.imshow(psf2, norm=matplotlib.colors.LogNorm(1e-8, 1.0))", "def off_screen(self):\n return self._x < 0", "def reset(self):\n self.vrp = np.matrix([0.5, 0.5, 1])\n self.vpn = np.matrix([0, 0, -1])\n self.vup = np.matrix([0, 1, 0])\n self.u = np.matrix([-1, 0, 0])\n self.extent = [1., 1., 1.]\n self.screen = [400., 400.]\n self.offset = [20., 20.]", "def no_gradient_fusion():\n pass", "def clean(self):\n\n if (self.clean_level == 'dusty') | (self.clean_level == 'clean'):\n idx, = np.where(self['B_flag'] == 0)\n self.data = self[idx, :]\n\n return", "def __reset_crosshair(self):\n self.lhor.set_ydata(self.y_coord)\n self.lver.set_xdata(self.x_coord)", "def clear():\n\tglobal _s\n\t_s.screen.fill(_s.back)\n\t_s.tab(0,0)\n\t_flip()", "def cla(self):\n self.disable_mouse_rotation()\n Axes.cla(self)\n self.grid(rcParams['axes3d.grid'])", "def _switch(self):\n self.fill= not self.fill", "def initializeDomainBorder(self):\n #Read image of the structure\n if (self.PictureExistance == \"'yes'\"):\n self.__processImage()\n #re-define the domain size with the layers of boundaries and ghost points\n self.ny, self.nx = self.effectiveDomain.shape\n print('Now the size of domain is %g and %g' %(self.ny, self.nx))\n else:\n self.isDomain = sp.empty([self.ny, self.nx], dtype = np.bool)\n self.isSolid = sp.empty([self.ny, self.nx], dtype = np.bool)\n self.isDomain, self.isSolid = defineGeometry(self.nx, self.ny)\n if (self.PictureExistance == \"'yes'\"):\n self.originalXdim = self.nx\n self.isDomain = sp.empty([self.ny, self.nx], dtype = np.bool)\n self.isSolid = sp.empty([self.ny, self.nx], dtype = np.bool)\n #define the boundary position\n if (self.isCycles == \"'yes'\"):\n self.isBoundaryFluid2 = sp.empty([self.ny, self.nx], \\\n dtype = np.bool)\n# self.isFluidBoundary = sp.empty([self.ny, self.nx], dtype = np.bool)\n self.isDomain[:, :] = 1; self.isSolid[:, :] = 0\n for i in 
sp.arange(self.ny):\n for j in sp.arange(self.nx):\n if (self.effectiveDomain[i, j] == 0.0):\n self.isDomain[i, j] = 0\n self.isSolid[i, j] = 1\n self.voidSpace = np.count_nonzero(self.isDomain)\n print('The number of vexls in void space is %g.' % self.voidSpace)\n print('The porosity of the layout is %f.' % (self.voidSpace / (self.isDomain.size)))", "def reset(self):\n self.position = np.zeros(self.ndegres)\n self.velocity = np.zeros(self.ndegres)\n self.state = np.zeros(2*self.ndegres)\n self.flag = 0\n self.h_ref = np.array([self.ref for _ in range(self.horizon)])\n self.action = np.zeros(self.ACTION_DIM) \n self.h_action = np.zeros(self.ACTION_DIM*self.horizon)", "def unoccupied(self):\n self.is_occupied = 0\n for hex in self.fon:\n hex.remove_neighbor()\n hex.set_quality()", "def set_defaults(self):\n if not self.HAS_DS9: # pragma: no cover\n return\n self.run('frame delete all')\n self.run('wcs degrees')\n if self.disp_parameters['tile']:\n self.run('tile yes')\n else:\n self.run('tile no')\n self.cs = str(self.disp_parameters['lock_image']).lower()\n self.lock()", "def is_compact(self):\n return self.n_rays()==0 and self.n_lines()==0", "def reset(self):\r\n # reset Wheel encoders\r\n self.start_time = time.time()\r\n [left_start, right_start] = self.Roomba.Query(43, 44)\r\n self.Motion.reset(left_start, right_start)\r\n # reset bumper\r\n self.bumper.reset()\r\n\r\n #reset grid world data\r\n self.action=[0.0,0.0]\r\n self.grid_state= [0,0,0]\r\n self.real_state = [0.0, 0.0, 0.0]\r\n self.trans_model = None\r\n pass", "def clear(self):\n if self.flag == 0:\n for coord in INDICES:\n self.kill(coord)\n self.chart[coord] = DEAD", "def fix(hobj):\n\n h.execute('create axon[2]', hobj)\n\n for sec in hobj.axon:\n sec.L = 30\n sec.diam = 1\n hobj.axonal.append(sec=sec)\n hobj.all.append(sec=sec) # need to remove this comment\n\n hobj.axon[0].connect(hobj.soma[0], 0.5, 0)\n hobj.axon[1].connect(hobj.axon[0], 1, 0)\n\n h.define_shape()", "def setUseGizmos(value=True):\n global cc\n cc = not value", "def clear_transforms(self): # -> None:\n ...", "def flip_faceup(self):\r\n self.faceup = True", "def cb_reset(event):\n axDirichlet.cla()\n # Reset Sliders\n sAlpha0.reset() # resetが駄目!一番最初に戻ってしまう\n sAlpha1.reset()\n sAlpha2.reset()\n alpha_update = [sAlpha0.val, sAlpha1.val, sAlpha2.val]\n print('alpha_update=', alpha_update)\n\n # ML\n lambda_ML = CatML.MLinfer(x_cat)\n\n axML.cla()\n drawBarGraph( axML, \"ML\", lambda_ML, bar_y_max, col_ML ) # Draw Bar graph\n\n\n # MAP\n dirichlet.set_param(alpha_update)\n lambda_MAP = CatMAP.MAPinfer(x_cat, dirichlet)\n\n axMAP.cla()\n drawBarGraph( axMAP, \"MAP\", lambda_MAP, bar_y_max, col_MAP ) # Draw Bar Graph\n\n # Bayes\n posteriorDirichlet.set_param(alpha_update)\n posteriorDirichlet.calcPosterior(x_cat)\n lambda_Bayes = np.zeros(3)\n for k in range(3):\n lambda_Bayes[k] = posteriorDirichlet.BayesInfer(k)\n\n axBayes.cla()\n drawBarGraph( axBayes, \"Bayes\", lambda_Bayes, bar_y_max, col_Bayes ) # Draw Bar Graph\n\n draw_pdf_contours(axDirichlet, dirichlet, True) # Draw Dirichlet\n\n print('Reset')\n print('lambda_ML =', lambda_ML)\n print('lambda_MAP =', lambda_MAP)\n print('lambda_Bayes=', lambda_Bayes)\n draw_point(axDirichlet, lambda_ML, col_ML)\n draw_point(axDirichlet, lambda_MAP, col_MAP)\n draw_point(axDirichlet, lambda_Bayes, col_Bayes)\n draw_point(axLikelihood, lambda_ML, col_ML)\n draw_point(axPosteriorDirichlet, lambda_MAP, col_MAP)\n draw_point(axPosteriorDirichlet, lambda_Bayes, col_Bayes)\n\n fig.canvas.draw_idle()", "def 
middlemakevisible(self, pos):\n pass", "def testCoords1D(realSpace, fourierSpace, X, dx):\n \n def cosines(freqs, Nyquist, real, fourier, x):\n for i in range(0, len(freqs)):\n real = real + np.cos(2*np.pi * freqs[i] * x)\n \n fourier = np.fft.fftshift(np.fft.fft(real))\n return real, fourier\n \n def gauss(sigma, mu, real, fourier, x): # Still need to fix\n real = 1/(sigma * np.sqrt(2 * np.pi)) * np.exp( - (x - mu)**2 / (2 * sigma**2) )\n fourier = np.fft.fftshift(real)\n return real, fourier\n \n def window(w, real, fourier, x):\n real = real*0 + 1\n mask = np.abs(x)-w/2 < 0\n real = real*mask\n #fourier = np.fft.fftshift(np.fft.fft(real))\n fourier = FFT1(real)\n #f1 = np.fft.fft(real)\n #ifft = np.fft.ifft(np.fft.fftshift(fourier))\n #ifft = np.fft.ifft(np.fft.ifftshift(fourier))\n ifft = IFFT1(fourier)\n return real, fourier, ifft\n \n N = len(realSpace)\n xVals = np.arange(-X/2, X/2 + dx, dx)\n f = np.linspace(0, X, N)/X\n const = 0*realSpace + 1\n impulse = np.fft.fft(const)\n #plt.plot(f, impulse)\n cosines, impulses = cosines([1], X/2, const*0, f, xVals)\n #plt.plot(xVals, cosines)\n # plt.plot(f*X, np.abs(impulses))\n gauss, gaussFT = gauss(1, 0, const*0, f, xVals)\n window, sinc, iwindow = window(X/4, const, f, xVals)\n plt.plot(f - f[-1]/2, np.abs(sinc))\n #plt.plot(xVals, iwindow)", "def Face_Cycle_R(self,event):\n t=event.widget.find_closest(event.x, event.y)[0]\n u=int(self.canvas.itemcget(t,\"tags\").split()[1])\n if u not in [4,13,22,31,40,49]:\n v=self.colours_face[self.ocol[self.canvas.itemcget(t,\"fill\")][1]][0][0]\n self.cubestring[u]=v\n self.cube.cube = self.cubestring#Cube.set(self.cubestring)\n self.map_face()\n else:\n print(\"Cant Change Center Cubit\")", "def debug_clear(self):\n if self.display:\n from capture_graphics_display import PacmanGraphics\n if isinstance(self.display, PacmanGraphics):\n self.display.clear_debug()", "def resetTransformations():\n dislin.trfres()", "def all_off(self):\n self.fill_off()\n self.update()\n self.fill_off()\n self.update()", "def fiducial_evolution():\n \n # fiducial model\n wangle = 180*u.deg\n pk = pickle.load(open('../data/gd1_fiducial.pkl', 'rb'))\n x = pk['x'].to(u.kpc)\n xorig = x[:2]\n \n plt.close()\n fig, ax = plt.subplots(1,1,figsize=(6,6))\n \n plt.sca(ax)\n \n Nsnap = 8\n times = np.linspace(0,0.5,Nsnap)[::-1]\n angles = np.linspace(0,322,Nsnap)[::-1]*u.deg\n\n for e, t in enumerate(times):\n c = mpl.cm.Blues(0.05+0.85*(Nsnap-e)/Nsnap)\n #a = 0.5 + 0.5*(Nsnap-e)/Nsnap\n \n pk = pickle.load(open('../data/gd1_fiducial_t{:.4f}.pkl'.format(t), 'rb'))\n x = pk['x'].to(u.kpc)\n x_, y_ = x[0], x[1]\n \n plt.plot(x_[120:-120], y_[120:-120], '.', color=c, ms=10, zorder=Nsnap-e, rasterized=False)\n \n xt = 24*np.cos(angles[e]+90*u.deg)\n yt = 24*np.sin(angles[e]+90*u.deg)\n if e<Nsnap-1:\n txt = plt.text(xt, yt, '+ {:.2f} Gyr'.format(t), va='center', ha='center', fontsize='small', color='0.2', rotation=(angles[e]).value, zorder=10)\n txt.set_bbox(dict(facecolor='w', alpha=0.7, ec='none'))\n \n plt.text(0, 24, 'Flyby', va='center', ha='center', fontsize='small', color='0.2')\n\n lim = 27\n plt.xlim(-lim,lim)\n plt.ylim(-lim,lim)\n plt.gca().set_aspect('equal')\n \n plt.xlabel('x [kpc]')\n plt.ylabel('y [kpc]')\n \n plt.tight_layout()\n plt.savefig('../plots/loop_evolution.pdf')", "def clear(self):\r\n self.SetPoint = 0.0\r\n\r\n self.PTerm = 0.0\r\n self.ITerm = 0.0\r\n self.DTerm = 0.0\r\n self.last_error = 0.0\r\n\r\n # Windup Guard\r\n self.int_error = 0.0\r\n self.windup_guard = 20.0\r\n\r\n self.output = 0.0", "def 
before_flip(self):\n from klibs.KLGraphics import blit\n\n if P.show_gaze_dot and self.el.recording:\n try:\n blit(self.tracker_dot, 5, self.el.gaze())\n except RuntimeError:\n pass", "def proz_noe():\r\n print(\"processing: \",CURDATA()[0]) \r\n Check_180turn(left_boundary,right_boundary)\r\n EFP() # Exponential window multiplication + Fourier Transformation + phase correction\r\n ABS() # Baseline correction \r\n Check_180turn(left_boundary,right_boundary)", "def enable3D(self):\r\n if(self.dataController.fileLoaded==True):\r\n self.dataController.toggleInteractiveMode()\r\n\r\n self.midsagittalView = False\r\n self.frontView = False\r\n self.topView = False\r\n self.bottomView = False\r\n self.threeDView = True", "def onSetToCustDims(self, evt):\n\t\tself.halfResampleZ.Enable(0)\n\t\tself.fourthResampleZ.Enable(0)\n\t\t\n\t\tfor obj in [self.factorLabel, self.dimLabel, self.newDimX, self.newDimY, self.newDimZ, self.factorX, self.factorY, self.factorZ]:\n\t\t\tobj.Enable(1)\n\t\ttry:\n\t\t\trx = int(self.newDimX.GetValue())\n\t\t\try = int(self.newDimY.GetValue())\n\t\t\trz = int(self.newDimZ.GetValue())\n\t\t\tself.currSize = (rx, ry, rz)\n\t\texcept:\n\t\t\tpass", "def flip(self):\n \n if self.faceup:\n self.faceup = False\n else:\n self.faceup = True", "def zero(self):\n\t\tself.angle = 0.0\n\t\tself.draw()\n\t\ttime.sleep(self.delay)", "def reset(self):\n self.u0.fill(0.)\n self.u1.fill(0.)\n self.u2.fill(0.)\n self.time = 0.", "def _e0s_center(self, hdr):\n # Called E0SNano in OpenMIMS\n # b field index and e0s center enabled added to sub dict from main nano header\n d = {}\n d['b field index'], d['detector'], d['start'], \\\n d['step size'], d['count time'], d['center'], \\\n d['80% width'], d['E0S center enabled'] = \\\n unpack(self._bo + '5i 2d i', hdr.read(40))\n\n d['E0S center enabled'] = bool(d['E0S center enabled'])\n d['count time'] /= 100 # 10 ms increments to seconds\n if d['detector'] < 0:\n d['detector'] = None\n if d['b field index'] < 0:\n d['b field index'] = None\n return d", "def restorePositionsOfUnownedAntennas() :\n if ( s.getInitializationFlag() == True ): return\n unownedAnts = subarrayAntSetup( True )\n progress(\"Setting positions of unowned and uninitialized antennas %s\" % helpers.formatAsRanges( unownedAnts) )\n progress(\"....Pads\")\n restoreAntCommand(pad, unownedAnts, subarray=DEFAULT)\n progress( \"....Pad Offsets\" )\n restoreAntCommand( padOffset, unownedAnts, subarray=DEFAULT )\n progress(\"....Antenna positional offset and axis non-intersection\")\n restoreAntCommand( antennaOffset, unownedAnts, subarray=DEFAULT )\n restoreAntCommand( axisNonIntersection, unownedAnts, subarray=DEFAULT )", "def flip(self):", "def backToMiddlePos():\n\tprogMode(True) # Active le couple de servos\n\taxDriver.goToPosition(axDriver.BROADCASTID, 0x1FF) # Renvoie a la position 0x1FF", "def setup_draw(self):\n pass", "def reset(self):\r\n self.x = self.initX\r\n self.y = self.initY\r\n self.dir= self.initDir", "def clickClearReferences(self, event):\n self.whiteReference = None\n self.lightBtn.color = '0.85'\n self.darkReference = None\n self.darkBtn.color = '0.85'\n plt.pause(0.3)\n self.axes.autoscale_view()", "def clear(self):\n\n self.index = 1\n self.degen = 1.\n self.nnnn_out = False\n self.json_out = False\n self.verbose = False\n self.ipol = 0\n self.ellip = 0.\n self.nepts = 0\n self.genfmt_order = 2\n self.genfmt_vers = \"\"\n self.exch_label = \"\"\n self.rs = 0.\n self.vint = 0.\n self.xmu = 0.\n self.edge = 0.\n self.kf = 0.\n self.rnorman = 0.\n self.gamach 
= 0.\n self.nepts = FEFF_maxpts\n\n dargs = dict(dtype=np.float64, order='F')\n largs = dict(dtype=np.int32, order='F')\n\n self.evec = np.zeros(3, **dargs)\n self.xivec = np.zeros(3, **dargs)\n self.ipot = np.zeros(1+FEFF_maxleg, **largs)\n self.beta = np.zeros(1+FEFF_maxleg, **dargs)\n self.eta = np.zeros(2+FEFF_maxleg, **dargs)\n self.ri = np.zeros(FEFF_maxleg, **dargs)\n self.rat = np.zeros((3, 2+FEFF_maxleg), **dargs)\n self.iz = np.zeros(1+FEFF_maxpot, **largs)\n self.kfeff = np.zeros(FEFF_maxpts, **dargs)\n self.real_phc = np.zeros(FEFF_maxpts, **dargs)\n self.mag_feff = np.zeros(FEFF_maxpts, **dargs)\n self.pha_feff = np.zeros(FEFF_maxpts, **dargs)\n self.red_fact = np.zeros(FEFF_maxpts, **dargs)\n self.lam = np.zeros(FEFF_maxpts, **dargs)\n self.rep = np.zeros(FEFF_maxpts, **dargs)\n self.nleg = 1", "def setup():\n size(SPACE['w'], SPACE['h'])\n colorMode(RGB, 1)", "def vis_mechanically_coupled_regions(img_dir,output_dir,data,dbscn_length,dbscn_min_size,display_not_save=False):\n #Read in the image that is segmented/labelled for nuclei\n img=imread(img_dir)\n\n #save plots to show clusters\n fig = plt.figure(figsize=(6, 2))\n ax0 = fig.add_subplot(131)\n ax1 = fig.add_subplot(132)\n ax3 = fig.add_subplot(133)\n #show segmented image labels\n ax0.imshow(img,aspect='auto') \n ax0.axis('off')\n #nuclear centroid color-coded by their orientation\n img1=ax1.scatter(data[\"Y\"], data[\"X\"], c=data[\"angles\"],s=1)\n ax1.set_xlim(0,img.shape[0])\n ax1.set_ylim(img.shape[1],0)\n plt.colorbar(img1)\n ax1.axis('off')\n\n # plot the cluster assignments\n img3=ax3.scatter(data[data[\"clusters\"]> -1][\"Y\"], data[data[\"clusters\"]> -1][\"X\"], \n c=data[data[\"clusters\"]> -1][\"clusters\"],cmap=\"plasma\",s=1)\n ax3.set_xlim(0,img.shape[0])\n ax3.set_ylim(img.shape[1],0)\n ax3.axis('off')\n\n #add titles\n ax0.title.set_text('Segmented Image')\n ax1.title.set_text('Filtered Orientation')\n ax3.title.set_text('Clusters')\n\n if display_not_save:\n plt.show()\n else: \n plt.savefig((output_dir+\"/\"+img_dir.rsplit('/', 1)[-1][:-4]+\"_\"+str(dbscn_length)+\"_\"+ str(dbscn_min_size)+\".png\"),dpi=600, bbox_inches = 'tight',pad_inches = 0)\n fig.clf()\n plt.close(fig)\n plt.close('all')\n \n \n del fig,ax0,ax1,ax3,img1,img3", "def clean_axes(axl):\n cleanAxes(axl)", "def setPxMode(self, b):\n self.setFlag(self.GraphicsItemFlag.ItemIgnoresTransformations, b)", "def clear(self):\n self.cmd(0x33) # $33 8-bit mode\n self.cmd(0x32) # $32 8-bit mode\n self.cmd(0x28) # $28 8-bit mode\n self.cmd(0x0C) # $0C 8-bit mode\n self.cmd(0x06) # $06 8-bit mode\n self.cmd(0x01) # $01 8-bit mode", "def _set_draw_mode(draw_mode):\n###############################################################################\n global _draw_mode\n _draw_mode = draw_mode", "def fig_craco_fiducial(outfile='fig_craco_fiducial.png',\n zmax=2.5,DMmax=2500,\n show_Macquart=False,\n log=True,\n label='$\\\\log_{10} \\; p(DM_{\\\\rm EG},z)$',\n Aconts=[0.01, 0.1, 0.5],\n cmap='jet', show=False, figsize=None,\n vmnx=(None,None),\n grid=None, survey=None):\n # Generate the grid\n if grid is None or survey is None:\n survey, grid = analy_H0_I.craco_mc_survey_grid()\n\n # Unpack\n full_zDMgrid, zvals, dmvals = grid.rates, grid.zvals, grid.dmvals\n FRBZ=survey.frbs['Z']\n FRBDM=survey.DMEGs\n \n ##### imshow of grid #######\n fsize = 14.\n plt.figure(figsize=figsize)\n ax1=plt.axes()\n plt.sca(ax1)\n \n plt.xlabel('z')\n plt.ylabel('${\\\\rm DM}_{\\\\rm EG}$')\n #plt.title(title+str(H0))\n \n # Cut down grid\n zvals, dmvals, zDMgrid = 
figures.proc_pgrid(\n full_zDMgrid, \n zvals, (0, zmax),\n dmvals, (0, DMmax))\n ddm=dmvals[1]-dmvals[0]\n dz=zvals[1]-zvals[0]\n nz, ndm = zDMgrid.shape\n\n # Contours\n alevels = figures.find_Alevels(full_zDMgrid, Aconts, log=True)\n \n # Ticks\n tvals, ticks = figures.ticks_pgrid(zvals)# , fmt='str4')\n plt.xticks(tvals, ticks)\n tvals, ticks = figures.ticks_pgrid(dmvals, fmt='int')# , fmt='str4')\n plt.yticks(tvals, ticks)\n\n # Image \n im=plt.imshow(zDMgrid.T,cmap=cmap,origin='lower', \n vmin=vmnx[0], vmax=vmnx[1],\n interpolation='None',\n aspect='auto')\n \n styles=['--','-.',':']\n ax=plt.gca()\n cs=ax.contour(zDMgrid.T,levels=alevels,origin='lower',colors=\"white\",linestyles=styles)\n\n ax=plt.gca()\n \n muDMhost=np.log(10**grid.state.host.lmean)\n sigmaDMhost=np.log(10**grid.state.host.lsigma)\n meanHost = np.exp(muDMhost + sigmaDMhost**2/2.)\n medianHost = np.exp(muDMhost) \n print(f\"Host: mean={meanHost}, median={medianHost}\")\n plt.ylim(0,ndm-1)\n plt.xlim(0,nz-1)\n zmax=zvals[-1]\n nz=zvals.size\n #DMbar, zeval = igm.average_DM(zmax, cumul=True, neval=nz+1)\n DM_cosmic = pcosmic.get_mean_DM(zvals, grid.state)\n\n \n #idea is that 1 point is 1, hence...\n zeval = zvals/dz\n DMEG_mean = (DM_cosmic+meanHost)/ddm\n DMEG_median = (DM_cosmic+medianHost)/ddm\n\n # Check median\n f_median = scipy.interpolate.interp1d(\n zvals, DM_cosmic+medianHost, \n fill_value='extrapolate')\n eval_DMEG = f_median(FRBZ)\n above = FRBDM > eval_DMEG\n print(f\"There are {np.sum(above)/len(FRBZ)} above the median\")\n\n if show_Macquart:\n plt.plot(zeval,DMEG_mean,color='gray',linewidth=2,\n label='Macquart relation (mean)')\n plt.plot(zeval,DMEG_median,color='gray',\n linewidth=2, ls='--',\n label='Macquart relation (median)')\n l=plt.legend(loc='lower right',fontsize=12)\n #l=plt.legend(bbox_to_anchor=(0.2, 0.8),fontsize=8)\n #for text in l.get_texts():\n #\ttext.set_color(\"white\")\n \n # limit to a reasonable range if logscale\n if log and vmnx[0] is None:\n themax=zDMgrid.max()\n themin=int(themax-4)\n themax=int(themax)\n plt.clim(themin,themax)\n \n ##### add FRB host galaxies at some DM/redshift #####\n if FRBZ is not None:\n iDMs=FRBDM/ddm\n iZ=FRBZ/dz\n # Restrict to plot range\n gd = (FRBDM < DMmax) & (FRBZ < zmax)\n plt.plot(iZ[gd],iDMs[gd],'ko',linestyle=\"\",markersize=2.)\n\n cbar=plt.colorbar(im,fraction=0.046, shrink=1.2,aspect=15,pad=0.05)\n cbar.set_label(label)\n\n fig_utils.set_fontsize(ax, fsize)\n \n plt.tight_layout()\n \n if show:\n plt.show()\n else:\n plt.savefig(outfile, dpi=300)\n print(f\"Wrote: {outfile}\")\n plt.close()", "def clear(self):\n row, col = self.selected\n if self.cubes[row][col].value == 0:\n self.cubes[row][col].set_temp(0)", "def clear_flags(self):\n self.flags.clear()", "def clear(self):\n self.SetPoint = 0.0\n\n self.PTerm = 0.0\n self.ITerm = 0.0\n self.DTerm = 0.0\n self.last_error = 0.0\n\n # Windup Guard\n self.int_error = 0.0\n self.windup_guard = 20.0\n\n self.output = 0.0", "def clear(self):\n self.SetPoint = 0.0\n\n self.PTerm = 0.0\n self.ITerm = 0.0\n self.DTerm = 0.0\n self.last_error = 0.0\n\n # Windup Guard\n self.int_error = 0.0\n self.windup_guard = 20.0\n\n self.output = 0.0", "def clear(self):\n self.SetPoint = 0.0\n\n self.PTerm = 0.0\n self.ITerm = 0.0\n self.DTerm = 0.0\n self.last_error = 0.0\n\n # Windup Guard\n self.int_error = 0.0\n self.windup_guard = 20.0\n\n self.output = 0.0", "def off(self):", "def cleanup(self):\n self.subpixel, self.pixel = self.stepup(self.subpixel, self.pixel, AxisDistance.pixelsize)\n self.pixel, 
self.tile = self.stepup(self.pixel, self.tile, AxisDistance.tilesize)", "def segmentNeedle(self):\n #productive #event\n profprint()\n if self.fiducialButton.isEnabled():\n print \"new checked state: \",not self.fiducialButton.checked\n self.onStartStopGivingNeedleTipsToggled(not self.fiducialButton.checked)", "def reset() -> None:\n\t_flag.clear()", "def freespaceImageAnalysis( fids, guesses = None, fit=True, bgInput=None, bgPcInput=None, shapes=[None], zeroCorrection=0, zeroCorrectionPC=0,\n keys=None, fitModule=bump, extraPicDictionaries=None, newAnnotation=False, onlyThisPic=None, pltVSize=5, \n plotSigmas=False, plotCounts=False, manualColorRange=None, calcTemperature=False, clearOutput=True, \n dataRange=None, guessTemp=10e-6, trackFitCenter=False, picsPerRep=1, startPic=0, binningParams=None, \n win=pw.PictureWindow(), transferAnalysisOpts=None, tferBinningParams=None, tferWin= pw.PictureWindow(),\n extraTferAnalysisArgs={}, emGainSetting=300, lastConditionIsBackGround=True, showTferAnalysisPlots=True,\n show2dFitsAndResiduals=True, plotFitAmps=False, indvColorRanges=False, fitF2D=gaussian_2d.f_notheta, \n rmHighCounts=True, useBase=True, weightBackgroundByLoading=True, returnPics=False, forceNoAnnotation=False):\n fids = [fids] if type(fids) == int else fids\n keys = [None for _ in fids] if keys is None else keys\n sortedStackedPics = {}\n initThresholds = [None]\n picsForBg = []\n bgWeights = []\n isAnnotatedList = []\n for filenum, fid in enumerate(fids):\n if transferAnalysisOpts is not None:\n res = ta.stage1TransferAnalysis( fid, transferAnalysisOpts, useBase=useBase, **extraTferAnalysisArgs )\n (initAtoms, tferAtoms, initAtomsPs, tferAtomsPs, key, keyName, initPicCounts, tferPicCounts, repetitions, initThresholds,\n avgPics, tferThresholds, initAtomImages, tferAtomImages, basicInfoStr, ensembleHits, groupedPostSelectedPics, isAnnotated) = res\n isAnnotatedList.append(isAnnotated)\n # assumes that you only want to look at the first condition. \n for varPics in groupedPostSelectedPics: # don't remember why 0 works if false...\n picsForBg.append(varPics[-1 if lastConditionIsBackGround else 0])\n bgWeights.append(len(varPics[0]))\n allFSIPics = [ varpics[0][startPic::picsPerRep] for varpics in groupedPostSelectedPics]\n if showTferAnalysisPlots:\n fig, axs = plt.subplots(1,2)\n mp.makeAvgPlts( axs[0], axs[1], avgPics, transferAnalysisOpts, ['r','g','b'] ) \n allFSIPics = [win.window( np.array(pics) ) for pics in allFSIPics]\n allFSIPics = ah.softwareBinning( binningParams, allFSIPics )\n elif type(fid) == int:\n ### For looking at either PGC imgs or FSI imgs \n with exp.ExpFile(fid) as file:\n # I think this only makes sense if there is a specific bg pic in the rotation\n picsForBg.append(list(file.get_pics()))\n allFSIPics = file.get_pics()[startPic::picsPerRep]\n _, key = file.get_key()\n if len(np.array(key).shape) == 2:\n key = key[:,0]\n file.get_basic_info()\n allFSIPics = win.window( allFSIPics )\n allFSIPics = ah.softwareBinning( binningParams, allFSIPics )\n allFSIPics = np.reshape( allFSIPics, (len(key), int(allFSIPics.shape[0]/len(key)), allFSIPics.shape[1], allFSIPics.shape[2]) )\n else:\n ### Assumes given pics have the same start pic and increment (picsPerRep).\n # doesn't combine well w/ transfer analysis\n picsForBg.append(fid)\n allFSIPics = fid[startPic::picsPerRep]\n print(\"Assuming input is list of all pics, then splices to get FSI pics. 
Old code assumed the given were FSI pics.\")\n allFSIPics = win.window( allFSIPics )\n allFSIPics = ah.softwareBinning( binningParams, allFSIPics )\n allFSIPics = np.reshape( allFSIPics, (len(key), int(allFSIPics.shape[0]/len(key)), allFSIPics.shape[1], allFSIPics.shape[2]) )\n # ##############\n if keys[filenum] is not None:\n key = keys[filenum]\n for i, keyV in enumerate(key):\n keyV = misc.round_sig_str(keyV)\n sortedStackedPics[keyV] = np.append(sortedStackedPics[keyV], allFSIPics[i],axis=0) if (keyV in sortedStackedPics) else allFSIPics[i] \n if lastConditionIsBackGround:\n bgInput, pcBgInput = getBgImgs(picsForBg, startPic = startPic, picsPerRep = picsPerRep, rmHighCounts=rmHighCounts, bgWeights=bgWeights, \n weightBackgrounds=weightBackgroundByLoading)\n elif bgInput == 'lastPic':\n bgInput, pcBgInput = getBgImgs(picsForBg, startPic = picsPerRep-1, picsPerRep = picsPerRep, rmHighCounts=rmHighCounts, bgWeights=bgWeights,\n weightBackgrounds=weightBackgroundByLoading )\n if bgInput is not None: # was broken and not working if not given bg\n bgInput = win.window(bgInput)\n bgInput = ah.softwareBinning(binningParams, bgInput)\n if bgPcInput is not None:\n bgPcInput = win.window(bgPcInput)\n bgPcInput = ah.softwareBinning(binningParams, bgPcInput) \n \n if extraPicDictionaries is not None:\n if type(extraPicDictionaries) == dict:\n extraPicDictionaries = [extraPicDictionaries]\n for dictionary in extraPicDictionaries:\n for keyV, pics in dictionary.items():\n sortedStackedPics[keyV] = (np.append(sortedStackedPics[keyV], pics,axis=0) if keyV in sortedStackedPics else pics) \n sortedStackedKeyFl = [float(keyStr) for keyStr in sortedStackedPics.keys()]\n sortedKey, sortedStackedPics = ah.applyDataRange(dataRange, sortedStackedPics, list(sorted(sortedStackedKeyFl)))\n numVars = len(sortedStackedPics.items())\n if len(np.array(shapes).shape) == 1:\n shapes = [shapes for _ in range(numVars)] \n if guesses is None:\n guesses = [[None for _ in range(4)] for _ in range(numVars)]\n if len(np.array(bgInput).shape) == 2 or bgInput == None:\n bgInput = [bgInput for _ in range(numVars)]\n if len(np.array(bgPcInput).shape) == 2 or bgPcInput == None:\n bgPcInput = [bgPcInput for _ in range(numVars)]\n \n datalen, avgFitSigmas, images, hFitParams, hFitErrs, vFitParams, vFitErrs, fitParams2D, fitErrs2D = [{} for _ in range(9)]\n titles = ['Bare', 'Photon-Count', 'Bare-mbg', 'Photon-Count-mbg']\n assert(len(sortedKey)>0)\n for vari, keyV in enumerate(sortedKey):\n keyV=misc.round_sig_str(keyV)\n if vari==0:\n initKeyv = keyV\n varPics = sortedStackedPics[keyV]\n # 0 is init atom pics for post-selection on atom number... 
if we wanted to.\n expansionPics = rmHighCountPics(varPics,7000) if rmHighCounts else varPics\n datalen[keyV] = len(expansionPics)\n expPhotonCountImage = photonCounting(expansionPics, 120)[0] / len(expansionPics)\n bgPhotonCountImage = np.zeros(expansionPics[0].shape) if bgPcInput[vari] is None else bgPcInput[vari]\n expAvg = np.mean(expansionPics, 0)\n bgAvg = np.zeros(expansionPics[0].shape) if (bgInput[vari] is None or len(bgInput[vari]) == 1) else bgInput[vari]\n \n if bgPhotonCountImage is None:\n print('no bg photon', expAvg.shape)\n bgPhotonCount = np.zeros(photonCountImage.shape)\n avg_mbg = expAvg - bgAvg\n avg_mbgpc = expPhotonCountImage - bgPhotonCountImage\n images[keyV] = [expAvg, expPhotonCountImage, avg_mbg, avg_mbgpc]\n hFitParams[keyV], hFitErrs[keyV], vFitParams[keyV], vFitErrs[keyV], fitParams2D[keyV], fitErrs2D[keyV] = [[] for _ in range(6)]\n for imnum, (im, guess) in enumerate(zip(images[keyV], guesses[vari])):\n if fit:\n # fancy guess_x and guess_y values use the initial fitted value, typically short time, as a guess.\n _, pictureFitParams2d, pictureFitErrors2d, v_params, v_errs, h_params, h_errs = ah.fitPic(\n im, guessSigma_x=5, guessSigma_y=5, showFit=False, \n guess_x=None if vari==0 else fitParams2D[initKeyv][imnum][1], guess_y=None if vari==0 else fitParams2D[initKeyv][imnum][2],\n fitF=fitF2D)\n fitParams2D[keyV].append(pictureFitParams2d)\n fitErrs2D[keyV].append(pictureFitErrors2d)\n hFitParams[keyV].append(h_params)\n hFitErrs[keyV].append(h_errs)\n vFitParams[keyV].append(v_params)\n vFitErrs[keyV].append(v_errs)\n # conversion from the num of pixels on the camera to microns at the focus of the tweezers\n cf = 16e-6/64\n mins, maxes = [[], []]\n imgs_ = np.array(list(images.values()))\n for imgInc in range(4):\n if indvColorRanges:\n mins.append(None)\n maxes.append(None)\n elif manualColorRange is None:\n mins.append(min(imgs_[:,imgInc].flatten()))\n maxes.append(max(imgs_[:,imgInc].flatten()))\n else:\n mins.append(manualColorRange[0])\n maxes.append(manualColorRange[1])\n numVariations = len(images)\n if onlyThisPic is None:\n fig, axs = plt.subplots(numVariations, 4, figsize=(20, pltVSize*numVariations))\n if numVariations == 1:\n axs = np.array([axs])\n bgFig, bgAxs = plt.subplots(1, 2, figsize=(20, pltVSize))\n else:\n numRows = int(np.ceil((numVariations+3)/4))\n fig, axs = plt.subplots(numRows, 4 if numVariations>1 else 3, figsize=(20, pltVSize*numRows))\n avgPicAx = axs.flatten()[-3]\n avgPicFig = fig\n bgAxs = [axs.flatten()[-1], axs.flatten()[-2]]\n bgFig = fig\n if show2dFitsAndResiduals:\n fig2d, axs2d = plt.subplots(*((2,numVariations) if numVariations>1 else (1,2)))\n keyPlt = np.zeros(len(images))\n (totalSignal, hfitCenter, hFitCenterErrs, hSigmas, hSigmaErrs, h_amp, hAmpErrs, vfitCenter, vFitCenterErrs, vSigmas, vSigmaErrs, v_amp, \n vAmpErrs, hSigma2D, hSigma2dErr, vSigma2D, vSigma2dErr) = [np.zeros((len(images), 4)) for _ in range(17)]\n \n for vari, ((keyV,ims), hParamSet, hErr_set, vParamSet, vErr_set, paramSet2D, errSet2D) in enumerate(zip(\n images.items(), *[dic.values() for dic in [hFitParams, hFitErrs, vFitParams, vFitErrs, fitParams2D, fitErrs2D]])):\n for which in range(4):\n if onlyThisPic is None:\n (im, ax, title, min_, max_, hparams, hErrs, vparams, vErrs, param2d, err2d\n ) = [obj[which] for obj in (ims, axs[vari], titles, mins, maxes, hParamSet, hErr_set, vParamSet, vErr_set, paramSet2D, errSet2D)] \n else:\n which = onlyThisPic\n ax = axs.flatten()[vari]\n (im, title, min_, max_, hparams, hErrs, vparams, vErrs, 
param2d, err2d\n ) = [obj[which] for obj in (ims, titles, mins, maxes, hParamSet, hErr_set, vParamSet, vErr_set, paramSet2D, errSet2D)] \n h_amp[vari][which], hfitCenter[vari][which], hSigmas[vari][which] = hparams[0], hparams[1], hparams[2]*cf*1e6\n hAmpErrs[vari][which], hFitCenterErrs[vari][which], hSigmaErrs[vari][which] = hErrs[0], hErrs[1], hErrs[2]*cf*1e6\n v_amp[vari][which], vfitCenter[vari][which], vSigmas[vari][which] = vparams[0], vparams[1], vparams[2]*cf*1e6\n vAmpErrs[vari][which], vFitCenterErrs[vari][which], vSigmaErrs[vari][which] = vErrs[0], vErrs[1], vErrs[2]*cf*1e6\n hSigma2D[vari][which], hSigma2dErr[vari][which], vSigma2D[vari][which], vSigma2dErr[vari][which] = [\n val*cf*1e6 for val in [param2d[-3], err2d[-3], param2d[-2], err2d[-2]]]\n \n totalSignal[vari][which] = np.sum(im.flatten())\n keyPlt[vari] = keyV\n res = mp.fancyImshow(fig, ax, im, imageArgs={'cmap':dark_viridis_cmap, 'vmin':min_, 'vmax':max_}, \n hFitParams=hparams, vFitParams=vparams, fitModule=fitModule, flipVAx = True, fitParams2D=param2d)\n ax.set_title(keyV + ': ' + str(datalen[keyV]) + ';\\n' + title + ': ' + misc.errString(hSigmas[vari][which],hSigmaErrs[vari][which]) \n + r'$\\mu m$ sigma, ' + misc.round_sig_str(totalSignal[vari][which],5), fontsize=12) \n if show2dFitsAndResiduals:\n X, Y = np.meshgrid(np.arange(len(im[0])), np.arange(len(im)))\n data_fitted = fitF2D((X,Y), *param2d)\n fitProper = data_fitted.reshape(im.shape[0],im.shape[1])\n ax1 = axs2d[0] if numVariations == 1 else axs2d[0,vari]\n ax2 = axs2d[1] if numVariations == 1 else axs2d[1,vari]\n imr = ax1.imshow(fitProper, vmin=min_, vmax=max_)\n mp.addAxColorbar(fig2d, ax1, imr)\n ax1.contour(np.arange(len(im[0])), np.arange(len(im)), fitProper, 4, colors='w', alpha=0.2)\n imr = ax2.imshow(fitProper-im)\n mp.addAxColorbar(fig2d, ax2, imr)\n ax2.contour(np.arange(len(im[0])), np.arange(len(im)), fitProper, 4, colors='w', alpha=0.2)\n if onlyThisPic is not None:\n break\n \n mp.fancyImshow(avgPicFig, avgPicAx, np.mean([img[onlyThisPic] for img in images.values()],axis=0), imageArgs={'cmap':dark_viridis_cmap},flipVAx = True)\n avgPicAx.set_title('Average Over Variations')\n ### Plotting background and photon counted background\n mp.fancyImshow(bgFig, bgAxs[0], bgAvg, imageArgs={'cmap':dark_viridis_cmap},flipVAx = True) \n bgAxs[0].set_title('Background image (' + str(len(picsForBg)/picsPerRep) + ')')\n mp.fancyImshow(bgFig, bgAxs[1], bgPhotonCountImage, imageArgs={'cmap':dark_viridis_cmap},flipVAx = True) \n bgAxs[1].set_title('Photon counted background image (' + str(len(picsForBg)/picsPerRep) + ')')\n fig.subplots_adjust(left=0,right=1,bottom=0.1, hspace=0.2, **({'top': 0.7, 'wspace': 0.4} if (onlyThisPic is None) else {'top': 0.9, 'wspace': 0.3}))\n \n disp.display(fig)\n temps, tempErrs, tempFitVs, = [],[],[]\n if calcTemperature: \n for sigmas, sigmaerrs in zip([hSigmas, vSigmas, hSigma2D, vSigma2D],[hSigmaErrs, vSigmaErrs, hSigma2dErr, vSigma2dErr]):\n mbgSigmas = np.array([elt[2] for elt in sigmas])\n mbgSigmaErrs = np.array([elt[2] for elt in sigmaerrs])\n myGuess = [0.0, min((mbgSigmas)*1e-6), guessTemp]\n temp, fitV, cov = ah.calcBallisticTemperature(keyPlt*1e-3, (mbgSigmas)*1e-6, guess = myGuess, sizeErrors = mbgSigmaErrs)\n error = np.sqrt(np.diag(cov))\n temps.append(temp)\n tempErrs.append(error[2])\n tempFitVs.append(fitV)\n numAxisCol = int(plotSigmas) + int(plotCounts) + int(trackFitCenter)\n if numAxisCol != 0:\n fig2, axs = plt.subplots(1, numAxisCol, figsize = (15, 5)) \n fig2.subplots_adjust(top=0.75, wspace = 
0.4)\n colors = ['b','k','c','purple']\n if plotSigmas:\n ax = (axs if numAxisCol == 1 else axs[0]) \n stdStyle = dict(marker='o',linestyle='',capsize=3)\n if onlyThisPic is not None:\n ax.errorbar(keyPlt, hSigmas[:,onlyThisPic], hSigmaErrs[:,onlyThisPic], color=colors[0], label='h '+titles[onlyThisPic], **stdStyle);\n ax.errorbar(keyPlt, hSigma2D[:,onlyThisPic], hSigma2dErr[:,onlyThisPic], color=colors[1], label='2dh '+titles[onlyThisPic], **stdStyle);\n ax.errorbar(keyPlt, vSigmas[:,onlyThisPic], vSigmaErrs[:,onlyThisPic], color=colors[2], label='v '+titles[onlyThisPic], **stdStyle);\n ax.errorbar(keyPlt, vSigma2D[:,onlyThisPic], vSigma2dErr[:,onlyThisPic], color=colors[3], label='2dv '+titles[onlyThisPic], **stdStyle);\n else:\n for whichPic in range(4):\n ax.errorbar(keyPlt, hSigmas[:,whichPic], hSigmaErrs[:,whichPic], color='b', label='h '+titles[whichPic], **stdStyle);\n ax.errorbar(keyPlt, vSigmas[:,whichPic], vSigmaErrs[:,whichPic], color='c', label='v '+titles[whichPic], **stdStyle);\n ax.set_ylim(max(0,ax.get_ylim()[0]),min([ax.get_ylim()[1],5]))\n ax.set_ylabel(r'Fit Sigma ($\\mu m$)')\n \n if calcTemperature:\n # converting time to s, hSigmas in um \n xPoints = np.linspace(min(keyPlt), max(keyPlt))*1e-3\n for num, fitV in enumerate(tempFitVs):\n #ax.plot(xPoints*1e3, LargeBeamMotExpansion.f(xPoints, *myGuess)*1e6, label = 'guess')\n ax.plot(xPoints*1e3, LargeBeamMotExpansion.f(xPoints, *fitV)*1e6, color=colors[num])\n ax.legend()\n\n if plotFitAmps: \n ax = (axs if numAxisCol == 1 else axs[0])\n ampAx = ax.twinx()\n\n if onlyThisPic is not None:\n ampAx.errorbar(keyPlt, h_amp[:,onlyThisPic], hAmpErrs[:,onlyThisPic], label='h '+titles[onlyThisPic], color = 'orange', **stdStyle);\n ampAx.errorbar(keyPlt, v_amp[:,onlyThisPic], vAmpErrs[:,onlyThisPic], label='v '+titles[onlyThisPic], color = 'r', **stdStyle);\n else:\n for whichPic in range(4):\n ampAx.errorbar(keyPlt, h_amp[:,whichPic], hAmpErrs[:,whichPic], label='h '+titles[whichPic], color = 'orange', **stdStyle);\n ampAx.errorbar(keyPlt, v_amp[:,whichPic], vAmpErrs[:,whichPic], label='v '+titles[whichPic], color = 'r', **stdStyle);\n [tick.set_color('red') for tick in ampAx.yaxis.get_ticklines()]\n [tick.set_color('red') for tick in ampAx.yaxis.get_ticklabels()]\n ampAx.set_ylabel(r'Fit h_amps', color = 'r')\n \n hTotalPhotons, vTotalPhotons = None, None\n if plotCounts:\n # numAxCol = 1: ax = axs\n # numAxCol = 2: plotSigmas + plotCounts -- ax = axs[1]\n # numAxCol = 2: plotCounts + trackFitCenter -- ax = axs[0]\n # numAxCol = 3: ax = axs[1]\n if numAxisCol == 1:\n ax = axs\n elif numAxisCol == 2:\n ax = axs[1 if plotSigmas else 0]\n else:\n ax = axs[1]\n # Create axis to plot photon counts\n ax.set_ylabel(r'Integrated signal')\n photon_axis = ax.twinx()\n # This is not currently doing any correct for e.g. the loading rate.\n countToCameraPhotonEM = 0.018577 / (emGainSetting/200) # the float is is EM200. 
\n countToScatteredPhotonEM = 0.018577 / 0.07 / (emGainSetting/200)\n\n if onlyThisPic is not None:\n # calculate number of photons\n hamp = h_amp[:,onlyThisPic]*len(expansionPics[0][0]) # Horizontal \"un\"normalization for number of columns begin averaged.\n vamp = v_amp[:,onlyThisPic]*len(expansionPics[0]) \n hsigpx = hSigmas[:,onlyThisPic]/(16/64) # Convert from um back to to pixels.\n vsigpx = vSigmas[:,onlyThisPic]/(16/64)\n htotalCountsPerPic = bump.area_under(hamp, hsigpx)\n vtotalCountsPerPic = bump.area_under(vamp, vsigpx)\n hTotalPhotons = countToScatteredPhotonEM*htotalCountsPerPic\n vTotalPhotons = countToScatteredPhotonEM*vtotalCountsPerPic\n ax.plot(keyPlt, totalSignal[:,onlyThisPic], marker='o', linestyle='', label=titles[onlyThisPic]);\n photon_axis.plot(keyPlt, hTotalPhotons, marker='o', linestyle='', color = 'r', label='Horizontal')\n photon_axis.plot(keyPlt, vTotalPhotons, marker='o', linestyle='', color = 'orange', label='Vertical')\n else:\n for whichPic in range(4):\n # See above comments\n amp = h_amp[:,whichPic]*len(expansionPics[0][0]) \n sig = hSigmas[:,whichPic]/(16/64) \n totalCountsPerPic = bump.area_under(amp, sig)\n hTotalPhotons = countToScatteredPhotonEM*totalCountsPerPic\n ax.plot(keyPlt, totalSignal[:,whichPic], marker='o', linestyle='', label=titles[whichPic]);\n photon_axis.plot(keyPlt, hTotalPhotons, marker='o', linestyle='', color = ['red', 'orange', 'yellow', 'pink'][whichPic]) \n ax.legend()\n photon_axis.legend()\n [tick.set_color('red') for tick in photon_axis.yaxis.get_ticklines()]\n [tick.set_color('red') for tick in photon_axis.yaxis.get_ticklabels()]\n photon_axis.set_ylabel(r'Fit-Based Avg Scattered Photon/Img', color = 'r')\n if trackFitCenter:\n #numaxcol = 1: ax = axs\n #numaxcol = 2: trackfitcenter + plothSigmas: ax = axs[1]\n #numaxcol = 2: trackfitcenter + plotCounts: ax = axs[1]\n #numaxcol = 3: ax = axs[2]\n ax = (axs if numAxisCol == 1 else axs[-1])\n if onlyThisPic is not None:\n #ax.errorbar(keyPlt, hfitCenter[:,onlyThisPic], hFitCenterErrs[:,onlyThisPic], marker='o', linestyle='', capsize=3, label=titles[onlyThisPic]);\n ax.errorbar(keyPlt, vfitCenter[:,onlyThisPic], vFitCenterErrs[:,onlyThisPic], marker='o', linestyle='', capsize=3, label=titles[onlyThisPic]);\n #def accel(t, x0, a):\n # return x0 + 0.5*a*t**2\n #accelFit, AccelCov = opt.curve_fit(accel, keyPlt*1e-3, hfitCenter[:,onlyThisPic], sigma = hFitCenterErrs[:,onlyThisPic])\n #fitx = np.linspace(keyPlt[0], keyPlt[-1])*1e-3\n #fity = accel(fitx, *accelFit)\n #ax.plot(fitx*1e3, fity)\n else:\n for whichPic in range(4):\n ax.errorbar(keyPlt, hfitCenter[:,whichPic], hFitCenterErrs[:,whichPic], marker='o', linestyle='', capsize=3, label=titles[whichPic]);\n #accelErr = np.sqrt(np.diag(AccelCov))\n fig2.legend()\n ax.set_ylabel(r'Fit Centers (pix)')\n ax.set_xlabel('time (ms)')\n \n if numAxisCol != 0:\n disp.display(fig2) \n \n if not forceNoAnnotation:\n for fid, isAnnotated in zip(fids, isAnnotatedList):\n if not isAnnotated:\n if type(fid) == int or type(fid) == type(''):\n if newAnnotation or not exp.checkAnnotation(fid, force=False, quiet=True, useBase=useBase):\n exp.annotate(fid, useBase=useBase)\n if clearOutput:\n disp.clear_output()\n if calcTemperature: \n for temp, err, label in zip(temps, tempErrs, ['Hor', 'Vert', 'Hor2D', 'Vert2D']): \n print(label + ' temperature = ' + misc.errString(temp*1e6, err*1e6) + 'uk')\n\n for fid in fids:\n if type(fid) == int:\n expTitle, _, lev = exp.getAnnotation(fid)\n expTitle = ''.join('#' for _ in range(lev)) + ' File ' + 
str(fid) + ': ' + expTitle\n disp.display(disp.Markdown(expTitle))\n with exp.ExpFile(fid) as file:\n file.get_basic_info()\n if trackFitCenter:\n pass\n #print('Acceleration in Mpix/s^2 = ' + misc.errString(accelFit[1], accelErr[1]))\n if transferAnalysisOpts is not None and showTferAnalysisPlots:\n colors, colors2 = misc.getColors(len(transferAnalysisOpts.initLocs()) + 2)#, cmStr=dataColor)\n pltShape = (transferAnalysisOpts.initLocsIn[-1], transferAnalysisOpts.initLocsIn[-2])\n # mp.plotThresholdHists([initThresholds[0][0],initThresholds[1][0]], colors, shape=pltShape)\n mp.plotThresholdHists([initThresholds[0][0], initThresholds[0][0]], colors, shape=[1,2])\n returnDictionary = {'images':images, 'fits':hFitParams, 'errs':hFitErrs, 'hSigmas':hSigmas, 'sigmaErrors':hSigmaErrs, 'dataKey':keyPlt, \n 'hTotalPhotons':hTotalPhotons, 'tempCalc':temps, 'tempCalcErr':tempErrs, 'initThresholds':initThresholds[0], \n '2DFit':fitParams2D, '2DErr':fitErrs2D, 'bgPics':picsForBg, 'dataLength':datalen}\n if returnPics: \n returnDictionary['pics'] = sortedStackedPics\n return returnDictionary", "def test_one_center(self):\n sv=system_vars_c().init_xyzlike([ [8, [0.0, 0.0, 0.0]]])\n atom2rcut=np.array([5.0])\n g = dft.gen_grid.Grids(sv)\n g.level = 1 # precision as implemented in pyscf\n g.radi_method=leggauss_ab\n g.build(atom2rcut=atom2rcut)\n\n #print( max( np.linalg.norm(g.coords, axis=1) ) )\n #print( g.weights.sum(), 4.0 *np.pi*5.0**3 / 3.0 )\n self.assertAlmostEqual(max( np.linalg.norm(g.coords, axis=1) ), 4.9955942742763986)\n self.assertAlmostEqual(g.weights.sum(), 4.0 *np.pi*5.0**3 / 3.0)\n self.assertEqual(len(g.weights), 6248)" ]
[ "0.5788689", "0.56861943", "0.55636656", "0.54741186", "0.54616976", "0.5379296", "0.5328459", "0.5325154", "0.53217375", "0.52759594", "0.5238227", "0.52287537", "0.51961917", "0.5183579", "0.51768106", "0.51755387", "0.5134452", "0.5132298", "0.51291597", "0.5091773", "0.507863", "0.5062716", "0.50626713", "0.50611377", "0.5041864", "0.50362515", "0.50338167", "0.5023299", "0.5012111", "0.500488", "0.5003061", "0.5001117", "0.49978873", "0.4984368", "0.49780703", "0.49765906", "0.4967708", "0.49579346", "0.4955191", "0.4945549", "0.4935827", "0.49299422", "0.49242917", "0.49084103", "0.49023187", "0.48920098", "0.48881376", "0.48855007", "0.48842072", "0.4882124", "0.4882011", "0.48790082", "0.48783162", "0.48690233", "0.4865506", "0.48614988", "0.4861439", "0.48585626", "0.48479056", "0.4843599", "0.48422334", "0.48413506", "0.4836971", "0.48276988", "0.4826589", "0.4825967", "0.4820323", "0.4816191", "0.48134166", "0.48047507", "0.4798872", "0.47865376", "0.47818935", "0.4781128", "0.47693756", "0.47636947", "0.4761333", "0.47607824", "0.47602174", "0.4754133", "0.47537458", "0.4753715", "0.47501093", "0.47496614", "0.47490177", "0.47479004", "0.47474557", "0.474232", "0.47398394", "0.4739626", "0.47361323", "0.47352242", "0.47348428", "0.47348428", "0.47348428", "0.47325492", "0.47318292", "0.47287998", "0.47287625", "0.47276285", "0.47239158" ]
0.0
-1
Fail on ff.on, but still read out the arc.
def test_goto_field_boss_flat_on_fails(self): sopTester.updateModel('mcp', TestHelper.mcpState['all_off']) cmdState = self.actorState.gotoField cmdState.reinitialize(self.cmd) self.cmd.failOn = 'mcp ff.on' self._goto_field_boss(16, 71, 0, 1, cmdState, didFail=True, finish=True)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def friewallOn():\n pass", "def test_export_to_mff_no_device():\n pytest.importorskip(\"mffpy\", \"0.5.7\")\n evoked = read_evokeds_mff(egi_evoked_fname, condition=\"Category 1\")\n evoked.info[\"device_info\"] = None\n with pytest.raises(ValueError, match=\"No device type.\"):\n export_evokeds(\"output.mff\", evoked)", "def test_none_to_on_transition_no_reading():\n m = monitor.Monitor()\n t = datetime(2010, 1, 1, 0, 0)\n m.set_outside_temperature(10, t)\n assert m.temperature_update(20, t) == None\n m.boiler_on(t)\n assert m.temperature_update(22, t + timedelta(seconds=10)) == None", "async def test_set_fan_mode_bad_attr(opp):\n state = opp.states.get(ENTITY_CLIMATE)\n assert \"On High\" == state.attributes.get(ATTR_FAN_MODE)\n\n with pytest.raises(vol.Invalid):\n await common.async_set_fan_mode(opp, None, ENTITY_CLIMATE)\n await opp.async_block_till_done()\n\n state = opp.states.get(ENTITY_CLIMATE)\n assert \"On High\" == state.attributes.get(ATTR_FAN_MODE)", "def rtsOn():\n pass", "def off(self) -> None:\n ...", "def rtsOff():\n pass", "def off(self) -> None:", "def off(self):", "def ff_callback(self):\n self.rokucontrol.ff_callback()", "def testEdgeMarkerGeneric0(self):\n outname = self.prefix+'.asap'\n self.res = sdcal(infile=self.rawfile,calmode='otf',markonly=True,outfile=outname,outform='ASAP')\n refdir = self._readref( self.reffiles[0] )\n self._checkfile( outname ) \n self._checkmarker( outname, refdir )", "def test_no_alerts(fd, formatter, event):\n ao = AlertObserver(3, fd=fd, formatter=formatter, event=event)\n ao.start()\n event.run_until_wait() # no print\n event.stop()\n assert fd.getvalue() == ''", "def fainted(self):\n self.pkmn.faint()\n messages = self.effect.attemptAfterTurn(self.pkmn)\n assert messages == [], \"Should receive no messages since nothing was performed\"", "def arm(self):\n error(\"Attempted to access abstract trigger arming function.\")", "def test_read_disconnected(connection, reader, schedule, flush, loop):\n schedule(connection.connect(), connection.disconnect())\n flush()\n value = loop.run_until_complete(connection.read())\n assert not value\n assert not reader.used", "async def unexpected_ball_received(self):\n # We do nothing in that case", "def testEdgeMarkerRaster1(self):\n outname = self.prefix+'.asap'\n self.res = sdcal(infile=self.rawfile,calmode='otfraster',noff=1,markonly=True,outfile=outname,outform='ASAP')\n refdir = self._readref( self.reffiles[1] )\n self._checkfile( outname ) \n self._checkmarker( outname, refdir )", "def test_read_not_interested(self):\n try:\n self.reader.read(self.books[2], 0, 0)\n self.fail(\"Readed book not interested\")\n except AssertionError:\n pass", "def off(self):\n print(f\"RF {self.name} off\")\n self.status(False)", "def test_alert_low_doesnt_fire_twice(fd, formatter, event):\n ao = AlertObserver(3, fd=fd, formatter=formatter, event=event)\n ao.start()\n event.run_until_wait()\n for _ in range(360):\n ao.update(DummyLogRecord())\n event.run_until_wait() # 'alarm triggered'\n for _ in range(359):\n ao.update(DummyLogRecord())\n event.run_until_wait() # 'alarm recovered'\n for _ in range(359):\n ao.update(DummyLogRecord())\n event.run_until_wait() # no print\n event.stop()\n assert fd.getvalue() == 'alarm triggered\\nalarm recovered\\n'", "def _barf_if_closed(self):\n if self.closed:\n raise ValueError(\"I/O operation on closed file\")", "def testEdgeMarkerRaster0(self):\n outname = self.prefix+'.asap'\n self.res = 
sdcal(infile=self.rawfile,calmode='otfraster',markonly=True,outfile=outname,outform='ASAP')\n refdir = self._readref( self.reffiles[0] )\n self._checkfile( outname ) \n self._checkmarker( outname, refdir )", "def __post_init__(self):\n if self.steering == Direction.FWD:\n raise ValueError(\"Steering can't be FORWARD.\")", "def device_discovered():\n event.set()", "def test_fooreader():\n from .context import readersender\n\n fr = readersender.readers.FooReader()\n\n fr.connect()\n fr.read()\n fr.disconnect()", "def test_alert_low(fd, formatter, event):\n ao = AlertObserver(3, fd=fd, formatter=formatter, event=event)\n ao.start()\n event.run_until_wait()\n for _ in range(360):\n ao.update(DummyLogRecord())\n event.run_until_wait() # 'alarm triggered'\n for _ in range(359):\n ao.update(DummyLogRecord())\n event.run_until_wait() # 'alarm recovered'\n event.stop()\n assert fd.getvalue() == 'alarm triggered\\nalarm recovered\\n'", "def testEdgeMarkerGeneric1(self):\n outname = self.prefix+'.asap'\n self.res = sdcal(infile=self.rawfile,calmode='otf',fraction='3%',markonly=True,outfile=outname,outform='ASAP')\n refdir = self._readref( self.reffiles[1] )\n self._checkfile( outname ) \n self._checkmarker( outname, refdir )", "def notFainted(self):\n messages = self.effect.attemptAfterTurn(self.pkmn)\n assert messages == [AfterTurnEffect.message], \"Should receive messages from afterTurn function\"", "def as_of_action(self):\n return OFActionOutput(port=self.port)", "def off(self):\n self._current_stream = self._devnull", "def on(self):\n print(f\"RF {self.name} on\")\n self.status(True)", "def fftw_failed(f):\n return f", "def test_read_EOF(demo_data):\n\n openeeg = openEDF(demo_data)\n start = max(openeeg.header.samples) + 1\n arr = openeeg.read(start, start+100)\n assert arr.size == 0\n\n openeeg.close()", "def isReadError(self):\n return self.f5 is 'x'", "def test_switchoff(self):\n mic = mi.MicrophoneToText()\n\n mic.switchoff()\n\n with self.assertRaises(OSError):\n mic.stream.is_active()\n self.assertFalse(mic.switch)\n self.assertFalse(mic.audio_source.is_recording)\n self.assertTrue(mic.result.closed)", "def test_alert_high_doesnt_fire_twice(fd, formatter, event):\n ao = AlertObserver(3, fd=fd, formatter=formatter, event=event)\n ao.start()\n event.run_until_wait()\n for _ in range(360):\n ao.update(DummyLogRecord())\n event.run_until_wait() # 'alarm triggered'\n for _ in range(360):\n ao.update(DummyLogRecord())\n event.run_until_wait() # no print\n event.stop()\n assert fd.getvalue() == 'alarm triggered\\n'", "def _force_read(self, gas_measurement_enabled):\n self._set_gas_measurement(gas_measurement_enabled)\n\n temp = self._read_register_1ubyte(self.BME680_CTRL_MEAS)\n temp |= OperationModes.ForcedMode\n self._write_register(self.BME680_CTRL_MEAS, temp)\n\n while(self._get_measuring_status()):\n time.sleep(0.001)\n\n if (gas_measurement_enabled):\n while (self._get_gas_measuring_status()):\n time.sleep(0.001)", "def _off_received(self):\n self._call_subscribers(on_level=0x00)", "def r3_on_off():\n \n r3_cmd_packet = b'\\x04\\x14\\x04\\x00\\x00\\xe4\\x0f'\n ser_relay.write(r3_cmd_packet)", "def is_fcc_off(self):\n raise NotImplementedError", "def seekable(self):\n ...", "async def test_inverted_cover(hass: HomeAssistant, monkeypatch) -> None:\n config = {\n \"rflink\": {\"port\": \"/dev/ttyABC0\"},\n DOMAIN: {\n \"platform\": \"rflink\",\n \"devices\": {\n \"nonkaku_device_1\": {\n \"name\": \"nonkaku_type_standard\",\n \"type\": \"standard\",\n },\n \"nonkaku_device_2\": {\"name\": 
\"nonkaku_type_none\"},\n \"nonkaku_device_3\": {\n \"name\": \"nonkaku_type_inverted\",\n \"type\": \"inverted\",\n },\n \"newkaku_device_4\": {\n \"name\": \"newkaku_type_standard\",\n \"type\": \"standard\",\n },\n \"newkaku_device_5\": {\"name\": \"newkaku_type_none\"},\n \"newkaku_device_6\": {\n \"name\": \"newkaku_type_inverted\",\n \"type\": \"inverted\",\n },\n },\n },\n }\n\n # setup mocking rflink module\n event_callback, _, protocol, _ = await mock_rflink(\n hass, config, DOMAIN, monkeypatch\n )\n\n # test default state of cover loaded from config\n standard_cover = hass.states.get(f\"{DOMAIN}.nonkaku_type_standard\")\n assert standard_cover.state == STATE_CLOSED\n assert standard_cover.attributes[\"assumed_state\"]\n\n # mock incoming up command event for nonkaku_device_1\n event_callback({\"id\": \"nonkaku_device_1\", \"command\": \"up\"})\n await hass.async_block_till_done()\n\n standard_cover = hass.states.get(f\"{DOMAIN}.nonkaku_type_standard\")\n assert standard_cover.state == STATE_OPEN\n assert standard_cover.attributes.get(\"assumed_state\")\n\n # mock incoming up command event for nonkaku_device_2\n event_callback({\"id\": \"nonkaku_device_2\", \"command\": \"up\"})\n await hass.async_block_till_done()\n\n standard_cover = hass.states.get(f\"{DOMAIN}.nonkaku_type_none\")\n assert standard_cover.state == STATE_OPEN\n assert standard_cover.attributes.get(\"assumed_state\")\n\n # mock incoming up command event for nonkaku_device_3\n event_callback({\"id\": \"nonkaku_device_3\", \"command\": \"up\"})\n\n await hass.async_block_till_done()\n\n inverted_cover = hass.states.get(f\"{DOMAIN}.nonkaku_type_inverted\")\n assert inverted_cover.state == STATE_OPEN\n assert inverted_cover.attributes.get(\"assumed_state\")\n\n # mock incoming up command event for newkaku_device_4\n event_callback({\"id\": \"newkaku_device_4\", \"command\": \"up\"})\n\n await hass.async_block_till_done()\n\n inverted_cover = hass.states.get(f\"{DOMAIN}.newkaku_type_standard\")\n assert inverted_cover.state == STATE_OPEN\n assert inverted_cover.attributes.get(\"assumed_state\")\n\n # mock incoming up command event for newkaku_device_5\n event_callback({\"id\": \"newkaku_device_5\", \"command\": \"up\"})\n\n await hass.async_block_till_done()\n\n inverted_cover = hass.states.get(f\"{DOMAIN}.newkaku_type_none\")\n assert inverted_cover.state == STATE_OPEN\n assert inverted_cover.attributes.get(\"assumed_state\")\n\n # mock incoming up command event for newkaku_device_6\n event_callback({\"id\": \"newkaku_device_6\", \"command\": \"up\"})\n\n await hass.async_block_till_done()\n\n inverted_cover = hass.states.get(f\"{DOMAIN}.newkaku_type_inverted\")\n assert inverted_cover.state == STATE_OPEN\n assert inverted_cover.attributes.get(\"assumed_state\")\n\n # mock incoming down command event for nonkaku_device_1\n event_callback({\"id\": \"nonkaku_device_1\", \"command\": \"down\"})\n\n await hass.async_block_till_done()\n\n standard_cover = hass.states.get(f\"{DOMAIN}.nonkaku_type_standard\")\n assert standard_cover.state == STATE_CLOSED\n assert standard_cover.attributes.get(\"assumed_state\")\n\n # mock incoming down command event for nonkaku_device_2\n event_callback({\"id\": \"nonkaku_device_2\", \"command\": \"down\"})\n\n await hass.async_block_till_done()\n\n standard_cover = hass.states.get(f\"{DOMAIN}.nonkaku_type_none\")\n assert standard_cover.state == STATE_CLOSED\n assert standard_cover.attributes.get(\"assumed_state\")\n\n # mock incoming down command event for nonkaku_device_3\n 
event_callback({\"id\": \"nonkaku_device_3\", \"command\": \"down\"})\n\n await hass.async_block_till_done()\n\n inverted_cover = hass.states.get(f\"{DOMAIN}.nonkaku_type_inverted\")\n assert inverted_cover.state == STATE_CLOSED\n assert inverted_cover.attributes.get(\"assumed_state\")\n\n # mock incoming down command event for newkaku_device_4\n event_callback({\"id\": \"newkaku_device_4\", \"command\": \"down\"})\n\n await hass.async_block_till_done()\n\n inverted_cover = hass.states.get(f\"{DOMAIN}.newkaku_type_standard\")\n assert inverted_cover.state == STATE_CLOSED\n assert inverted_cover.attributes.get(\"assumed_state\")\n\n # mock incoming down command event for newkaku_device_5\n event_callback({\"id\": \"newkaku_device_5\", \"command\": \"down\"})\n\n await hass.async_block_till_done()\n\n inverted_cover = hass.states.get(f\"{DOMAIN}.newkaku_type_none\")\n assert inverted_cover.state == STATE_CLOSED\n assert inverted_cover.attributes.get(\"assumed_state\")\n\n # mock incoming down command event for newkaku_device_6\n event_callback({\"id\": \"newkaku_device_6\", \"command\": \"down\"})\n\n await hass.async_block_till_done()\n\n inverted_cover = hass.states.get(f\"{DOMAIN}.newkaku_type_inverted\")\n assert inverted_cover.state == STATE_CLOSED\n assert inverted_cover.attributes.get(\"assumed_state\")\n\n # We are only testing the 'inverted' devices, the 'standard' devices\n # are already covered by other test cases.\n\n # should respond to group command\n event_callback({\"id\": \"nonkaku_device_3\", \"command\": \"alloff\"})\n\n await hass.async_block_till_done()\n\n inverted_cover = hass.states.get(f\"{DOMAIN}.nonkaku_type_inverted\")\n assert inverted_cover.state == STATE_CLOSED\n\n # should respond to group command\n event_callback({\"id\": \"nonkaku_device_3\", \"command\": \"allon\"})\n\n await hass.async_block_till_done()\n\n inverted_cover = hass.states.get(f\"{DOMAIN}.nonkaku_type_inverted\")\n assert inverted_cover.state == STATE_OPEN\n\n # should respond to group command\n event_callback({\"id\": \"newkaku_device_4\", \"command\": \"alloff\"})\n\n await hass.async_block_till_done()\n\n inverted_cover = hass.states.get(f\"{DOMAIN}.newkaku_type_standard\")\n assert inverted_cover.state == STATE_CLOSED\n\n # should respond to group command\n event_callback({\"id\": \"newkaku_device_4\", \"command\": \"allon\"})\n\n await hass.async_block_till_done()\n\n inverted_cover = hass.states.get(f\"{DOMAIN}.newkaku_type_standard\")\n assert inverted_cover.state == STATE_OPEN\n\n # should respond to group command\n event_callback({\"id\": \"newkaku_device_5\", \"command\": \"alloff\"})\n\n await hass.async_block_till_done()\n\n inverted_cover = hass.states.get(f\"{DOMAIN}.newkaku_type_none\")\n assert inverted_cover.state == STATE_CLOSED\n\n # should respond to group command\n event_callback({\"id\": \"newkaku_device_5\", \"command\": \"allon\"})\n\n await hass.async_block_till_done()\n\n inverted_cover = hass.states.get(f\"{DOMAIN}.newkaku_type_none\")\n assert inverted_cover.state == STATE_OPEN\n\n # should respond to group command\n event_callback({\"id\": \"newkaku_device_6\", \"command\": \"alloff\"})\n\n await hass.async_block_till_done()\n\n inverted_cover = hass.states.get(f\"{DOMAIN}.newkaku_type_inverted\")\n assert inverted_cover.state == STATE_CLOSED\n\n # should respond to group command\n event_callback({\"id\": \"newkaku_device_6\", \"command\": \"allon\"})\n\n await hass.async_block_till_done()\n\n inverted_cover = hass.states.get(f\"{DOMAIN}.newkaku_type_inverted\")\n 
assert inverted_cover.state == STATE_OPEN\n\n # Sending the close command from HA should result\n # in an 'DOWN' command sent to a non-newkaku device\n # that has its type set to 'standard'.\n await hass.services.async_call(\n DOMAIN,\n SERVICE_CLOSE_COVER,\n {ATTR_ENTITY_ID: f\"{DOMAIN}.nonkaku_type_standard\"},\n )\n\n await hass.async_block_till_done()\n\n assert hass.states.get(f\"{DOMAIN}.nonkaku_type_standard\").state == STATE_CLOSED\n assert protocol.send_command_ack.call_args_list[0][0][0] == \"nonkaku_device_1\"\n assert protocol.send_command_ack.call_args_list[0][0][1] == \"DOWN\"\n\n # Sending the open command from HA should result\n # in an 'UP' command sent to a non-newkaku device\n # that has its type set to 'standard'.\n await hass.services.async_call(\n DOMAIN,\n SERVICE_OPEN_COVER,\n {ATTR_ENTITY_ID: f\"{DOMAIN}.nonkaku_type_standard\"},\n )\n\n await hass.async_block_till_done()\n\n assert hass.states.get(f\"{DOMAIN}.nonkaku_type_standard\").state == STATE_OPEN\n assert protocol.send_command_ack.call_args_list[1][0][0] == \"nonkaku_device_1\"\n assert protocol.send_command_ack.call_args_list[1][0][1] == \"UP\"\n\n # Sending the close command from HA should result\n # in an 'DOWN' command sent to a non-newkaku device\n # that has its type not specified.\n await hass.services.async_call(\n DOMAIN, SERVICE_CLOSE_COVER, {ATTR_ENTITY_ID: f\"{DOMAIN}.nonkaku_type_none\"}\n )\n\n await hass.async_block_till_done()\n\n assert hass.states.get(f\"{DOMAIN}.nonkaku_type_none\").state == STATE_CLOSED\n assert protocol.send_command_ack.call_args_list[2][0][0] == \"nonkaku_device_2\"\n assert protocol.send_command_ack.call_args_list[2][0][1] == \"DOWN\"\n\n # Sending the open command from HA should result\n # in an 'UP' command sent to a non-newkaku device\n # that has its type not specified.\n await hass.services.async_call(\n DOMAIN, SERVICE_OPEN_COVER, {ATTR_ENTITY_ID: f\"{DOMAIN}.nonkaku_type_none\"}\n )\n\n await hass.async_block_till_done()\n\n assert hass.states.get(f\"{DOMAIN}.nonkaku_type_none\").state == STATE_OPEN\n assert protocol.send_command_ack.call_args_list[3][0][0] == \"nonkaku_device_2\"\n assert protocol.send_command_ack.call_args_list[3][0][1] == \"UP\"\n\n # Sending the close command from HA should result\n # in an 'UP' command sent to a non-newkaku device\n # that has its type set to 'inverted'.\n await hass.services.async_call(\n DOMAIN,\n SERVICE_CLOSE_COVER,\n {ATTR_ENTITY_ID: f\"{DOMAIN}.nonkaku_type_inverted\"},\n )\n\n await hass.async_block_till_done()\n\n assert hass.states.get(f\"{DOMAIN}.nonkaku_type_inverted\").state == STATE_CLOSED\n assert protocol.send_command_ack.call_args_list[4][0][0] == \"nonkaku_device_3\"\n assert protocol.send_command_ack.call_args_list[4][0][1] == \"UP\"\n\n # Sending the open command from HA should result\n # in an 'DOWN' command sent to a non-newkaku device\n # that has its type set to 'inverted'.\n await hass.services.async_call(\n DOMAIN,\n SERVICE_OPEN_COVER,\n {ATTR_ENTITY_ID: f\"{DOMAIN}.nonkaku_type_inverted\"},\n )\n\n await hass.async_block_till_done()\n\n assert hass.states.get(f\"{DOMAIN}.nonkaku_type_inverted\").state == STATE_OPEN\n assert protocol.send_command_ack.call_args_list[5][0][0] == \"nonkaku_device_3\"\n assert protocol.send_command_ack.call_args_list[5][0][1] == \"DOWN\"\n\n # Sending the close command from HA should result\n # in an 'DOWN' command sent to a newkaku device\n # that has its type set to 'standard'.\n await hass.services.async_call(\n DOMAIN,\n SERVICE_CLOSE_COVER,\n {ATTR_ENTITY_ID: 
f\"{DOMAIN}.newkaku_type_standard\"},\n )\n\n await hass.async_block_till_done()\n\n assert hass.states.get(f\"{DOMAIN}.newkaku_type_standard\").state == STATE_CLOSED\n assert protocol.send_command_ack.call_args_list[6][0][0] == \"newkaku_device_4\"\n assert protocol.send_command_ack.call_args_list[6][0][1] == \"DOWN\"\n\n # Sending the open command from HA should result\n # in an 'UP' command sent to a newkaku device\n # that has its type set to 'standard'.\n await hass.services.async_call(\n DOMAIN,\n SERVICE_OPEN_COVER,\n {ATTR_ENTITY_ID: f\"{DOMAIN}.newkaku_type_standard\"},\n )\n\n await hass.async_block_till_done()\n\n assert hass.states.get(f\"{DOMAIN}.newkaku_type_standard\").state == STATE_OPEN\n assert protocol.send_command_ack.call_args_list[7][0][0] == \"newkaku_device_4\"\n assert protocol.send_command_ack.call_args_list[7][0][1] == \"UP\"\n\n # Sending the close command from HA should result\n # in an 'UP' command sent to a newkaku device\n # that has its type not specified.\n await hass.services.async_call(\n DOMAIN, SERVICE_CLOSE_COVER, {ATTR_ENTITY_ID: f\"{DOMAIN}.newkaku_type_none\"}\n )\n\n await hass.async_block_till_done()\n\n assert hass.states.get(f\"{DOMAIN}.newkaku_type_none\").state == STATE_CLOSED\n assert protocol.send_command_ack.call_args_list[8][0][0] == \"newkaku_device_5\"\n assert protocol.send_command_ack.call_args_list[8][0][1] == \"UP\"\n\n # Sending the open command from HA should result\n # in an 'DOWN' command sent to a newkaku device\n # that has its type not specified.\n await hass.services.async_call(\n DOMAIN, SERVICE_OPEN_COVER, {ATTR_ENTITY_ID: f\"{DOMAIN}.newkaku_type_none\"}\n )\n\n await hass.async_block_till_done()\n\n assert hass.states.get(f\"{DOMAIN}.newkaku_type_none\").state == STATE_OPEN\n assert protocol.send_command_ack.call_args_list[9][0][0] == \"newkaku_device_5\"\n assert protocol.send_command_ack.call_args_list[9][0][1] == \"DOWN\"\n\n # Sending the close command from HA should result\n # in an 'UP' command sent to a newkaku device\n # that has its type set to 'inverted'.\n await hass.services.async_call(\n DOMAIN,\n SERVICE_CLOSE_COVER,\n {ATTR_ENTITY_ID: f\"{DOMAIN}.newkaku_type_inverted\"},\n )\n\n await hass.async_block_till_done()\n\n assert hass.states.get(f\"{DOMAIN}.newkaku_type_inverted\").state == STATE_CLOSED\n assert protocol.send_command_ack.call_args_list[10][0][0] == \"newkaku_device_6\"\n assert protocol.send_command_ack.call_args_list[10][0][1] == \"UP\"\n\n # Sending the open command from HA should result\n # in an 'DOWN' command sent to a newkaku device\n # that has its type set to 'inverted'.\n await hass.services.async_call(\n DOMAIN,\n SERVICE_OPEN_COVER,\n {ATTR_ENTITY_ID: f\"{DOMAIN}.newkaku_type_inverted\"},\n )\n\n await hass.async_block_till_done()\n\n assert hass.states.get(f\"{DOMAIN}.newkaku_type_inverted\").state == STATE_OPEN\n assert protocol.send_command_ack.call_args_list[11][0][0] == \"newkaku_device_6\"\n assert protocol.send_command_ack.call_args_list[11][0][1] == \"DOWN\"", "def on(self) -> None:", "def go_infFD(self):\n\n response = self.send_lens_cmd(['05', '00', '00', '00'], fast_mode=True)\n self.wait_focus_move()", "def off(self):\n raise NotImplementedError", "async def test_fan_read_state(hass: HomeAssistant, utcnow) -> None:\n helper = await setup_test_component(hass, create_fan_service)\n\n state = await helper.async_update(\n ServicesTypes.FAN, {CharacteristicsTypes.ON: False}\n )\n assert state.state == \"off\"\n\n state = await helper.async_update(\n ServicesTypes.FAN, 
{CharacteristicsTypes.ON: True}\n )\n assert state.state == \"on\"", "def is_recoverable(self):\n return self.__is_recoverable", "def end_read():\n global ov_read\n ov_read = False\n GPIO.cleanup()", "def test_fas():\n ddir = os.path.join(\"data\", \"testdata\")\n datadir = pkg_resources.resource_filename(\"gmprocess\", ddir)\n fas_file = os.path.join(datadir, \"fas_geometric_mean.pkl\")\n p1 = os.path.join(datadir, \"peer\", \"RSN763_LOMAP_GIL067.AT2\")\n p2 = os.path.join(datadir, \"peer\", \"RSN763_LOMAP_GIL337.AT2\")\n\n stream = StationStream([])\n for idx, fpath in enumerate([p1, p2]):\n with open(fpath, encoding=\"utf-8\") as file_obj:\n for _ in range(3):\n next(file_obj)\n meta = re.findall(r\"[.0-9]+\", next(file_obj))\n dt = float(meta[1])\n accels = np.array(\n [col for line in file_obj for col in line.split()], dtype=float\n )\n trace = StationTrace(\n data=accels,\n header={\n \"channel\": \"H\" + str(idx),\n \"delta\": dt,\n \"standard\": {\n \"corner_frequency\": np.nan,\n \"station_name\": \"\",\n \"source\": \"json\",\n \"instrument\": \"\",\n \"instrument_period\": np.nan,\n \"source_format\": \"json\",\n \"comments\": \"\",\n \"structure_type\": \"\",\n \"sensor_serial_number\": \"\",\n \"source_file\": \"\",\n \"process_level\": \"raw counts\",\n \"process_time\": \"\",\n \"horizontal_orientation\": np.nan,\n \"vertical_orientation\": np.nan,\n \"units\": \"cm/s/s\",\n \"units_type\": \"acc\",\n \"instrument_sensitivity\": np.nan,\n \"instrument_damping\": np.nan,\n },\n },\n )\n stream.append(trace)\n\n for tr in stream:\n response = {\"input_units\": \"counts\", \"output_units\": \"cm/s^2\"}\n tr.setProvenance(\"remove_response\", response)\n\n target_df = pd.read_pickle(fas_file)\n ind_vals = target_df.index.values\n per = np.unique([float(i[0].split(\")\")[0].split(\"(\")[1]) for i in ind_vals])\n freqs = 1 / per\n imts = [\"fas\" + str(p) for p in per]\n summary = StationSummary.from_stream(stream, [\"geometric_mean\"], imts, bandwidth=30)\n\n pgms = summary.pgms\n # pgms.to_pickle(fas_file)\n for idx, f in enumerate(freqs):\n fstr = f\"FAS({1 / f:.3f})\"\n fval1 = pgms.loc[fstr, \"GEOMETRIC_MEAN\"].Result\n fval2 = target_df.loc[fstr, \"GEOMETRIC_MEAN\"].Result\n np.testing.assert_allclose(fval1, fval2, rtol=1e-5, atol=1e-5)", "def arc_set_complete(self, *args, **kwardgs):\n\n super(WeldTask, self).play()", "def tell(self):\n ...", "def test_recover(self):\n # Should work\n A = ConwayAgent(23, True)\n A.infect()\n A.recover()\n self.assertTrue(A.status() == 3)\n\n # Should do nothing\n A.reset()\n A.recover()\n self.assertTrue(A.status() == 1)\n\n # Should also do nothing\n A.infect()\n A.kill()\n A.recover()\n self.assertTrue(A.status() == 0)", "def test_verify_state_of_a_device_when_disconnected_from_the_device():", "def wantsReadEvent(self):\r\n if self.result != None:\r\n return self.result == 0\r\n return None", "def on(self):\n status = self.dev.ctrl_transfer(0x40, 0x01, 0x0001, 0xa0, [])\n if status == 0:\n self.ev.set()\n return (status == 0)", "def fAT(self):\n pass", "def ready():\n\tsh('c')\n\td1out()\n\td2out()\n\td3out()\n\t#marAuxiliary.openMarShield()\t\t# N.b. 
if mar disconnected, will just do nothing", "def test_writer_wrong():\n GCMT(write=2)\n assert not Writer.on", "def error_out(self) -> bool:\n return self._action == 'error'", "def stopReading(self):\n self.reading = False", "def at_aflight(seq, flag):\n at(\"AFLIGHT\", seq, [flag])", "def on(self):", "async def test_turn_off(hass: HomeAssistant, utcnow) -> None:\n helper = await setup_test_component(hass, create_fan_service)\n\n await helper.async_update(ServicesTypes.FAN, {CharacteristicsTypes.ON: 1})\n\n await hass.services.async_call(\n \"fan\",\n \"turn_off\",\n {\"entity_id\": \"fan.testdevice\"},\n blocking=True,\n )\n helper.async_assert_service_values(\n ServicesTypes.FAN,\n {\n CharacteristicsTypes.ON: 0,\n },\n )", "def probe(self):\n return False", "def error(err):\n print(\"ERROR: \" + err)\n avrprog.end()\n while True:\n pass", "def test_afni_nonvolume():\n im = ndar.Image('test_data/10_425-02_li1_146.png')\n nose.tools.assert_raises(AttributeError, lambda: im.afni)", "def unknown_departure(self, node):\n pass", "async def test_write_acn_error():\n f = asyncio.Future()\n f.set_result(None)\n pipe = Mock()\n pipe.write.return_value = f\n node_client = NodeClient(pipe, Mock())\n await node_client.write_acn_status_error(\"some message\")\n pipe.write.assert_called_once()", "async def expected_ball_received(self):\n # We do nothing in that case", "def test_wrong_ann_features():\n my_file_struct = FileStruct(os.path.join(\"fixtures\", \"chirp.mp3\"))\n my_file_struct.features_file = os.path.join(\"features\", \"no_file.json\")\n cqt = CQT(my_file_struct, FeatureTypes.ann_beatsync, sr=11025)\n with raises(FeatureTypeNotFound):\n cqt.features", "def check_result(ec):\r\n # NOTE: This will break some oscilloscopes that are powered by USB.\r\n # Some of the newer scopes, can actually be powered by USB and will\r\n # return a useful value. 
That should be given back to the user.\r\n # I guess we can deal with these edge cases in the functions themselves\r\n if ec == 0:\r\n return\r\n\r\n else:\r\n ecName = error_num_to_name(ec)\r\n ecDesc = error_num_to_desc(ec)\r\n raise IOError('Error calling %s: %s (%s)' % (\r\n str(inspect.stack()[1][3]), ecName, ecDesc))", "def onDone(self):\n pass", "def safe_to_dance(self):\n #check for all fil/early-termination conditions\n for _ in range(4):\n if self.read_distance() < 300:\n print(\"not safe to dance!\")\n return False\n else:\n self.turn_by_deg(90)\n #after all checks have been done, we deduce its safe to dance\n print(\"Dance on!\")\n return True", "def test_invalid_flag_record(self):\n log.info(\"START QUAL TEST INVALID FLAG RECORD\")\n\n # Made-up data with all flags except the first set to True.\n # First flag is not a zero or one.\n self.clear_sample_data()\n self.event_subscribers.clear_events()\n self.assert_initialize()\n self.create_sample_data('invalid_A0000003.DEC', \"A1000003.DEC\")\n\n # Verify an event was raised and we are in our retry state.\n self.verify_queue_empty()\n self.assert_event_received(ResourceAgentErrorEvent, 10)\n self.assert_state_change(ResourceAgentState.STREAMING, 10)\n\n log.info(\"END QUAL TEST INVALID FLAG RECORD\")", "def ultrasonicChecker() -> None:\n ...", "def test_read_one_signal_failure(self):\n cwd = os.path.dirname(os.path.abspath(__file__))\n dir1 = os.path.join(cwd, 'test_files/')\n dir2 = '/home/tomas/here-be-dragons/'\n signal1 = read_signal(dir1, 'here-be-dragons')\n signal2 = read_signal(dir2, 'test3')\n self.assertIsNone(signal1)\n self.assertIsNone(signal2)", "def test_fp_elf(elf, expected):\n elf_filename = os.path.join(elf_dir, elf)\n epiphany = Epiphany()\n with open(elf_filename, 'rb') as elf:\n epiphany.init_state(elf, elf_filename, '', [], False, is_test=True)\n epiphany.max_insts = 10000\n epiphany.run()\n expected.fp_check(epiphany.state)", "def event_m10_29_x39(flag1=105405, z15=10292020, z16=10292010, z17=100001, z18=100002, z21=10040000,\r\n z22=10320000, flag2=129010024):\r\n \"\"\"State 0,1: Is the destination door open?\"\"\"\r\n if GetEventFlag(flag2) != 0:\r\n pass\r\n else:\r\n Goto('L0')\r\n \"\"\"State 2: Intrusion MAP determination\"\"\"\r\n CompareEventFlag(0, z15, 1)\r\n if GetEventFlag(flag1) != 1:\r\n \"\"\"State 4: Wait for completion of reading Madura MAP\"\"\"\r\n IsMapReadAndBackreadStable(0, z21, 1)\r\n assert ConditionGroup(0)\r\n \"\"\"State 5: Open the Madura side door\"\"\"\r\n ChangeObjState(z15, 30)\r\n \"\"\"State 7: Made the Madura side door mesh mesh accessible\"\"\"\r\n DeleteNavimeshAttribute(z17, 2)\r\n else:\r\n \"\"\"State 3: Waiting for completion of reading of the Shadow Shadow Forest MAP\"\"\"\r\n IsMapReadAndBackreadStable(0, z22, 1)\r\n assert ConditionGroup(0)\r\n \"\"\"State 6: Open the door on the forest side of the hollow shadow\"\"\"\r\n ChangeObjState(z16, 30)\r\n \"\"\"State 8: Navimesh of the door on the forest side of the empty shadow is ready to enter\"\"\"\r\n DeleteNavimeshAttribute(z18, 2)\r\n \"\"\"State 9: Destination door opening complete\"\"\"\r\n return 0\r\n \"\"\"State 10: Destination door opening not completed\"\"\"\r\n Label('L0')\r\n return 1", "def after(self):\n return self.marker_seen and not self.throwaway", "def event_m20_11_3020():\n \"\"\"State 0,2: [Preset] Beautiful frog singing voice_flag_SubState\"\"\"\n assert event_m20_11_x110(z24=211000081, z25=802, z26=211020082)\n \"\"\"State 1: Finish\"\"\"\n EndMachine()", "def 
test_read_foreign_device_table_ack(self):\n if _debug: TestAnnexJCodec._debug(\"test_read_foreign_device_table_ack\")\n\n # read returns an empty table\n pdu_bytes = xtob('81.07.0004')\n\n self.request(ReadForeignDeviceTableAck([]))\n self.indication(pduData=pdu_bytes)\n\n self.response(PDU(pdu_bytes))\n self.confirmation(ReadForeignDeviceTableAck, bvlciFDT=[])\n\n # read returns a table with one entry\n fdte = FDTEntry()\n fdte.fdAddress = Address(\"192.168.0.10\")\n fdte.fdTTL = 30\n fdte.fdRemain = 15\n pdu_bytes = xtob('81.07.000e' # bvlci\n 'c0.a8.00.0a.ba.c0' # address\n '001e.000f' # ttl and remaining\n )\n\n self.request(ReadForeignDeviceTableAck([fdte]))\n self.indication(pduData=pdu_bytes)\n\n self.response(PDU(pdu_bytes))\n self.confirmation(ReadForeignDeviceTableAck, bvlciFDT=[fdte])", "def mark_done(cf):\n\n with open(cf) as f:\n hesh = hashlib.sha256(bytes(f.read(), 'utf-8')).hexdigest()\n with open('done_log', 'a') as f:\n f.write('{}\\t{}\\n'.format(cf, hesh))", "def onError(self, connection:MQTTConnection, rc:int) -> bool:\n\t\treturn True", "def r2_on_off():\n \n r2_cmd_packet = b'\\x04\\x14\\x02\\x00\\x00\\xe6\\x0f'\n ser_relay.write(r2_cmd_packet)", "def on(self) -> None:\n ...", "def off(self):\n status = self.dev.ctrl_transfer(0x40, 0x01, 0x0001, 0x20, [])\n if status == 0:\n self.ev.clear()\n return (status == 0)", "def feed(self):\n # or intelligence discard\n pass", "def test_is_affordable_off_chain(self):\n tx_message = TransactionMessage(\n performative=TransactionMessage.Performative.PROPOSE_FOR_SETTLEMENT,\n skill_callback_ids=[PublicId(\"author\", \"a_skill\", \"0.1.0\")],\n tx_id=self.tx_id,\n tx_sender_addr=self.tx_sender_addr,\n tx_counterparty_addr=self.tx_counterparty_addr,\n tx_amount_by_currency_id={\"FET\": -20},\n tx_sender_fee=0,\n tx_counterparty_fee=0,\n tx_quantities_by_good_id={\"good_id\": 10},\n ledger_id=\"off_chain\",\n info=self.info,\n tx_nonce=\"Transaction nonce\",\n )\n\n assert self.decision_maker._is_affordable(tx_message)", "def _read_no_check(self):\n on_data_fut = _async.Future(self._loop)\n def on_data():\n on_data_fut.set_result(None)\n self._loop.add_reader(self._channel.fileno(), on_data)\n\n try:\n yield From(on_data_fut)\n finally:\n self._loop.remove_reader(self._channel.fileno())\n\n d = self._channel.recv(_MAX_READ_AMOUNT).decode()\n if d == '':\n logger.debug(\"{}: Read returned {!r}\".format(self, d))\n raise TransportNotConnected\n\n logger.debug(\"{}: Read {!r}\".format(self, d))\n if d.endswith(\"\\r\\n\"):\n d = d[:-2] + \"\\n\"\n d = d.encode('ascii')\n\n raise Return(d)", "def test_fileDoesNotExist(self):\n fp = FilePath(self.mktemp())\n protocol = self.makeConnectedDccFileReceive(fp.path)\n\n self.allDataReceivedForProtocol(protocol, b\"I <3 Twisted\")\n\n self.assertEqual(fp.getContent(), b\"I <3 Twisted\")", "def test_export_to_mff_incompatible_sfreq():\n pytest.importorskip(\"mffpy\", \"0.5.7\")\n evoked = read_evokeds(fname_evoked)\n with pytest.raises(ValueError, match=f'sfreq: {evoked[0].info[\"sfreq\"]}'):\n export_evokeds(\"output.mff\", evoked)", "def test_fastaqual_output(self):\r\n self.assertRaises(IOError, convert_fastaqual, self.fasta_file_path,\r\n output_directory=self.read_only_output_dir)", "def emergency_recover_states_from_failure():\n _emergency_state_check()\n _emergency_iobuf_extract()", "def test_afni_fail():\n im = ndar.Image('test_data/bogus.mnc')\n nose.tools.assert_raises(AttributeError, lambda: im.afni)", "def event_m20_11_x84(z44=20111330):\n \"\"\"State 0,1: Wait for branch 
destruction\"\"\"\n IsObjBroken(0, z44, 1)\n assert ConditionGroup(0)\n \"\"\"State 2: End state\"\"\"\n return 0", "def test_goto_field_boss_ffs_open_fails(self):\n sopTester.updateModel('mcp', TestHelper.mcpState['all_off'])\n cmdState = self.actorState.gotoField\n cmdState.reinitialize(self.cmd)\n self.cmd.failOn = 'mcp ffs.open'\n self._goto_field_boss(21, 102, 1, 1, cmdState, didFail=True, finish=True)", "def test_handle_flag_error(self):\n pass", "def connectionTest(self):\n\n print(\"Turn on the Tiva\")\n g_byte = self.ser.read(1)\n o_byte = self.ser.read(1)\n\n if (g_byte.decode(\"ascii\") != \"G\") | (o_byte.decode(\"ascii\") != \"O\"):\n print(\"Unsuccessful Serial Connection to Tiva, Try Again\")\n else:\n print(\"Successful Serial Connection to Tiva\")", "def test_read_before_connected(connection, reader, loop):\n value = loop.run_until_complete(connection.read())\n assert not value\n assert not reader.used", "def eof(self):\n try:\n next_line = self.read_pkt_line()\n except HangupException:\n return True\n self.unread_pkt_line(next_line)\n return False" ]
[ "0.53135496", "0.530685", "0.5285612", "0.5125348", "0.51056355", "0.50907296", "0.50692505", "0.50690484", "0.4983307", "0.49766693", "0.4912766", "0.4895812", "0.48652288", "0.48498195", "0.48497722", "0.48134395", "0.4810314", "0.4796255", "0.47883302", "0.47843057", "0.4778215", "0.47579592", "0.4754952", "0.4753049", "0.47379392", "0.47262964", "0.47199935", "0.47079217", "0.47061372", "0.47047815", "0.46931088", "0.46908864", "0.46880132", "0.4687745", "0.46690676", "0.4654544", "0.4644666", "0.4640219", "0.4640051", "0.46346563", "0.46288723", "0.4624529", "0.46156088", "0.46131313", "0.46039304", "0.4602639", "0.45965955", "0.45952547", "0.45856345", "0.4580515", "0.45774114", "0.45696607", "0.4568556", "0.45683652", "0.45454282", "0.45399395", "0.45249984", "0.45239082", "0.45231768", "0.4520772", "0.45183682", "0.4503348", "0.4490995", "0.4488729", "0.44723383", "0.44720563", "0.4468204", "0.44625813", "0.446122", "0.44582427", "0.44578052", "0.4451904", "0.44389504", "0.4437572", "0.44323114", "0.44319713", "0.4428154", "0.44277015", "0.44255772", "0.44254854", "0.4425484", "0.4425338", "0.4421694", "0.44124645", "0.44053817", "0.4402132", "0.43965948", "0.43956834", "0.4387314", "0.43778116", "0.4372253", "0.43719193", "0.43677345", "0.4367365", "0.43663144", "0.43647474", "0.43646795", "0.43629315", "0.43576276", "0.43539232" ]
0.51799893
3
Hartmann succeeds but the blue ring move is out of tolerance.
def test_goto_field_boss_hartmann_blue_fails(self):
        sopTester.updateModel('mcp', TestHelper.mcpState['all_off'])
        sopTester.updateModel('hartmann', TestHelper.hartmannState['blue_fails'])
        cmdState = self.actorState.gotoField
        cmdState.reinitialize(self.cmd)
        self._goto_field_boss(12, 37, 0, 0, cmdState, didFail=True, finish=True)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_W_end(self):\t\t\n self.assertAlmostEqual(attempt.W[-1], 9.494852380803035)", "def steadyYet(newg, oldg, newe, olde, newh, oldh, newf, oldf, tolerance):\n steady_yet = True\n if oldg == 0 or (abs(newg-oldg)/oldg > tolerance or\n abs(newe-olde)/olde > tolerance or\n abs(newh-oldh)/oldh > tolerance or\n abs(newf-oldf)/oldf > tolerance):\n steady_yet = False\n return steady_yet", "def test_Z_start(self):\t\t\n self.assertAlmostEqual(attempt.Z[0], 40)", "def explicit_half_kick(self):\n rejected = False\n momentum = self.momentum.clone()\n old_momentum = momentum.clone()\n hamiltonian = self.get_hamiltonian()\n d_hamiltonian = torch.autograd.grad(hamiltonian, self.parameters, retain_graph=False)[0]\n momentum -= 0.5 * self.stepsize * d_hamiltonian\n if self._is_nanned(self.momentum):\n rejected = True\n self.momentum = old_momentum\n# if self.verbose:\n print('Nanned during explit momentum update {}'.format(iter))\n return momentum, rejected", "def test_full_chelyabinsk(self, tolerance=0.1):\n conditions = [8.5, 19.2e3, 3300, 4e6, 18.3*np.pi/180]\n system = asteroid(*conditions)\n system.solve_ode()\n peak = [system.burst['height (km)'], system.burst['ke Lost (kt/km)']]\n peak = np.array(peak)\n\n obs_peak = np.array([29.5578, 81.505])\n diff = abs(peak - obs_peak)\n assert np.all(diff < obs_peak * tolerance), \\\n \"chelyabinsk simulation does not match imperical data\"", "def _hill_diff(self, position):\n if position < 0:\n return 2 * position + 1\n else:\n return (1/math.sqrt(1 + 5 * position ** 2)\n - 5 * position ** 2 * (1 + 5 * position ** 2)**-1.5)", "def test_phantom_roll(self):\n self.assertAlmostEqual(self.cheese.catphan_roll, self.expected_roll, delta=0.3)", "def test_t(self):\n assert np.isclose(self.stepper.t, self.final_t)", "def test_EvaluatorWallHarmonic_energy(make_state):\n state = make_state['state']\n springConstant = 5.0\n cutoff = 10.0\n harmonicWall = EvaluatorWallHarmonic(springConstant, cutoff)\n\n distancesFromWall = [1.57079632679, # pi/2\n 3.14159265359, # pi\n 5.0, # 5\n 6.28318530718, # 2 * pi\n 9.32737905309] # sqrt(87)\n\n def energy(distanceFromWall):\n if (distanceFromWall < cutoff):\n return 0.5 * springConstant * ((cutoff - distanceFromWall) ** 2.0)\n else:\n return 0.0\n\n # test for each value of distance from the wall\n tolerance = 1e-10\n\n for _, item in enumerate(distancesFromWall):\n calculatedEnergy = harmonicWall.energy(item)\n expectedEnergy = energy(item)\n formatCode = \"{:<30s} {:>18.14f}\"\n print(formatCode.format(\"calculatedEnergy: \", calculatedEnergy))\n print(formatCode.format(\"expectedEnergy: \", expectedEnergy))\n\n absoluteDifference = np.abs(calculatedEnergy - expectedEnergy)\n\n relativeError = absoluteDifference / (np.abs(expectedEnergy))\n print((formatCode+\"\\n\").format(\"percentError: \", relativeError * 100.0))\n\n assert(relativeError <= tolerance)", "def test_single_caps(self):\n result_h = mmc.find_molar_mass('H')\n actual_h = 1.007940\n self.assertTrue(abs(percent_error(result_h, actual_h)) < self.ERR_THRESHOLD)", "def _hill_diff_diff(self, position):\n if position < 0:\n return 2\n else:\n return position * ((75 * (position ** 2)/((1 + 5 * position**2)**2.5)) - 5/((1 + 5 * position ** 2)**2.5)) \\\n - 10 * position/((1 + 5 * position ** 2)**1.5)", "def test_W_start(self):\t\t\n self.assertAlmostEqual(attempt.W[0], 12)", "def test_AKs_correct_preflop_odds(self):\n self.assertEqual(self.hand.preFlopOdds10, 20.7)", "def test_lorentz_broadening():\n v0 = 100\n I = 10\n w = 1.0\n\n assert lorentz(v0, v0, I, w) == 
approx(5)", "def calc_tolerance(wt):\n return 1 - wt", "def implicit_half_kick(self):\n stored_momentum = self.momentum.clone()\n rejected = False\n best_diff = torch.tensor(100.)\n for iter in range(self._max_fixed_point_iterations):\n old_momentum = self.momentum.clone()\n self.parameters = self.parameters.detach().requires_grad_(True)\n hamiltonian = self.get_hamiltonian()\n d_hamiltonian = torch.autograd.grad(hamiltonian,self.parameters,retain_graph = True)[0]\n self.momentum = stored_momentum - 0.5 * self.stepsize * d_hamiltonian\n if self._is_nanned(self.momentum):\n rejected = True\n self.momentum = old_momentum\n if self.verbose:\n print('Nanned during momentum update {}'.format(iter))\n break\n diff = torch.max((old_momentum - self.momentum) ** 2)\n if diff < self._fixed_point_threshold:\n break\n if diff<best_diff:\n best_diff = diff\n elif iter == self._max_fixed_point_iterations-1:\n rejected = True\n if self.verbose:\n print('Exceeded maximum iterations during momentum update')\n break\n# print('Warning: reached {} iterations in momentum update. Smallest iteration ({}) was selected'.format(self._max_fixed_point_iterations, best_diff.item()))\n momentum = self.momentum\n return momentum, rejected", "def nearest_test_pulse(self):", "def test_weierstrass(self):\n fun = get_problem('weierstrass', self.dimension, -100, 100)\n self.assertAlmostEqual(fun(self.array10), 0.0, delta=1e-4)", "def test_schaffer_n4(self):\n fun = get_problem('schaffer4', lower=-100, upper=100)\n self.assertAlmostEqual(fun(self.array10[:2]), 0.97545, delta=1e-4)", "def testMatchSwarpNearestExposure(self):\n self.compareToSwarp(\"nearest\", useWarpExposure=True, atol=60)", "def test_EvaluatorWallHarmonic_force(make_state):\n state = make_state['state']\n springConstant = 5.0\n cutoff = 10.0\n harmonicWall = EvaluatorWallHarmonic(springConstant, cutoff)\n\n # distancesFromWall; make it so that a couple results not representable as an integer\n distancesFromWall = [1.57079632679, # pi/2\n 3.14159265359, # pi\n 5.0, # 5\n 6.28318530718, # 2 * pi\n 9.32737905309] # sqrt(87)\n\n # forceDirection must have a magnitude of 1.0;\n # -- in DASH, this is normalized on instantiation of FixWallHarmonic\n forceDirection = Vector(1.0, 0.0, 0.0)\n\n # put explicitly the form of the expected force\n def force(distanceFromWall, direction):\n if (distanceFromWall <= cutoff):\n return direction * springConstant * np.abs((cutoff - distanceFromWall))\n else:\n return Vector(0.0, 0.0, 0.0)\n\n\n # test for each value of distance from the wall\n tolerance = 1e-10\n\n for _, item in enumerate(distancesFromWall):\n calculatedForce = harmonicWall.force(item, forceDirection)\n expectedForce = force(item, forceDirection)\n formatCode = '{:<30s} {:>18.14f} {:>18.14f} {:>18.14f}'\n print(formatCode.format(\"calculatedForce: \", calculatedForce[0], calculatedForce[1], calculatedForce[2]))\n print(formatCode.format(\"expectedForce: \", expectedForce[0], expectedForce[1], expectedForce[2]))\n\n absoluteDifference = (calculatedForce - expectedForce).len()\n\n relativeError = absoluteDifference / (expectedForce.len())\n print(\"{:<30s} {:>18.14f}\\n\".format(\"percent error: \", relativeError * 100.0))\n\n assert(relativeError <= tolerance)", "def testHeuristic(self):\n result = Sigmoid.heuristic(self, 12)\n self.assertEqual(0.28867513459481287, result)", "def test_expanded_griewank_plus_rosenbrock(self):\n fun = get_problem('expanded_griewank_plus_rosenbrock', self.dimension, -100, 100)\n self.assertAlmostEqual(fun(self.array), 2.2997, 
delta=1e2)", "def test_nearest_boundary_odd():\n assert _nearest_boundary(10, 19, 14, 0) == 0\n assert _nearest_boundary(10, 19, 14, 1) == 1", "def constant_r_success(r_target, tol):\n def isdone(node_dict):\n \"\"\"return a delta heading value to best turn to heading\"\"\"\n # compute heading difference\n hdiff = heading_diff(r_target, node_dict['pn'].heading)\n # return if we are we close enough\n return abs(hdiff) < abs(tol)\n\n # return the function we just created\n return isdone", "def test_ackley(self):\n fun = get_problem('ackley', self.dimension)\n self.assertAlmostEqual(fun(self.array), 0.0, places=10)", "def test_hgbat(self):\n fun = get_problem('hgbat', self.dimension, -100, 100)\n self.assertAlmostEqual(fun(self.array10), 61.91502622129181, delta=60)", "def test_highway_ramps(self):\n # import the experiment variable from the example\n exp = highway_ramps_example(render=False)\n\n # run the experiment for a few time steps to ensure it doesn't fail\n exp.run(1, 5)", "def test_99_correct_preflop_odds(self):\n self.assertEqual(self.hand.preFlopOdds10, 15.6)", "def wait(self):\n self.set_vals(spin=.2)\n nearest_deg = 0\n nearest_deg_dist = self.perim_dist + 1\n for i, x in enumerate(self.ranges):\n if (x != 0) and (x < nearest_deg_dist):\n nearest_deg = i\n nearest_deg_dist = x\n if nearest_deg_dist < self.perim_dist:\n nearest_deg = ((nearest_deg + 180) % 360) - 180\n self.center(degree=nearest_deg)\n self.current_state = \"follow\"", "def test_ForwardEuler_against_hand_calculations():\n def f(u, t):\n return u\n u, t = ForwardEuler(f, U0=1, T=0.2, n=2)\n exact = np.array([1, 1.1, 1.21]) # hand calculations\n error = np.abs(exact - u).max()\n success = error < 1E-14\n assert success, '|exact - u| = %g != 0' % error", "def hanging_man(self):\n self.data['hanging_man'] = (((self.data['High'] - self.data['Low']) > ((self.data['Open'] - self.data['Close'])*4)) & \\\n ((self.data['Close'] - self.data['Low']) / ((.001 + self.data['High'] - self.data['Low']) >= 0.75)) &\\\n ((self.data['Open'] - self.data['Low']) / ((.001 + self.data['High'] - self.data['Low']) >= .075)))", "def test_ridge(self):\n fun = get_problem('ridge', dimension=self.dimension, lower=-64, upper=64)\n self.assertAlmostEqual(fun(self.array10), 371.0, delta=1e-4)", "def test_comp_angle_wind_eq(self, test_dict):\n test_obj = test_dict[\"test_obj\"]\n result = test_obj.slot.comp_angle_wind_eq()\n\n a = result\n b = test_dict[\"Aw\"]\n msg = \"Return \" + str(a) + \" expected \" + str(b)\n self.assertAlmostEqual((a - b) / a, 0, delta=DELTA, msg=msg)", "def test_57o_correct_preflop_odds(self):\n self.assertEqual(self.hand.preFlopOdds10, 7.9)", "def heuristic_3_correction(h: float, root_h: float):\n # resultant =\n resultant = 1 - ((h / root_h) if h < root_h else 0)\n logger.debug(f'Corrected heuristic value 3: {resultant}')\n return resultant", "def test_percent_dewpoint_from_relative_humidity():\n td = dewpoint_from_relative_humidity(10.6 * units.degC, 37 * units.percent)\n assert_almost_equal(td, 26. 
* units.degF, 0)", "def _wait_for_pole_at_rest(self, thold_ang_vel: float = 0.1 / 180.0 * np.pi):\n cnt = 0\n while cnt < 1.5 / self._dt:\n # Get next measurement\n meas = self._qsoc.snd_rcv(np.zeros(self.act_space.shape))\n\n if np.abs(meas[2]) < thold_ang_vel and np.abs(meas[3]) < thold_ang_vel:\n cnt += 1\n else:\n cnt = 0", "def test_EvaluatorWallHarmonic_energy_device(make_state):\n state = make_state['state']\n springConstant = 5.0\n cutoff = 10.0\n harmonicWall = EvaluatorWallHarmonic(springConstant, cutoff)\n\n distancesFromWall = [1.57079632679, # pi/2\n 3.14159265359, # pi\n 5.0, # 5\n 6.28318530718, # 2 * pi\n 9.32737905309] # sqrt(87)\n\n def energy(distanceFromWall):\n if (distanceFromWall < cutoff):\n return 0.5 * springConstant * ((cutoff - distanceFromWall) ** 2.0)\n else:\n return 0.0\n\n # test for each value of distance from the wall\n tolerance = 1e-10\n\n for _, item in enumerate(distancesFromWall):\n calculatedEnergy = harmonicWall.energy_device(item)\n expectedEnergy = energy(item)\n formatCode = \"{:<30s} {:>18.14f}\"\n print(formatCode.format(\"calculatedEnergy: \", calculatedEnergy))\n print(formatCode.format(\"expectedEnergy: \", expectedEnergy))\n\n absoluteDifference = np.abs(calculatedEnergy - expectedEnergy)\n\n relativeError = absoluteDifference / (np.abs(expectedEnergy))\n print((formatCode+\"\\n\").format(\"percentError: \", relativeError * 100.0))\n\n assert(relativeError <= tolerance)", "def test_modified_schwefel(self):\n fun = get_problem('modified_schwefel', self.dimension, -100, 100)\n self.assertAlmostEqual(fun(self.array10), 6.9448853328785844, delta=350)", "def test_comp_surface_wind(self, test_dict):\n test_obj = test_dict[\"test_obj\"]\n result = test_obj.slot.comp_surface_wind()\n\n a = result\n b = test_dict[\"SW_exp\"]\n msg = \"Return \" + str(a) + \" expected \" + str(b)\n self.assertAlmostEqual((a - b) / a, 0, delta=DELTA, msg=msg)", "def test_G_H(self):\r\n chkmtx = (numpy.dot(golay.DEFAULT_G, golay.DEFAULT_H.T) % 2)\r\n self.assertTrue((chkmtx == 0).all())", "def minimum_spanning_arborescence(sol):", "def test_c(self):\n self.failIf(cgs.speed_of_light/mks.speed_of_light!=100)", "def test_isentropic_pressure_p_increase_rh_out():\n lev = [85000., 90000., 95000., 100000.] * units.Pa\n tmp = np.ones((4, 5, 5))\n tmp[0, :] = 288.\n tmp[1, :] = 290.\n tmp[2, :] = 292.\n tmp[3, :] = 296.\n tmpk = tmp * units.kelvin\n rh = np.ones((4, 5, 5))\n rh[0, :] = 20.\n rh[1, :] = 40.\n rh[2, :] = 80.\n rh[3, :] = 100.\n relh = rh * units.percent\n isentlev = 296. * units.kelvin\n isentprs = isentropic_interpolation(isentlev, lev, tmpk, relh)\n truerh = 100. 
* units.percent\n assert_almost_equal(isentprs[1], truerh, 3)", "def test_far_and_near(module_class: mantrap.modules.base.OptimizationModule.__class__,\n env_class: mantrap.environment.base.GraphBasedEnvironment.__class__):\n env = env_class(torch.tensor([-5, 100.0]), ego_type=mantrap.agents.IntegratorDTAgent, y_axis=(-100, 100))\n env.add_ado(position=torch.zeros(2))\n\n start_near, end_near = torch.tensor([-5, 0.1]), torch.tensor([5, 0.1])\n ego_path_near = mantrap.utility.maths.straight_line(start=start_near, end=end_near, steps=11)\n ego_trajectory_near = env.ego.expand_trajectory(ego_path_near, dt=env.dt)\n\n start_far, end_far = torch.tensor([-5, 100.0]), torch.tensor([5, 10.0])\n ego_path_far = mantrap.utility.maths.straight_line(start=start_far, end=end_far, steps=11)\n ego_trajectory_far = env.ego.expand_trajectory(ego_path_far, dt=env.dt)\n\n module = module_class(t_horizon=10, env=env)\n objective_near = module.objective(ego_trajectory_near, ado_ids=[], tag=\"test\")\n objective_far = module.objective(ego_trajectory_far, ado_ids=[], tag=\"test\")\n assert objective_near >= objective_far", "def _ftolCheck(self):\n oldLoss = biggestRecentLoss(self.loss, self.lookback)\n newLoss = float(self.loss[-1])\n fracDiff = 2 * (oldLoss - newLoss)/(oldLoss + newLoss)\n \n if fracDiff < self.ftol:\n \n self.converged = True", "def test_sense_distance(self):\n\n\t\tmeasurements = [29, 29, 28]\n\t\tself.driver.us_dist.side_effect = lambda x: measurements.pop()\n\t\texpected_measurement = int(ultrasonic_sensor_error(29))\n\n\t\tself.assertEqual(self.s.sense_distance(60), expected_measurement)\n\t\tself.mount.move.assert_called_once_with(x=60)", "def test_benjamini_hochberg_step_down(self):\r\n # r values\r\n # q = c(0.64771481, 0.93517796, 0.7169902 , 0.18223457, 0.26918556,\r\n # 0.1450153 , 0.22448242, 0.74723508, 0.89061034, 0.74007906)\r\n # p.adjust(q, method='BH')\r\n # [1] 0.9340439 0.9351780 0.9340439 0.6729639 0.6729639 0.6729639 0.6729639\r\n # [8] 0.9340439 0.9351780 0.9340439\r\n pvals = array([0.64771481, 0.93517796, 0.7169902, 0.18223457,\r\n 0.26918556, 0.1450153, 0.22448242, 0.74723508, 0.89061034,\r\n 0.74007906])\r\n exp = array([0.9340439, 0.9351780, 0.9340439, 0.6729639, 0.6729639,\r\n 0.6729639, 0.6729639, 0.9340439, 0.9351780, 0.9340439])\r\n obs = benjamini_hochberg_step_down(pvals)\r\n self.assertFloatEqual(obs, exp)\r\n # example 2\r\n pvals = array([1.32305426, 1.9345059, 0.87129877, 1.89957702,\r\n 1.85712616, 0.68757988, 0.41248969, 0.20751712, 1.97658599,\r\n 1.06209437])\r\n exp = array([1., 1., 1., 1., 1., 1., 1., 1., 1., 1.])\r\n obs = benjamini_hochberg_step_down(pvals)\r\n self.assertFloatEqual(obs, exp)", "def test_michalewicz(self):\n fun = get_problem('michalewicz', dimension=2, lower=0, upper=np.pi)\n self.assertAlmostEqual(fun(np.asarray([2.20, 1.57])), -1.8013, delta=1e-3)", "def test_williams_correction(self):\r\n n = 100\r\n a = 10\r\n G = 10.5783\r\n exp = 10.387855973813421\r\n self.assertFloatEqual(williams_correction(n, a, G), exp)\r\n # test with an example from Sokal and Rohlf pg 699\r\n n = 241\r\n a = 8\r\n G = 8.82396\r\n exp = 8.76938\r\n self.assertFloatEqual(williams_correction(n, a, G), exp)", "def step():\n x_rand = sample()\n x_nearest = new_nearest_neighbour(x_rand)\n x_new = steer(x_nearest, x_rand)\n if obstacle_free(x_nearest, x_new):\n X_near = new_neighbourhood(x_new)\n x_min = x_nearest\n c_min = x_nearest.cost + x_nearest.dist_to(x_new)\n for x_near in X_near:\n if obstacle_free(x_near, x_new) and (x_near.cost + 
x_near.dist_to(x_new) < c_min):\n x_min = x_near\n c_min = (x_near.cost + x_near.dist_to(x_new) < c_min)\n x_new_node = add_node(x_new, x_min, True)\n for x_near in X_near:\n if obstacle_free(x_near, x_new) and (x_new_node.cost + x_near.dist_to(x_new) < x_near.cost):\n x_near.change_parent(x_new_node)\n # Here I check for goal paths and draw the circle\n updated = False\n if shared.root_path:\n updated = goal_path_resolve(shared.root_path[0])\n updated = updated or goal_path_resolve(shared.nodes[-1])\n if updated:\n diameter = shared.root_path_length\n center = ((shared.root_path[0].x + shared.root_path[-1].x) / 2,\n (shared.root_path[0].y + shared.root_path[-1].y) / 2)\n if shared.region:\n shared.region.remove_from_batch()\n shared.region = ellipse.Ellipse(center[0], center[1], diameter)\n shared.region.add_to_batch()", "def checkRing(self, verbose = 0):\n angleSum = self.tfs.ANGLE.sum()\n if self.verbose: print (\"check\")\n print (\"---------------------------------- \\n Checking, if ring is closed: \\n\", \"angleSum = \", angleSum)\n twoPi = 2*pi\n \n if angleSum != twoPi:\n fudge = 2*pi - angleSum\n print (\" ** Ring not closed - offset of: \", fudge)", "def testgradsorientation(self):\r\n # since z-coordinates of atomcoords are all 0 for dvb, z-values of grads should be all 0\r\n assert numpy.alltrue(numpy.abs(self.data.grads[:,:,2]) < 1e-14)", "def testCorrectForTwoAtomCellWithoutPeriodicityNEEDED(self):\n\t\texpDist = 0.01*10\n\t\tself._checkExpMatchesActual(expDist)", "def u_turn(self, direction, diameter_in):\n \n# pdb.set_trace()\n # Calculate radius of turn for the inside wheel.\n r_in = diameter_in / 2\n\n # Outside radius is 20 inches from inside radius.\n r_out = r_in + MuleBot.WHEEL_BASE_LENGTH\n \n # Outside travel distance\n travel = r_out * 3.14159\n travel_revolutions = travel / MuleBot.CIRCUM_IN\n \n r_ratio = r_out / r_in\n #r_ratio_half = r_ratio / 2\n\n speed_multiplier = MuleBot.MAX_RPM / r_ratio\n\n outside_rpm = r_ratio * speed_multiplier\n inside_rpm = speed_multiplier\n \n \n # \n # minutes at outside_rpm\n minutes = travel_revolutions / outside_rpm\n seconds = minutes * MuleBot.SECONDS_PER_MINUTE\n \n # Something isn't quite perfect.\n if direction == 'left':\n if diameter_in < 25:\n seconds -= 1\n else:\n seconds -= 2\n else:\n if diameter_in < 25:\n seconds += 1\n else:\n seconds += 2\n\n if direction == 'left':\n v_l = self.rpm_to_rps(inside_rpm)\n v_r = self.rpm_to_rps(outside_rpm)\n else:\n v_r = self.rpm_to_rps(inside_rpm)\n v_l = self.rpm_to_rps(outside_rpm)\n\n #print(\"2inside: rpm: \", inside_rpm)\n #print(\"2outside: rpm: \", outside_rpm)\n \n #print(\"2.1: v_l: \", v_l)\n #print(\"2.1: v_r: \", v_r)\n\n # Set wheel drive rates.\n self.set_wheel_drive_rates(v_l, v_r)\n\n # Sleep during the turn.\n time.sleep(seconds)\n\n # Stop\n self.stop()\n \n # Move forward 24 inches.\n self.forward(24)", "def testSpeciesRichnessNear(self):\n self.assertAlmostEqual(1.0, self.tree1.get_number_individuals() / self.tree2.get_number_individuals(), 0)", "def check_offset(self):\n\n for d in range(self.n_dmps):\n if abs(self.y0[d] - self.goal[d]) < 1e-4:\n self.goal[d] += 1e-4", "def test_newton_rhapson_system(testFunctions, tol, printFlag): \n pass", "def test_hermitian_expectation(self, device, tol):\n n_wires = 2\n dev = device(n_wires)\n dev_def = qml.device(\"default.qubit\", wires=n_wires)\n\n if dev.shots is not None:\n pytest.skip(\"Device is in non-analytical mode.\")\n\n if \"Hermitian\" not in dev.observables:\n pytest.skip(\"Device does not support the 
Hermitian observable.\")\n\n if dev.name == dev_def.name:\n pytest.skip(\"Device is default.qubit.\")\n\n theta = 0.432\n phi = 0.123\n A_ = np.array(\n [\n [-6, 2 + 1j, -3, -5 + 2j],\n [2 - 1j, 0, 2 - 1j, -5 + 4j],\n [-3, 2 + 1j, 0, -4 + 3j],\n [-5 - 2j, -5 - 4j, -4 - 3j, -6],\n ]\n )\n A_.requires_grad = False\n\n def circuit(theta, phi):\n qml.RX(theta, wires=[0])\n qml.RX(phi, wires=[1])\n qml.CNOT(wires=[0, 1])\n return qml.expval(qml.Hermitian(A_, wires=[0, 1]))\n\n qnode_def = qml.QNode(circuit, dev_def)\n qnode = qml.QNode(circuit, dev)\n\n grad_def = qml.grad(qnode_def, argnum=[0, 1])\n grad = qml.grad(qnode, argnum=[0, 1])\n\n assert np.allclose(qnode(theta, phi), qnode_def(theta, phi), atol=tol(dev.shots))\n assert np.allclose(grad(theta, phi), grad_def(theta, phi), atol=tol(dev.shots))", "def test_jaro_winkler():\n assert textdistance.jaro_winkler(\"campell\", \"campbell\") == pytest.approx(0.9792, abs=0.01)\n assert textdistance.jaro_winkler(\"shakelford\", \"shakleford\") == pytest.approx(0.9848, abs=0.01)\n assert textdistance.jaro_winkler(\"dwayne\", \"duane\") == pytest.approx(0.84, abs=0.01)", "def _check_redshift(self, red):\n if np.min(np.abs(red - self.zout)) > 0.01:\n return 0\n return 1", "def test_fill_glass__with_overflow__returns_expected(number):\n glass = moet.create_glass(\"A\")\n overflow = glass.fill(number)\n assert overflow == number - 250\n assert glass.quantity == 250", "def test_H_hat(self):\n\t\tposition = [0.0, 1.57079, 3.14159, 4.71238, 6.28318, 7.85398, 9.42477]\n\t\tpotential = [0.0, 6.0, 0.0, -6.0, 0.0, 6.0, 0.0]\n\t\tc = 1\n\t\tposition = tf.constant(position, shape = [1, len(position)], dtype = tf.float32)\n\t\tpotential = tf.constant(potential, shape = [1, len(potential)], dtype = tf.float32)\n\t\tbasis = schrodinger.create_basis(5)\n\t\tv = schrodinger.v0(position, potential, basis)\n\t\tcoeff = schrodinger.coefficient(position, basis)\n\t\tv0_hat = tf.linalg.solve(coeff, v)\n\t\tH = schrodinger.H_hat(c, len(basis), v0_hat)\n\t\tself.assertEqual(coeff.get_shape(), [len(basis), len(basis)])", "def test_setMassFrac(self):\n target35 = 0.2\n self.fuel.setMassFrac(\"U235\", target35)\n self.assertAlmostEqual(self.fuel.getMassFrac(\"U235\"), target35)", "def test_relaxation_end(self):\n tau = 50.0\n mrate = 40.0\n Mrate = 120.0\n\n tmax = 50.0\n dt = 0.1\n relaxation = 20.0\n\n self.rule.tau = tau\n self.rule.min_rate = mrate\n self.rule.max_rate = Mrate\n self.rule.compress_rates = False\n self.rule.relaxation = relaxation\n\n self.motor.error_fct = lambda _: np.ones(self.Nsrc)\n\n M = simulation.StateMonitor(self.rule, 'out')\n\n sim = simulation.Simulation(self.source, self.motor, self.rule, M, dt=dt)\n sim.run(tmax)\n\n mask = (M.t > tmax - relaxation/2)\n mavg = 0.5*(mrate + Mrate)\n\n self.assertAlmostEqual(np.mean(np.abs(M.out[:, mask] - mavg)), 0.0)", "def test_Z_end(self):\t\t\n self.assertAlmostEqual(attempt.Z[-1], 41.47999849170943)", "def test_stokes_drag():\n assert DM.stokes_drag(fluid_velocity=1.0, particle_velocity=0.0,\n diameter=1.0, rho=1.0, fluid_viscosity=1.0) == 18.0", "def look_ahead_heuristic(game, player):\n if game.is_loser(player):\n return float('-inf')\n\n if game.is_winner(player):\n return float('inf')\n\n own_legal_moves = game.get_legal_moves(player)\n own_moves = len(own_legal_moves)\n for m in own_legal_moves:\n own_moves += len(game.__get_moves__(m))\n\n opp_legal_moves = game.get_legal_moves(game.get_opponent(player))\n opp_moves = len(opp_legal_moves)\n for m in opp_legal_moves:\n opp_moves += 
len(game.__get_moves__(m))\n\n return float(own_moves - opp_moves)", "def use_manhatten_heur(self):\r\n\t\tdistance = 0\r\n\r\n\t\tfor row in range(self.n):\r\n\t\t\tfor col in range(self.n):\r\n\t\t\t\tintendedX, intendedY = BoardClass.goalTileLocations[self.board[row][col]]\r\n\t\t\t\tdistance += (abs(row - intendedX) + abs(col - intendedY))\r\n\r\n\t\tself.heuristic = distance", "def test_hanoi(disks=10):\n t = Hanoi(disks, verbose=False)\n assert t.tower == {'a': list(range(disks, 0, -1)), 'b': [], 'c': []}\n t.solve_hanoi()\n assert t.tower == {'a': [], 'b': [], 'c': list(range(disks, 0, -1))}", "def dichotomous_search(loss_function: rosenbrock, start: point, direction: list, epsilon=0.1) -> float:\n a, b = advance_retreat_method(loss_function, start, direction)\n\n # find the minimum\n e = epsilon / 3\n p, q = (a + b) / 2 - e, (a + b) / 2 + e\n while abs(a - b) > epsilon:\n f_p = loss_function.f(start + point(direction[0] * p, direction[1] * p))\n f_q = loss_function.f(start + point(direction[0] * q, direction[1] * q))\n if f_p < f_q:\n b = q\n else:\n a = p\n p, q = (a + b) / 2 - e, (a + b) / 2 + e\n\n return (a + b) / 2", "def next_move(hunter_position, hunter_heading, target_measurement, max_distance, OTHER = None):\n # This function will be called after each time the target moves.\n\n # The OTHER variable is a place for you to store any historical information about\n # the progress of the hunt (or maybe some localization information). Your return format\n # must be as follows in order to be graded properly.\n\n # helper function to map all angles onto [-pi, pi]\n def angle_truncate(a):\n while a < 0.0:\n a += pi * 2\n return ((a + pi) % (pi * 2)) - pi\n\n #print \"true heading\"\n #print test_target.heading\n I = matrix([[1, 0, 0],\n [0, 1, 0],\n [0, 0, 1]]) #identity matrix\n\n R = matrix([[measurement_noise, 0], [0, measurement_noise]])\n\n H = matrix([[0, 1, 0],\n [0, 0, 1]]) #Jacobian of the measurement function\n\n u = matrix([[0],\n [0],\n [0]])\n\n F = []\n\n heading = 0 #WILD ASS GUESS\n\n if OTHER is not None:\n print \"-----------------\"\n current_measurement = target_measurement\n last_measurement = OTHER['last_measurement']\n OTHER['measurements'].append(target_measurement)\n #I know this is stupid but I just want to save the data... 
Memory management be damned\n\n heading = atan2(target_measurement[1] - last_measurement[1], target_measurement[0] - last_measurement[0])\n print \"calculated heading\"\n print heading\n X = OTHER['X']\n P = OTHER['P']\n\n if 'last_heading' not in OTHER:\n OTHER['last_heading'] = heading\n xy_estimate = [X.value[1][0], X.value[2][0]]\n OTHER['last_measurement'] = target_measurement\n else:\n print \"OTHER is:\", OTHER\n turning_angle = heading - OTHER['last_heading']\n print \"turning angle:\", turning_angle\n print \"turning angle actual:\", target.turning\n #last_heading = OTHER['last_heading']\n\n\n #do some guessing\n D = distance_between(target_measurement, last_measurement)\n print \"this is the D\"\n print D\n theta = (heading+turning_angle)%(2*pi)\n print \"theta:\", theta\n print \"theta - heading current:\", theta - target.heading\n\n #estimation step\n\n #is it \"last heading\" or \"theta\"????\n # X = matrix([[theta],\n # [X.value[1][0] + D * cos(theta)],\n # [X.value[2][0] + D * sin(theta)]])\n\n delta_x = D * cos(theta)\n delta_y = D * sin(theta)\n\n nextX = target_measurement[0] + delta_x\n nextY = target_measurement[1] + delta_y\n\n # nextX = X.value[1][0] + delta_x\n # nextY = X.value[2][0] + delta_y\n\n #print \"the distance to the next guessed point is:\", distance_between([nextX,nextY], measurement)\n\n X = matrix([[theta],\n [nextX],\n [nextY]])\n\n print \"I'm projecting X out to:\", X\n print \"Note, the current robot stats:\", target.heading, target.x, target.y\n\n F = matrix([[1, 0, 0],\n [-D*sin(theta), 1, 0],\n [D*cos(theta), 0, 1]])\n\n P = OTHER['P']\n #X = OTHER['X']\n\n\n H = matrix([[0, 1, 0],\n [0, 0, 1]])\n\n # #Prediction\n # X = (F * X) + u\n # P = F * P * F.transpose() # + Q\n\n P = F * P * F.transpose() # + Q\n\n #measurement update\n observations = matrix([[target_measurement[0]],\n [target_measurement[1]]]) #truth\n Z = H*X\n Y = observations - Z\n print \"this is Y\"\n print Y\n S = H * P * H.transpose() + R\n K = P * H.transpose() * S.inverse()\n X = X + (K*Y)\n\n P = (I - (K * H)) * P\n\n X.value[0][0] = angle_truncate(X.value[0][0])\n\n\n OTHER['X'] = X\n\n OTHER['P'] = P\n x_estimate = OTHER['X'].value[1][0]\n y_estimate = OTHER['X'].value[2][0]\n print \"Currently, the robot state is:\", target.heading, observations\n print \"This is what Kalman thinks X will be:\", OTHER['X']\n xy_estimate = [x_estimate, y_estimate]\n\n OTHER['last_heading'] = heading\n OTHER['last_measurement'] = target_measurement\n\n\n else:\n #x = theta, x, y\n X = matrix([[0.5],\n [2],\n [4]])\n #convariance matrix\n P = matrix([[1000, 0, 0],\n [0, 1000, 0],\n [0, 0, 1000]])\n OTHER = {'last_measurement': target_measurement, 'X': X, 'P': P, 'measurements': [target_measurement]}\n xy_estimate = [X.value[1][0], X.value[2][0]]\n\n # if not OTHER: # first time calling this function, set up my OTHER variables.\n # measurements = [target_measurement]\n # hunter_positions = [hunter_position]\n # hunter_headings = [hunter_heading]\n # OTHER = (measurements, hunter_positions, hunter_headings) # now I can keep track of history\n # else: # not the first time, update my history\n # OTHER[0].append(target_measurement)\n # OTHER[1].append(hunter_position)\n # OTHER[2].append(hunter_heading)\n # measurements, hunter_positions, hunter_headings = OTHER # now I can always refer to these variables\n\n #plugging in the Hunter to target the next anticipated area for the target\n\n if distance_between(hunter_position, xy_estimate) > max_distance: #if I can't get to the position in time\n # I 
want to go to a known point and keep going there.\n heading_to_target = get_heading(hunter_position, OTHER['measurements'][0]) #grab the first measurement\n heading_difference = heading_to_target - hunter_heading\n turning = heading_difference\n distance = max_distance # full speed ahead!\n print \"I'm moving to the point\"\n if distance_between(hunter_position, OTHER['measurements'][0]) <= max_distance/2:\n distance = 0 #stay put\n heading_to_target = get_heading(hunter_position, OTHER['measurements'][1]) #point at the next one\n heading_difference = heading_to_target - hunter_heading\n turning = heading_difference\n print \"I'm staying at the point in waiting\"\n else:\n heading_to_target = get_heading(hunter_position, xy_estimate)\n heading_difference = heading_to_target - hunter_heading\n turning = heading_difference # turn towards the target\n distance_to_point = distance_between(hunter_position, xy_estimate)\n distance = distance_to_point #I don't want to travel full speed LOL\n print \"ATTACK!\"\n\n return turning, distance, OTHER", "def test_above_freezing_above(self):\n\n expected = True\n actual = temperature.above_freezing(5.2)\n self.assertEqual(expected, actual,\n \"The temperature is above freezing.\")", "def testBearingWest(self):\n\n eps = 1.0e-12\n B = 270 # True bearing\n\n p1 = Point(0.0, 0.0)\n p3 = Point(0.0, 1.0)\n\n b = p3.bearing_to(p1)\n msg = 'Computed southward bearing %d. Expected %d' % (b, B)\n assert numpy.allclose(b, B, rtol=eps, atol=eps), msg", "def test_depolarizing_error_2q_gate(self):\n p_depol = 0.3\n error = depolarizing_error(p_depol, 2, standard_gates=True)\n target_circs = [[{\"name\": \"id\", \"qubits\": [0]}],\n [{\"name\": \"x\", \"qubits\": [0]}],\n [{\"name\": \"y\", \"qubits\": [0]}],\n [{\"name\": \"z\", \"qubits\": [0]}],\n [{\"name\": \"x\", \"qubits\": [1]}],\n [{\"name\": \"y\", \"qubits\": [1]}],\n [{\"name\": \"z\", \"qubits\": [1]}],\n [{\"name\": \"x\", \"qubits\": [0]}, {\"name\": \"x\", \"qubits\": [1]}],\n [{\"name\": \"x\", \"qubits\": [0]}, {\"name\": \"y\", \"qubits\": [1]}],\n [{\"name\": \"x\", \"qubits\": [0]}, {\"name\": \"z\", \"qubits\": [1]}],\n [{\"name\": \"y\", \"qubits\": [0]}, {\"name\": \"x\", \"qubits\": [1]}],\n [{\"name\": \"y\", \"qubits\": [0]}, {\"name\": \"y\", \"qubits\": [1]}],\n [{\"name\": \"y\", \"qubits\": [0]}, {\"name\": \"z\", \"qubits\": [1]}],\n [{\"name\": \"z\", \"qubits\": [0]}, {\"name\": \"x\", \"qubits\": [1]}],\n [{\"name\": \"z\", \"qubits\": [0]}, {\"name\": \"y\", \"qubits\": [1]}],\n [{\"name\": \"z\", \"qubits\": [0]}, {\"name\": \"z\", \"qubits\": [1]}]]\n for j in range(16):\n circ, p = error.error_term(j)\n self.remove_if_found(circ, target_circs)\n if circ == [{\"name\": \"id\", \"qubits\": [0]}]:\n self.assertAlmostEqual(p, 1 - p_depol + p_depol / 16,\n msg=\"Incorrect identity probability\")\n else:\n self.assertAlmostEqual(p, p_depol / 16, msg=\"Incorrect Pauli probability\")\n self.assertEqual(target_circs, [], msg=\"Incorrect unitaries\")", "def test_basic_newton_finder(self):\n forwards = np.array([1.0, 1.0, 1.0, 1.0, 1.0, 1.0])\n strikes = np.array([1.0, 2.0, 1.0, 0.5, 1.0, 1.0])\n expiries = np.array([1.0, 1.0, 1.0, 1.0, 0.5, 2.0])\n discounts = np.array([1.0, 1.0, 1.0, 1.0, 1.0, 1.0])\n init_vols = np.array([2.0, 0.5, 2.0, 0.5, 1.5, 1.5])\n option_signs = np.array([1.0, 1.0, -1.0, -1.0, 1.0, 1.0])\n volatilities = np.array([1.0, 1.0, 1.0, 1.0, 1.0, 1.0])\n prices = np.array([\n 0.38292492, 0.19061012, 0.38292492, 0.09530506, 0.27632639, 0.52049988\n ])\n results = 
implied_vol(\n forwards,\n strikes,\n expiries,\n discounts,\n prices,\n option_signs,\n initial_volatilities=init_vols,\n max_iterations=100)\n implied_vols, converged, failed = self.evaluate(results)\n num_volatilities = len(volatilities)\n self.assertAllEqual(np.ones(num_volatilities, dtype=np.bool), converged)\n self.assertAllEqual(np.zeros(num_volatilities, dtype=np.bool), failed)\n self.assertArrayNear(volatilities, implied_vols, 1e-7)", "def test_EvaluatorWallHarmonic_force_device(make_state):\n state = make_state['state']\n springConstant = 5.0\n cutoff = 10.0\n harmonicWall = EvaluatorWallHarmonic(springConstant, cutoff)\n\n # distancesFromWall; make it so that a couple results not representable as an integer\n distancesFromWall = [1.57079632679, # pi/2\n 3.14159265359, # pi\n 5.0, # 5\n 6.28318530718, # 2 * pi\n 9.32737905309] # sqrt(87)\n\n # forceDirection must have a magnitude of 1.0;\n # -- in DASH, this is normalized on instantiation of FixWallHarmonic\n forceDirection = Vector(1.0, 0.0, 0.0)\n\n # put explicitly the form of the expected force\n def force(distanceFromWall, direction):\n if (distanceFromWall <= cutoff):\n return direction * springConstant * np.abs((cutoff - distanceFromWall))\n else:\n return Vector(0.0, 0.0, 0.0)\n\n\n # test for each value of distance from the wall\n tolerance = 1e-10\n\n for _, item in enumerate(distancesFromWall):\n calculatedForce = harmonicWall.force_device(item, forceDirection)\n expectedForce = force(item, forceDirection)\n formatCode = '{:<30s} {:>18.14f} {:>18.14f} {:>18.14f}'\n print(formatCode.format(\"calculatedForce: \", calculatedForce[0], calculatedForce[1], calculatedForce[2]))\n print(formatCode.format(\"expectedForce: \", expectedForce[0], expectedForce[1], expectedForce[2]))\n\n absoluteDifference = (calculatedForce - expectedForce).len()\n\n relativeError = absoluteDifference / (expectedForce.len())\n print(\"{:<30s} {:>18.14f}\\n\".format(\"percent error: \", relativeError * 100.0))\n\n assert(relativeError <= tolerance)", "def test_calculate_circumsolar_shading():\n # Test for one value of 20% of the diameter being covered\n percentage_distance_covered = 20.\n percent_shading = calculate_circumsolar_shading(\n percentage_distance_covered, model='uniform_disk')\n\n # Compare to expected\n expected_disk_shading_perc = 14.2378489933\n atol = 0\n rtol = 1e-8\n np.testing.assert_allclose(expected_disk_shading_perc, percent_shading,\n atol=atol, rtol=rtol)", "def check_heated_up(error):\n error = np.array(error)\n error = np.abs(error)\n if len(error) < 30 or sum(error[-30:-1]) > 15:\n return False\n else:\n return True", "def test_pos_1024() -> None:\n assert sw.walk_to(1024).distance == 31", "def test_scalar_dewpoint_from_relative_humidity():\n td = dewpoint_from_relative_humidity(10.6 * units.degC, 0.37)\n assert_almost_equal(td, 26. 
* units.degF, 0)", "def test_moist_lapse_downwards():\n temp = moist_lapse(units.Quantity([600, 700], 'mbar'), units.Quantity(0, 'degC'))\n assert_almost_equal(temp, units.Quantity([0, 6.47748353], units.degC), 4)", "def test_parity(float_tolerance, l):\n x = torch.randn(3)\n Y1 = (-1)**l * o3.spherical_harmonics(l, x, False)\n Y2 = o3.spherical_harmonics(l, -x, False)\n assert (Y1 - Y2).abs().max() < float_tolerance", "def test_griewank(self):\n fun = get_problem('griewank', self.dimension)\n self.assertEqual(fun(self.array), 0.0)", "def run(self):\n move_cmd = Twist()\n move_cmd.linear.x = 0\n move_cmd.angular.z = 0\n\n while not rospy.is_shutdown():\n # bump logic as previous psets\n if self.bump:\n self.bump = False\n # move backwards\n move_cmd.linear.x = LIN_SPEED * -1\n for i in range(5):\n self.cmd_vel.publish(move_cmd)\n self.rate.sleep()\n rospy.sleep(1)\n\n # turn randomly in a random direction\n move_cmd.linear.x = 0\n move_cmd.angular.z = ROT_SPEED * ((-1)**random.randint(1,2))\n\n if self.bump == 0:\n move_cmd.angular.z = ROT_SPEED * (-1)\n elif self.bump == 2:\n move_cmd.angular.z = ROT_SPEED\n\n for i in range(random.randint(5,15)):\n self.cmd_vel.publish(move_cmd)\n self.rate.sleep()\n rospy.sleep(1)\n\n move_cmd.angular.z = 0\n # if somethin in the screen is really close\n elif self.min_val < MIN_THRESHOLD:\n # make sure it's not the sock/leg warmer, and is actually an obstacle\n if self.obstacle_x <= self.x or self.obstacle_x >= self.x + self.w or abs(self.min_val - self.dist) > 0.1:\n move_cmd.linear.x = 0\n # turn away\n if self.obstacle_x > 320:\n move_cmd.angular.z = ROT_SPEED / 2\n else:\n move_cmd.angular.z = -ROT_SPEED / 2\n # self.min_val = 100\n for i in range(10):\n self.cmd_vel.publish(move_cmd)\n self.rate.sleep()\n self.last_move = rospy.Time.now()\n else:\n rospy.loginfo(\"Perimeter \" + str(self.perimeter_size))\n rospy.loginfo(\"Distance is \" + str(self.dist))\n\n # normalize angle error to rot speed\n ang_error_norm = -float(self.ang_error) / 100\n\n # set min and max rot speed\n if ang_error_norm < -ROT_SPEED:\n ang_error_norm = -ROT_SPEED\n elif ang_error_norm > ROT_SPEED:\n ang_error_norm = ROT_SPEED\n\n move_cmd.angular.z = ang_error_norm\n\n if RACE == False:\n # normalize dist error to lin speed\n self.dist_error = self.dist - 0.5\n dist_error_norm = float(self.dist_error) / 2\n\n if dist_error_norm < 0:\n # if NaN (self.dist gets set to -1)\n if dist_error_norm > -0.7:\n self.lost = 0\n # if too close\n else:\n self.lost += 1\n # if it's been more than 2 seconds\n if rospy.Time.now() > self.last_move + rospy.Duration(2):\n dist_error_norm = 0\n # if been lost for a while rotate and beep\n if self.lost > 20:\n move_cmd.angular.z = ROT_SPEED / 4\n self.beep.publish(4)\n else:\n # continue as previous\n dist_error_norm = self.last_speed\n else:\n # set max lin speed\n if dist_error_norm > LIN_SPEED:\n dist_error_norm = LIN_SPEED\n\n # reset lost stats\n self.lost = 0\n self.last_speed = dist_error_norm\n self.last_move = rospy.Time.now()\n\n move_cmd.linear.x = dist_error_norm\n else:\n move_cmd.linear.x = LIN_SPEED\n\n self.cmd_vel.publish(move_cmd)", "def double_middle_drop(progress):\n eps1 = 0.75\n eps2 = 0.25\n if 1 - progress < eps1:\n if 1 - progress < eps2:\n return eps2 * 0.5\n return eps1 * 0.1\n return 1 - progress", "def test_uneven_sw():\n B = 100\n t = 1\n H = 30\n E = 20000\n sections = ((2 * B, t, 0, E), (B, t, H - t, E))\n EI, top, bot = bm.EI(sections, E)\n assert 1.95 < abs(bot) / top < 1.96", "def test_bs(self):\r\n res = 
BlackScholes(100, 0.10, 0.5, 1.00)\r\n self.assertEqual(round(res, 2), 23.93)", "def test_newton_rhapson(testFunctions, tol, printFlag): \n pass", "def test_tract_split_housing(self):\n # Validate first new tract from the split tract\n # Tract 35.01\n tract1 = self.geographies.find({ 'geoid': '15003003501' })\n self.assertEqual(tract1.count(), 1)\n tract1 = tract1[0]\n \n split_tract_house_2000 = 3370 \n tract1_house_pct = 0.383 \n tract1_house_2000 = int(tract1_house_pct * split_tract_house_2000)\n tract1_house_2010 = 1353 \n tract1_house_delta = tract1_house_2010 - tract1_house_2000\n tract1_house_pct_change = float(tract1_house_delta) / tract1_house_2000\n\n self.assertAlmostEqual(tract1['xwalk']['15003003500']['HUPCT00'], tract1_house_pct, places=4)\n self.assertAlmostEqual(tract1['data']['2000']['H1']['H001001'], tract1_house_2000)\n self.assertAlmostEqual(float(tract1['data']['2010']['H1']['H001001']), tract1_house_2010)\n self.assertAlmostEqual(float(tract1['data']['delta']['H1']['H001001']), tract1_house_delta)\n self.assertAlmostEqual(float(tract1['data']['pct_change']['H1']['H001001']), tract1_house_pct_change)\n\n # Validate second new part from the split tract\n # Tract 35.02\n tract2 = self.geographies.find({ 'geoid': '15003003502' })\n self.assertEqual(tract2.count(), 1)\n tract2 = tract2[0]\n\n tract2_house_pct = 0.617\n tract2_house_2000 = int(tract2_house_pct * split_tract_house_2000)\n tract2_house_2010 = 2180 \n tract2_house_delta = tract2_house_2010 - tract2_house_2000\n tract2_house_pct_change = float(tract2_house_delta) / tract2_house_2000 \n \n self.assertAlmostEqual(tract2['xwalk']['15003003500']['HUPCT00'], tract2_house_pct, places=4)\n self.assertAlmostEqual(tract2['data']['2000']['H1']['H001001'], tract2_house_2000)\n self.assertAlmostEqual(float(tract2['data']['2010']['H1']['H001001']), tract2_house_2010)\n self.assertAlmostEqual(float(tract2['data']['delta']['H1']['H001001']), tract2_house_delta)\n self.assertAlmostEqual(float(tract2['data']['pct_change']['H1']['H001001']), tract2_house_pct_change)", "def toluene():\n coords = [\n [1.2264, 0.0427, 0.0670],\n [1.0031, -1.3293, 0.0600],\n [-0.2945, -1.8256, -0.0060],\n [-1.3704, -0.9461, -0.0646],\n [-1.1511, 0.4266, -0.0578],\n [0.1497, 0.9292, 0.0066],\n [0.3871, 2.3956, -0.0022],\n [2.2495, 0.4310, 0.1211],\n [1.8510, -2.0202, 0.1071],\n [-0.4688, -2.9062, -0.0109],\n [-2.3926, -1.3347, -0.1157],\n [-2.0006, 1.1172, -0.1021],\n [0.5024, 2.7582, -1.0330],\n [1.2994, 2.6647, 0.5466],\n [-0.4475, 2.9470, 0.4506],\n ]\n coords = [[float(j) / Bohr for j in i] for i in coords]\n\n symbols = [\n \"C\",\n \"C\",\n \"C\",\n \"C\",\n \"C\",\n \"C\",\n \"C\",\n \"H\",\n \"H\",\n \"H\",\n \"H\",\n \"H\",\n \"H\",\n \"H\",\n \"H\",\n ]\n\n atoms = []\n for i, _ in enumerate(coords):\n atoms.append(Atom(symbols[i], position=coords[i]))\n return Molecule(symbols=atoms)", "def brents(f, x0, x1, max_iter=50, tolerance=1e-5):\n \n fx0 = f(x0)\n fx1 = f(x1)\n \n assert (fx0 * fx1) <= 0, \"Root not bracketed\" \n \n if abs(fx0) < abs(fx1):\n x0, x1 = x1, x0\n fx0, fx1 = fx1, fx0\n \n x2, fx2 = x0, fx0\n \n d = np.nan\n mflag = True\n steps_taken = 0\n \n while steps_taken < max_iter and abs(x1-x0) > tolerance:\n fx0 = f(x0)\n fx1 = f(x1)\n fx2 = f(x2)\n \n if fx0 != fx2 and fx1 != fx2:\n L0 = (x0 * fx1 * fx2) / ((fx0 - fx1) * (fx0 - fx2))\n L1 = (x1 * fx0 * fx2) / ((fx1 - fx0) * (fx1 - fx2))\n L2 = (x2 * fx1 * fx0) / ((fx2 - fx0) * (fx2 - fx1))\n new = L0 + L1 + L2\n \n else:\n new = x1 - ( (fx1 * (x1 - x0)) / (fx1 - fx0) )\n \n 
tt1 = (new < ((3 * x0 + x1) / 4) or new > x1)\n tt2 = (mflag == True and (abs(new - x1)) >= (abs(x1 - x2) / 2))\n tt3 = (mflag == False and (abs(new - x1)) >= (abs(x2 - d) / 2))\n tt4 = (mflag == True and (abs(x1 - x2)) < tolerance)\n tt5 = (mflag == False and (abs(x2 - d)) < tolerance)\n if (tt1 or\n tt2 or\n tt3 or\n tt4 or\n tt5):\n new = (x0 + x1) / 2\n mflag = True\n \n else:\n mflag = False\n \n fnew = f(new)\n d, x2 = x2, x1\n \n if (fx0 * fnew) < 0:\n x1 = new\n else:\n x0 = new\n \n if abs(fx0) < abs(fx1):\n x0, x1 = x1, x0\n \n steps_taken += 1\n \n return x1, steps_taken", "def test_fix_fingers_w_thresh(self):\n\t\tdetails = self.watcher.analyze(layers=[17], fix_fingers='clip_xmax', finger_thresh=10.0)\n\t\tactual_alpha = details.alpha.to_numpy()[0]\n\t\tactual_raw_alpha = details.raw_alpha.to_numpy()[0]\n\t\tactual_num_fingers = details.num_fingers.to_numpy()[0]\n\n\t\texpected_alpha = 6.883742\n\t\texpected_raw_alpha = expected_alpha\n\t\texpected_num_fingers = 0\n\t\tself.assertAlmostEqual(actual_alpha,expected_alpha, delta=0.1 )\n\t\tself.assertAlmostEqual(actual_raw_alpha,expected_raw_alpha, delta=0.01 )\n\t\tself.assertEqual(actual_num_fingers,expected_num_fingers)", "def test_GBL_tau_star():\n z = 1.0\n\n # Fully ionized H and He\n x_ionH = 1.0\n x_ionHe = 2.0\n\n cosmo = {}\n cosmo['omega_M_0'] = numpy.array([[0.3],[0.6],[1.0]])\n cosmo['omega_lambda_0'] = 1. - cosmo['omega_M_0']\n cosmo['h'] = 0.65\n cosmo['omega_b_0'] = 0.02 / cosmo['h']**2.\n cosmo['Y_He'] = 0.24\n cd.set_omega_k_0(cosmo)\n\n tau_inst, tau_star = cr.optical_depth_instant(z, \n x_ionH=x_ionH, \n x_ionHe=x_ionHe, \n return_tau_star=True,\n **cosmo)\n print(\"tau_star = %.7f\" % (tau_star))\n print(\"tau_star/(h Omega_b) = %.7f =? 0.061\" % \n (tau_star / (cosmo['h'] * cosmo['omega_b_0'])))\n\n ntest.assert_approx_equal(tau_star / (cosmo['h'] * cosmo['omega_b_0']),\n 0.061,\n 2)\n\n print(\"(1 - Y_He/2) = %.3f =? 0.88\" % (1. - (cosmo['Y_He']/2.)))\n ntest.assert_approx_equal((1. - (cosmo['Y_He']/2.)),\n 0.88,\n 7)\n\n H_0 = cc.H100_s * cosmo['h']\n\n # s^-1 * Mpc s^-1 * Mpc^2 / Mpc^3 msun^-1 s^-2 / Msun -> \n tau_star_explicit = ((1. - (cosmo['Y_He']/2.)) * \n ((3. * H_0 * cosmo['omega_b_0'] * cc.c_light_Mpc_s *\n cc.sigma_T_Mpc) / \n (8. * math.pi * cc.G_const_Mpc_Msun_s * \n (cc.m_p_g/cc.M_sun_g))))\n\n print(\"tau_star_explicit = %.7f =? tau_star\" % (tau_star_explicit))\n ntest.assert_approx_equal(tau_star, tau_star_explicit, 3)", "def test_positive_definite2(dist, alpha, divergence):\n assert divergence(dist, dist, alpha, rvs=[0, 1]) == pytest.approx(0)\n assert hellinger_sum(dist, dist, alpha, rvs=[0, 1]) == pytest.approx(1)", "def close(a,b):\n return abs(a-b) < epsilon", "def test_moist_lapse_nan_ref_press():\n temp = moist_lapse(40 * units.hPa, -20 * units.degC, np.nan * units.hPa)\n assert_nan(temp, units.degC)", "def Catch(X,Tolerance=0):\n if X < (.5-(Tolerance/2)):\n return(0)\n elif X > (.5+(Tolerance/2)):\n return(1)\n else:\n return(.5)", "def heron(num: int):\n try:\n if num < 0:\n raise ZeroDivisionError\n else:\n guess = num\n tolerance = 0.00001\n while abs(guess * guess - num) > tolerance: # continue finding a more accurate guess if the guess is not\n # close enough to the true root, as defined by the tolerance\n guess = (guess + num / guess) / 2 # this formula is a step towards finding the square root of a number\n return float(round(guess, 2))\n except ZeroDivisionError:\n print(\"You have entered a negative integer, that is not valid.\")\n return -1", "def epsilon_delta(self):" ]
[ "0.6039334", "0.5930424", "0.5919679", "0.5912854", "0.58305484", "0.5801236", "0.57621497", "0.5761265", "0.57383764", "0.57260656", "0.5721062", "0.5718691", "0.57167894", "0.5701574", "0.5683872", "0.56769484", "0.56751597", "0.56513155", "0.56409764", "0.5639347", "0.5600166", "0.5595595", "0.55947536", "0.55648094", "0.5562508", "0.554382", "0.553768", "0.55016816", "0.5493007", "0.5487738", "0.54811543", "0.54670274", "0.5465295", "0.54636616", "0.5462555", "0.54621357", "0.54403603", "0.54338336", "0.5430973", "0.54275393", "0.5416691", "0.5403474", "0.54013175", "0.5393482", "0.53908914", "0.53684443", "0.5367628", "0.5366582", "0.5362684", "0.53603977", "0.53571343", "0.5355136", "0.5353467", "0.53439564", "0.5338917", "0.5334645", "0.5330583", "0.53297484", "0.5327329", "0.5319604", "0.53176063", "0.5304038", "0.5300962", "0.5297576", "0.52794546", "0.5278923", "0.5278383", "0.5269465", "0.52635705", "0.5260979", "0.52565384", "0.52501845", "0.5246738", "0.52463764", "0.5246162", "0.5240646", "0.5240567", "0.52356464", "0.5235644", "0.5234966", "0.52333486", "0.5231699", "0.5230116", "0.5227097", "0.52270657", "0.5223344", "0.5217175", "0.5213797", "0.52130395", "0.5202438", "0.519991", "0.51971173", "0.51959497", "0.51901466", "0.5179456", "0.51790196", "0.51785815", "0.5164492", "0.5162965", "0.5161438", "0.51582825" ]
0.0
-1
Fail on ffs.open, but still readout flat.
def test_goto_field_boss_ffs_open_fails(self): sopTester.updateModel('mcp', TestHelper.mcpState['all_off']) cmdState = self.actorState.gotoField cmdState.reinitialize(self.cmd) self.cmd.failOn = 'mcp ffs.open' self._goto_field_boss(21, 102, 1, 1, cmdState, didFail=True, finish=True)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_fileobj_not_closed(self):\n\n f = open(self.data('test0.fits'), 'rb')\n data = fits.getdata(f)\n assert not f.closed\n\n f.seek(0)\n header = fits.getheader(f)\n assert not f.closed", "def test_fileobj_not_closed(self):\n\n f = open(self.data(\"test0.fits\"), \"rb\")\n _ = fits.getdata(f)\n assert not f.closed\n\n f.seek(0)\n _ = fits.getheader(f)\n assert not f.closed\n\n f.close() # Close it now", "def test_file_readas_missing_file(self):\n with (self.assertRaises(IOError)):\n FileReader(self.bogus_path).read_as(\"utf-8\")", "def test_file_read_bin_missing_file(self):\n with (self.assertRaises(IOError)):\n FileReader(self.bogus_path).read_bin()", "def test_read_raw_unsupported_multi(fname, tmp_path):\n fname = tmp_path / fname\n fname.write_text('')\n with pytest.raises(RuntimeError, match='Could not read.*using any'):\n read_raw(fname)", "def test_file_read_utf8_missing_file(self):\n with (self.assertRaises(IOError)):\n FileReader(self.bogus_path).read_utf8()", "def test_file_readlines_utf8_missing_file(self):\n with (self.assertRaises(IOError)):\n FileReader(self.bogus_path).readlines_utf8()", "def test_read_raw_suggested(fname):\n with pytest.raises(ValueError, match='Try reading'):\n read_raw(fname)", "def test_get_all_read_fail(self):\n file_handler = open(self.test_task_filename, 'w')\n file_handler.write('Mock corrupt data')\n file_handler.close()\n os.chmod(self.test_task_filename, 000)\n\n self.assertRaises(IOError, self.task_storage.get_all)", "def test_is_not_delicious_file(self):\r\n bad_file = StringIO.StringIO()\r\n bad_file.write('failing tests please')\r\n bad_file.seek(0)\r\n\r\n self.assertTrue(\r\n not DelXMLImporter.can_handle(bad_file),\r\n \"DelXMLImporter cannot handle this file\")\r\n\r\n bad_file.close()", "def test_is_not_delicious_file(self):\r\n bad_file = StringIO.StringIO()\r\n bad_file.write('failing tests please')\r\n bad_file.seek(0)\r\n\r\n self.assertTrue(\r\n not DelImporter.can_handle(bad_file),\r\n \"DelImporter cannot handle this file\")\r\n\r\n bad_file.close()", "def test_file_read_missing_file(self):\n with (self.assertRaises(IOError)):\n FileReader(self.bogus_path).read()", "def attempt_file_reset(f):\r\n if hasattr(f, 'seek'):\r\n f.seek(0)", "def test_read_fail2(self):\n mod_fn = os.path.join(tests.TEST_DATA_PATH, 'segmentations', 'test_bad_data2.mod')\n with self.assertRaises(ValueError):\n modreader.get_data(mod_fn) # missing 'IEOF' end", "def test_empty_stream(self):\r\n mock_file = MockFile([b\"\"])\r\n for _ in BinaryIOStream(mock_file):\r\n # This should never happen\r\n self.assertFalse(True)", "def eofTestHelperStress(self, pfactory):\n # TODO: we should make sure this covers more of the code paths\n\n for i in xrange(0, len(self.data) + 1):\n trans = TTransport.TMemoryBuffer(self.data[0:i])\n prot = pfactory.getProtocol(trans)\n try:\n x = Xtruct()\n x.read(prot)\n x.read(prot)\n x.read(prot)\n except EOFError:\n continue\n self.fail(\"Should have gotten an EOFError\")", "def test_fileio(tmp_path):\n async def aiotests():\n fio = fileio.FileIO(Path(tmp_path))\n\n with pytest.raises(PermissionError):\n fio.resolve_and_sanitize('../abba123')\n\n with pytest.raises(PermissionError):\n fio.resolve_and_sanitize('/abba123')\n\n with pytest.raises(FileNotFoundError):\n fio.resolve_and_sanitize('abba123', must_exist=True)\n\n with pytest.raises(FileNotFoundError):\n fio.open_and_seek('abba123', 0, for_write=False)\n\n with pytest.raises(PermissionError):\n fio.open_and_seek('../abba123', 0, for_write=False)\n\n with 
pytest.raises(aiohttp.web.HTTPForbidden):\n await fio.upload_chunk(chunker.FileChunk(path='../abba1234', pos=0, size=123, cmpratio=1, hash='abcdef1234'), None)\n\n with pytest.raises(aiohttp.web.HTTPInternalServerError):\n await fio.upload_chunk(chunker.FileChunk(path=False, pos=0, size=123, cmpratio=1, hash='abcdef1234'), None)\n\n assert await fio.copy_chunk_locally(\n chunker.FileChunk(path='abba1234', pos=0, size=123, cmpratio=1, hash='abcdef1234'),\n chunker.FileChunk(path='abba1234', pos=0, size=123, cmpratio=1, hash='abcdef1234')) is True\n\n with pytest.raises(ValueError):\n assert await fio.copy_chunk_locally(\n chunker.FileChunk(path='abba1234', pos=0, size=123, cmpratio=1, hash='abcdef1234'),\n chunker.FileChunk(path='asdfklna', pos=0, size=123, cmpratio=1, hash='ABABABABAB'))\n\n assert fio.try_precreate_large_sparse_file('sparsefiletest.bin', 1234) == fio.resolve_and_sanitize('sparsefiletest.bin').exists()\n await fio.remove_file_and_paths('sparsefiletest.bin')\n\n asyncio.run(aiotests())", "def test_ioerror_buffer_position(self):\n bytes = pyamf.encode(u'foo', [1, 2, 3], encoding=pyamf.AMF3).getvalue()\n\n self.buf.write(bytes[:-1])\n self.buf.seek(0)\n\n self.decoder.readElement()\n self.assertEqual(self.buf.tell(), 5)\n\n self.assertRaises(IOError, self.decoder.readElement)\n self.assertEqual(self.buf.tell(), 5)", "def test_issue_172_1():\n input_data = 8 * os.urandom(1024)\n with lz4.frame.open('testfile_small', 'wb') as fp:\n bytes_written = fp.write(input_data) # noqa: F841\n\n with lz4.frame.open('testfile_small', 'rb') as fp:\n data = fp.read(10)\n assert len(data) == 10", "def test_open_by_unnamed_fobj(self):\n with open(ZIPPATH, 'rb') as zf:\n with io.FileIO(zf.fileno(), mode='r', closefd=False) as f:\n self._test_listing_content(f)", "def test_read_EOF(demo_data):\n\n openeeg = openEDF(demo_data)\n start = max(openeeg.header.samples) + 1\n arr = openeeg.read(start, start+100)\n assert arr.size == 0\n\n openeeg.close()", "def test_cantChangeFileMode(self):\n if runtime.platform.isWindows():\n name, directory = \"NUL\", \"\"\n expectedPath = \"NUL\"\n else:\n name, directory = \"null\", \"/dev\"\n expectedPath = \"/dev/null\"\n\n log = logfile.LogFile(name, directory, defaultMode=0o555)\n self.addCleanup(log.close)\n\n self.assertEqual(log.path, expectedPath)\n self.assertEqual(log.defaultMode, 0o555)", "def test_empty_file(self):\n with open(os.path.join(test_dir, \"empty_file.txt\")) as f:\n for idx, line in enumerate(reverse_readline(f)):\n raise ValueError(\"an empty file is being read!\")", "def test_file_readlines_missing_file(self):\n with (self.assertRaises(IOError)):\n FileReader(self.bogus_path).readlines()", "def rewind(f):\n\tf.seek(0)", "def test_garbage_stream(self):\r\n valid: bytes = b\"!AIVDM,1,1,,B,B43JRq00LhTWc5VejDI>wwWUoP06,0*29\"\r\n mock_file = MockFile([b\"Foo\", b\"Bar\", b\"1337\", valid])\r\n for msg in BinaryIOStream(mock_file):\r\n self.assertEqual(msg.raw, valid)", "def test_cache_pollution(self):\n with self._test_checksum_setup(self.tempdir.name) as setupdata:\n filename, data, expected_checksum = setupdata\n\n # corrupt the file\n with open(os.path.join(self.tempdir.name, filename), \"r+b\") as fh:\n fh.seek(0)\n real_first_byte = fh.read(1).decode(\"latin-1\")\n fh.seek(0)\n fh.write(chr(ord(real_first_byte) ^ 0xff).encode(\"latin-1\"))\n\n with self.assertRaises(ChecksumValidationError):\n with self.caching_backend.read_contextmanager(filename, expected_checksum) as cm:\n self.assertEqual(cm.read(), data)\n\n # un-corrupt the file\n 
with open(os.path.join(self.tempdir.name, filename), \"r+b\") as fh:\n fh.seek(0)\n real_first_byte = fh.read(1).decode(\"latin-1\")\n fh.seek(0)\n fh.write(chr(ord(real_first_byte) ^ 0xff).encode(\"latin-1\"))\n\n with self.caching_backend.read_contextmanager(filename, expected_checksum) as cm:\n self.assertEqual(cm.read(), data)", "def test_read_raw_unsupported_single(fname):\n with pytest.raises(ValueError, match='Unsupported file type'):\n read_raw(fname)", "def test_failToOpenLocalFile(self):\n fp = FilePath(self.mktemp()).child(\"child-with-no-existing-parent\")\n\n self.assertRaises(IOError, self.makeConnectedDccFileReceive, fp.path)", "def test_find_read_fail(self):\n file_handler = open(self.test_task_filename, 'w')\n file_handler.write('Mock corrupt data')\n file_handler.close()\n os.chmod(self.test_task_filename, 000)\n\n self.assertRaises(IOError, self.task_storage.find, self.my_task)", "def test_file_read_gzip_missing_file(self):\n with (self.assertRaises(IOError)):\n FileReader(self.bogus_path).read_gzip()", "def open_from(self, f: BinaryIO):\n raise NotImplementedError", "def test_empty_file(self):\n\n temp = tempfile.NamedTemporaryFile()\n temp.flush()\n self.assertRaises(MalformedFileError, NBTFile, temp.name)", "def rewind(f):\n f.seek(0)", "def test_get_read_fail(self):\n file_handler = open(self.test_key_filename, 'w')\n file_handler.write('Mock corrupt data')\n file_handler.close()\n os.chmod(self.test_key_filename, 000)\n\n self.assertRaises(IOError, self.key_gen.get)", "def read_fd_decode_safely(fd, size=4096):\n data = os.read(fd.fileno(), size)\n for _ in range(3):\n try:\n return data, data.decode(\"utf-8\")\n except UnicodeDecodeError as e:\n if e.reason != \"unexpected end of data\":\n raise\n data += os.read(fd.fileno(), 1)\n\n return data, data.decode(\"utf-8\")", "def test_level2_fobj(filename, use_seek):\n f = get_test_data(filename)\n if not use_seek:\n class SeeklessReader:\n \"\"\"Simulate file-like object access without seek.\"\"\"\n\n def __init__(self, f):\n self._f = f\n\n def read(self, n=None):\n \"\"\"Read bytes.\"\"\"\n return self._f.read(n)\n\n f = SeeklessReader(f)\n Level2File(f)", "def test_read_fail1(self):\n mod_fn = os.path.join(tests.TEST_DATA_PATH, 'segmentations', 'test_bad_data1.mod')\n with self.assertRaises(ValueError):\n modreader.get_data(mod_fn) # missing 'IMOD' start", "def test_no_eof(self):", "def test_file_unused(self):\n try:\n with get_temp_file() as (fd, name):\n pass\n except Exception as err:\n self.fail('Failed with exception \"{}\"'.format(err))\n else:\n file_exists = os.access(name, os.F_OK)\n self.assertFalse(file_exists)", "def _barf_if_closed(self):\n if self.closed:\n raise ValueError(\"I/O operation on closed file\")", "def _check_truncation(self):\n\n temp_pos = self._handle.tell()\n self._handle.seek(-28, 2)\n eof = self._handle.read()\n self._handle.seek(temp_pos)\n if eof == _bgzf_eof:\n return False\n else:\n warnings.BytesWarning('No EOF character found. 
File may be truncated')\n return True", "def test_not_implemented():\n for not_implemented_ext in ['.mef', '.nwb']:\n data_path = _TempDir()\n raw_fname = op.join(data_path, 'test' + not_implemented_ext)\n with open(raw_fname, 'w', encoding='utf-8'):\n pass\n with pytest.raises(ValueError, match=('there is no IO support for '\n 'this file format yet')):\n _read_raw(raw_fname)", "def __init__(self, fname, endian='<', checksum_size=None, debug_level=0):\n self.endian = endian\n self.f = open(expanduser(fname), 'rb')\n self.f.seek(0, 2)\n self.fsize = self.tell()\n self.f.seek(0, 0)\n self.close = self.f.close\n if checksum_size:\n pass\n #self.cs = checksum(self, 0, checksum_size)\n else:\n self.cs = checksum_size\n self.debug_level = debug_level", "def test_read():\n f = open('test', mode='r')\n line = f.read()\n f.close()", "def test_fastq_output(self):\r\n self.assertRaises(IOError, convert_fastq, self.fasta_file_path,\r\n self.qual_file_path, output_directory=self.read_only_output_dir)", "def _verify_descriptors(self, msg):\n self.assertTrue(is_writable_file(msg.chlderr))\n self.assertTrue(is_writable_file(msg.chldout))\n self.assertTrue(is_writable_file(msg.chldnul))", "def test_malformed(self):\n fdesc, fname = tempfile.mkstemp()\n tfile = os.fdopen(fdesc, 'w')\n tfile.write(self.file_str2)\n tfile.close()\n assert_raises(Exception, grades.writers.GradesFile, fname)\n os.unlink(fname)", "def test_noPermission(self):\n log = logfile.LogFile(self.name, self.dir)\n self.addCleanup(log.close)\n log.write(\"abc\")\n\n # change permissions so rotation would fail\n os.chmod(self.dir, 0o555)\n\n # if this succeeds, chmod doesn't restrict us, so we can't\n # do the test\n try:\n f = open(os.path.join(self.dir, \"xxx\"), \"w\")\n except OSError:\n pass\n else:\n f.close()\n return\n\n log.rotate() # this should not fail\n\n log.write(\"def\")\n log.flush()\n\n f = log._file\n self.assertEqual(f.tell(), 6)\n f.seek(0, 0)\n self.assertEqual(f.read(), b\"abcdef\")", "def _unblock_open_fifo_operation(self) -> None:\n if os.path.exists(self._fifo_out_path):\n open(self._fifo_out_path, 'wb', buffering=0)\n if os.path.exists(self._fifo_in_path):\n open(self._fifo_in_path, 'rb', buffering=0)", "def test_fiberflat_rw(self):\n from ..fiberflat import FiberFlat\n from ..io.fiberflat import read_fiberflat, write_fiberflat\n nspec, nwave, ndiag = 10, 20, 3\n flat = np.random.uniform(size=(nspec, nwave))\n ivar = np.random.uniform(size=(nspec, nwave))\n mask = np.zeros(shape=(nspec, nwave), dtype=int)\n meanspec = np.random.uniform(size=(nwave,))\n wave = np.arange(nwave)\n\n ff = FiberFlat(wave, flat, ivar, mask, meanspec)\n\n write_fiberflat(self.testfile, ff)\n xff = read_fiberflat(self.testfile)\n\n self.assertTrue(np.all(ff.fiberflat.astype('f4').astype('f8') == xff.fiberflat))\n self.assertTrue(np.all(ff.ivar.astype('f4').astype('f8') == xff.ivar))\n self.assertTrue(np.all(ff.mask == xff.mask))\n self.assertTrue(np.all(ff.meanspec.astype('f4').astype('f8') == xff.meanspec))\n self.assertTrue(np.all(ff.wave.astype('f4').astype('f8') == xff.wave))\n\n self.assertTrue(xff.fiberflat.dtype.isnative)\n self.assertTrue(xff.ivar.dtype.isnative)\n self.assertTrue(xff.mask.dtype.isnative)\n self.assertTrue(xff.meanspec.dtype.isnative)\n self.assertTrue(xff.wave.dtype.isnative)", "def test_fasta_file(self):\r\n self.assertRaises(IOError, convert_fastaqual_fastq,\r\n self.read_only_output_dir, self.qual_file_path)", "def test_filter_sff_file(self):\r\n\r\n try:\r\n fh = open(self.tiny_test)\r\n except IOError:\r\n 
self.fail(\r\n \"Could not open test file %s. Skipping test\" %\r\n self.tiny_test)\r\n\r\n # With no filters all flowgram should be in out file\r\n flowgrams, header = lazy_parse_sff_handle(fh)\r\n filter_list = []\r\n fd, out_file_name = mkstemp(\r\n prefix=\"test_filter_sff_file\",\r\n suffix=\".sff.txt\")\r\n close(fd)\r\n out_fh = open(out_file_name, \"w\")\r\n l = filter_sff_file(flowgrams, header, filter_list, out_fh)\r\n remove(out_file_name)\r\n fh.close()\r\n self.assertEqual(l, 114)\r\n\r\n # With good filters some should survive\r\n fh = open(self.tiny_test)\r\n flowgrams, header = lazy_parse_sff_handle(fh)\r\n filter_list = [lambda f:within_length(f, 100, 300)]\r\n fd, out_file_name = mkstemp(\r\n prefix=\"test_filter_sff_file\",\r\n suffix=\".sff.txt\")\r\n close(fd)\r\n out_fh = open(out_file_name, \"w\")\r\n l = filter_sff_file(flowgrams, header, filter_list, out_fh)\r\n remove(out_file_name)\r\n fh.close()\r\n self.assertEqual(l, 112)\r\n\r\n # With strong filters nothing should be in\r\n fh = open(self.tiny_test)\r\n flowgrams, header = lazy_parse_sff_handle(fh)\r\n filter_list = [lambda f:within_length(f, 0, 0)]\r\n fd, out_file_name = mkstemp(\r\n prefix=\"test_filter_sff_file\",\r\n suffix=\".sff.txt\")\r\n close(fd)\r\n out_fh = open(out_file_name, \"w\")\r\n l = filter_sff_file(flowgrams, header, filter_list, out_fh)\r\n remove(out_file_name)\r\n self.assertEqual(l, 0)", "def test_doubled_file():\n with contextlib.closing(get_test_data('Level2_KFTG_20150430_1419.ar2v')) as infile:\n data = infile.read()\n fobj = BytesIO(data + data)\n f = Level2File(fobj)\n assert len(f.sweeps) == 12", "def read(fname):\n f = fabio.open(fname)\n data = f.data\n del f; # close file\n return data", "def test_do_not_load_in_child_dir(self, tmp_path):\n nested_directory = tmp_path / os.path.join(\"a\", \"b\", \"c\")\n os.makedirs(nested_directory, exist_ok=True)\n\n # Create a FF in a nested directory\n ForceField(\"openff-1.0.0.offxml\").to_file(\n os.path.join(nested_directory, \"force-field.offxml\")\n )\n\n # Check that the file does not exist in the current working directory.\n assert not os.path.isfile(\"force-field.offxml\")\n\n with pytest.raises(\n OSError, match=\"Source 'force-field.offxml' could not be read.\"\n ):\n ForceField(\"force-field.offxml\")", "def seekable(self):\n ...", "def test_from_file_fail(self):\n with TemporaryDirectory() as tmp:\n fp = os.path.join(tmp, \"test.txt\")\n open(fp, 'a').close()\n assert os.path.exists(fp)\n with self.assertRaises(TypeError):\n BaseDataClass.from_file(fp)", "def test_init_flowgram_file(self):\r\n fh, tmp_filename = init_flowgram_file(n=100, l=400)\r\n self.assert_(exists(tmp_filename))\r\n self.tmp_filename = tmp_filename\r\n fh.close()\r\n result_file_content = list(open(tmp_filename))\r\n\r\n self.assertEqual(result_file_content, [\"100 400\\n\"])", "def test_as_file_false(self):\n with TemporaryDirectory() as tmp:\n # define path to file\n fp = os.path.join(tmp, \"asdf.txt\")\n\n # invoke atomic_write with param as_file set to False\n # this should return a temporary file path string\n with atomic_write(fp, as_file=False) as f:\n self.assertIsInstance(f, str)", "def test_get_filled_attributes_from_file_non_existent_file(tmp_path):\n assert get_filled_attributes_from_file(tmp_path / \"does not exist\") == frozenset([])", "def test_add_read_fail(self):\n file_handler = open(self.test_task_filename, 'w')\n file_handler.write('Mock corrupt data')\n file_handler.close()\n os.chmod(self.test_task_filename, 000)\n\n 
self.assertRaises(IOError, self.task_storage.add, self.my_task)", "def test_noPermission(self):\n log = logfile.LogFile(self.name, self.dir)\n log.write(\"abc\")\n\n # change permissions so rotation would fail\n os.chmod(self.dir, 0555)\n\n # if this succeeds, chmod doesn't restrict us, so we can't\n # do the test\n try:\n f = open(os.path.join(self.dir,\"xxx\"), \"w\")\n except (OSError, IOError):\n pass\n else:\n f.close()\n return\n\n log.rotate() # this should not fail\n\n log.write(\"def\")\n log.flush()\n\n f = log._file\n self.assertEquals(f.tell(), 6)\n f.seek(0, 0)\n self.assertEquals(f.read(), \"abcdef\")\n log.close()", "def test_read_not_interested(self):\n try:\n self.reader.read(self.books[2], 0, 0)\n self.fail(\"Readed book not interested\")\n except AssertionError:\n pass", "def uninitializedRead(self, address: ghidra.program.model.address.Address, size: int, buf: List[int], bufOffset: int) -> bool:\n ...", "def test_open_read(self, remote_mock_dir):\n\n file_path = posixpath.join(remote_mock_dir, \"test.txt\")\n with HdfsHook() as hook:\n with hook.open(file_path) as file_:\n content = file_.read()\n assert content == b\"Test file\\n\"", "def test_nonfile(self):\n self.assertEqual(None,readfiles.read_file(\"tests.txt))", "def safe_fd(fd):\n toclose = []\n try:\n while fd < 3:\n toclose.append(fd)\n fd = _os.dup(fd)\n finally:\n for dfd in toclose:\n try:\n _os.close(dfd)\n except OSError:\n pass\n return fd", "def test_close_stream_first(self):\n z = ZipFile(self.f, 'r')\n stream = z.readstream(FILENAMES[0])\n z.close()\n try:\n stream.read()\n except:\n self.fail(\"Reading stream from closed archive failed!\")\n stream.close()\n # Now the archive should close.\n self.assertIsNone(z._a)\n self.assertTrue(stream.closed)\n self.assertIsNone(z._stream)", "def test_bug1003(capfd):\n input_dir = get_input_path('bug1003')\n temp_dir = get_temp_dir_path('bug1003var')\n copytree(input_dir, temp_dir)\n ds_path = os.path.join(temp_dir, 'bug1003.designspace')\n runner(CMD + ['-o', 'd', f'_{ds_path}'])\n captured = capfd.readouterr()\n assert \"The input set requires compatibilization\" in captured.err", "def test_read_components_from_rulefile_rulefile_not_specified2(tmp_path):\n with pytest.raises(SystemExit):\n _read_components_from_rulefile()", "def test_open_fill(self):", "def test_readbadformat(self):\n\n self.assertRaises(ParseError, self.hw, self.badfile)", "def test_file_open_bug():\n \n value = Value('test', context, 'reentrant_test', clsmap['file'], data_dir='./cache')\n \n try:\n os.remove(value.namespacemanager.file)\n except OSError:\n pass\n \n value.set_value(\"x\")\n\n f = open(value.namespacemanager.file, 'w')\n f.write(\"BLAH BLAH BLAH\")\n f.close()\n \n # TODO: do we have an assertRaises() in nose to use here ?\n try:\n value.set_value(\"y\")\n assert False\n except:\n pass\n \n _synchronizers.clear()\n context.clear()\n value = Value('test', context, 'reentrant_test', clsmap['file'], data_dir='./cache')\n\n # TODO: do we have an assertRaises() in nose to use here ?\n try:\n value.set_value(\"z\")\n assert False\n except:\n pass", "def test_read_EOF2(demo_data):\n\n openeeg = openEDF(demo_data)\n #read 200 samples starting from 100 samples before EOF\n start = max(openeeg.header.samples) - 100\n arr = openeeg.read(start, start + 200)\n assert arr.shape[-1] == 100\n\n openeeg.close()", "def readinto(self, buf: bytes, /) -> Optional[int]:", "def test_read_0_2_smirnoff(self):\n ForceField(\n get_data_file_path(\n 
\"test_forcefields/smirnoff99Frosst_reference_0_2_spec.offxml\"\n )\n )", "def test_fileDoesNotExist(self):\n fp = FilePath(self.mktemp())\n protocol = self.makeConnectedDccFileReceive(fp.path)\n\n self.allDataReceivedForProtocol(protocol, b\"I <3 Twisted\")\n\n self.assertEqual(fp.getContent(), b\"I <3 Twisted\")", "def testTransportReadAll(self):\n trans = TTransport.TMemoryBuffer(self.data)\n trans.readAll(1)\n\n try:\n trans.readAll(10000)\n except EOFError:\n return\n\n self.fail(\"Should have gotten EOFError\")", "def close_datafile(fs):\r\n fs.close() # fs is the output from init_datafile\r", "def test_qual_file(self):\r\n self.assertRaises(IOError, convert_fastaqual_fastq,\r\n self.fasta_file_path, self.read_only_output_dir)", "def testBinaryProtocolAcceleratedEof(self):\n self.eofTestHelper(TBinaryProtocol.TBinaryProtocolAcceleratedFactory())\n self.eofTestHelperStress(TBinaryProtocol.TBinaryProtocolAcceleratedFactory())", "def test_read_raw_supported(fname):\n read_raw(fname)\n read_raw(fname, verbose=False)\n raw = read_raw(fname, preload=True)\n assert \"data loaded\" in str(raw)", "def test_fortran_reader_notbasic():\n\n tabstr = dedent(\n \"\"\"\n a b\n 1 1.23D4\n 2 5.67D-8\n \"\"\"\n )[1:-1]\n\n t1 = ascii.read(tabstr.split(\"\\n\"), fast_reader={\"exponent_style\": \"D\"})\n\n assert t1[\"b\"].dtype.kind == \"f\"\n\n tabrdb = dedent(\n \"\"\"\n a\\tb\n # A simple RDB table\n N\\tN\n 1\\t 1.23D4\n 2\\t 5.67-008\n \"\"\"\n )[1:-1]\n\n t2 = ascii.read(\n tabrdb.split(\"\\n\"), format=\"rdb\", fast_reader={\"exponent_style\": \"fortran\"}\n )\n\n assert t2[\"b\"].dtype.kind == \"f\"\n\n tabrst = dedent(\n \"\"\"\n = =======\n a b\n = =======\n 1 1.23E4\n 2 5.67E-8\n = =======\n \"\"\"\n )[1:-1]\n\n t3 = ascii.read(tabrst.split(\"\\n\"), format=\"rst\")\n\n assert t3[\"b\"].dtype.kind == \"f\"\n\n t4 = ascii.read(tabrst.split(\"\\n\"), guess=True)\n\n assert t4[\"b\"].dtype.kind == \"f\"\n\n # In the special case of fast_converter=True (the default),\n # incompatibility is ignored\n t5 = ascii.read(tabrst.split(\"\\n\"), format=\"rst\", fast_reader=True)\n\n assert t5[\"b\"].dtype.kind == \"f\"\n\n with pytest.raises(ParameterError):\n ascii.read(tabrst.split(\"\\n\"), format=\"rst\", guess=False, fast_reader=\"force\")\n\n with pytest.raises(ParameterError):\n ascii.read(\n tabrst.split(\"\\n\"),\n format=\"rst\",\n guess=False,\n fast_reader={\"use_fast_converter\": False},\n )\n\n tabrst = tabrst.replace(\"E\", \"D\")\n\n with pytest.raises(ParameterError):\n ascii.read(\n tabrst.split(\"\\n\"),\n format=\"rst\",\n guess=False,\n fast_reader={\"exponent_style\": \"D\"},\n )", "def test_corrupt_dataofs(logger):\n packet = layers.packet.Packet(IP(src=\"127.0.0.1\", dst=\"127.0.0.1\")/TCP(sport=2222, dport=3333, seq=100, ack=100, flags=\"S\", dataofs=\"6L\"))\n original = copy.deepcopy(packet)\n tamper = actions.tamper.TamperAction(None, field=\"dataofs\", tamper_type=\"corrupt\")\n\n tamper.tamper(packet, logger)\n\n # Confirm tamper actually corrupted the checksum\n assert packet[TCP].dataofs != \"0\"\n new_value = packet[TCP].dataofs\n\n # Must run this check repeatedly - if a scapy fuzz-ed value is not properly\n # ._fix()-ed, it will return different values each time it's requested\n for _ in range(0, 5):\n assert packet[TCP].dataofs == new_value, \"Corrupted value is not stable\"\n\n # Confirm tamper didn't corrupt anything else in the TCP header\n assert confirm_unchanged(packet, original, TCP, [\"dataofs\"])\n\n # Confirm tamper didn't corrupt anything in the IP header\n 
assert confirm_unchanged(packet, original, IP, [])", "def test_read_0_1_smirnoff(self):\n ForceField(\n get_data_file_path(\n \"test_forcefields/smirnoff99Frosst_reference_0_1_spec.offxml\"\n )\n )", "def test_LogReaderReadsZeroLine(self):\n # We don't need any content, just a file path that can be opened.\n with open(self.path, \"w\"):\n pass\n\n reader = logfile.LogReader(self.path)\n self.addCleanup(reader.close)\n self.assertEqual([], reader.readLines(0))", "def test_fastaqual_output(self):\r\n self.assertRaises(IOError, convert_fastaqual, self.fasta_file_path,\r\n output_directory=self.read_only_output_dir)", "def test_unicode(self):\n name = unicode(os.path.basename(self.cbf_filename))\n obj = fabio.open(self.cbf_filename)\n obj.write(os.path.join(self.tempdir, name))\n other = fabio.open(os.path.join(self.tempdir, name))\n self.assertEqual(abs(obj.data - other.data).max(), 0, \"data are the same\")\n for key in obj.header:\n if key in[ \"filename\", \"X-Binary-Size-Padding\"]:\n continue\n self.assertTrue(key in other.header, \"Key %s is in header\" % key)\n self.assertEqual(obj.header[key], other.header[key], \"value are the same for key %s [%s|%s]\" % (key, obj.header[key], other.header[key]))", "def safe_open(fname, mode, buffering=-1):\n # file descriptors\n try:\n return open(fname, mode, buffering=buffering)\n except PermissionError as ex:\n raise xt.XonshError(f\"xonsh: {fname}: permission denied\") from ex\n except FileNotFoundError as ex:\n raise xt.XonshError(f\"xonsh: {fname}: no such file or directory\") from ex\n except Exception as ex:\n raise xt.XonshError(f\"xonsh: {fname}: unable to open file\") from ex", "def test_conversion_lowlevel(path):\n gff_file = gff.GFFFile.read(join(data_dir(\"sequence\"), path))\n ref_entries = [entry for entry in gff_file]\n\n gff_file = gff.GFFFile()\n for entry in ref_entries:\n gff_file.append(*entry)\n temp = TemporaryFile(\"w+\")\n gff_file.write(temp)\n\n temp.seek(0)\n gff_file = gff.GFFFile.read(temp)\n temp.close()\n test_entries = [field for field in gff_file]\n assert test_entries == ref_entries", "def test_supply_file(self):\n f = open(self.junk_file, 'w')\n f.close()\n self.assertRaises(argparse.ArgumentTypeError, generic.check_path, self.junk_file)", "def test_read_0_1_smirff(self):\n ForceField(\n get_data_file_path(\n \"test_forcefields/smirff99Frosst_reference_0_1_spec.offxml\"\n )\n )", "def testGetDataStream(self):\n path_spec = path_spec_factory.Factory.NewPathSpec(\n definitions.TYPE_INDICATOR_HFS,\n identifier=self._IDENTIFIER_ANOTHER_FILE,\n location='/a_directory/another_file',\n parent=self._raw_path_spec)\n file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)\n self.assertIsNotNone(file_entry)\n\n data_stream = file_entry.GetDataStream('')\n self.assertIsNotNone(data_stream)\n\n path_spec = path_spec_factory.Factory.NewPathSpec(\n definitions.TYPE_INDICATOR_HFS, identifier=25,\n location='/a_directory/a_resourcefork', parent=self._raw_path_spec)\n file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)\n self.assertIsNotNone(file_entry)\n\n data_stream = file_entry.GetDataStream('rsrc')\n self.assertIsNotNone(data_stream)", "def verify_fileobj(fileobj, writable=False):\n\n try:\n data = fileobj.read(0)\n except Exception:\n if not hasattr(fileobj, \"read\"):\n raise ValueError(\"%r not a valid file object\" % fileobj)\n raise ValueError(\"Can't read from file object %r\" % fileobj)\n\n if not isinstance(data, bytes):\n raise ValueError(\n \"file object %r not opened in binary mode\" % 
fileobj)\n\n if writable:\n try:\n fileobj.write(b\"\")\n except Exception:\n if not hasattr(fileobj, \"write\"):\n raise ValueError(\"%r not a valid file object\" % fileobj)\n raise ValueError(\"Can't write to file object %r\" % fileobj)", "def read(self, size=-1):\n ...", "def test_prep_sffs_in_dir_no_trim(self):\r\n output_dir = mkdtemp()\r\n gz_output_dir = mkdtemp()\r\n\r\n try:\r\n check_sffinfo()\r\n perform_test = True\r\n except:\r\n perform_test = False\r\n\r\n if perform_test:\r\n prep_sffs_in_dir(self.sff_dir, output_dir, make_flowgram=False,\r\n convert_to_flx=False, use_sfftools=True,\r\n no_trim=True)\r\n\r\n fna_fp = os.path.join(output_dir, 'test.fna')\r\n\r\n self.assertEqual(open(fna_fp).read(), fna_notrim_txt)\r\n\r\n qual_fp = os.path.join(output_dir, 'test.qual')\r\n self.assertEqual(open(qual_fp).read(), qual_notrim_txt)\r\n\r\n self.assertRaises(TypeError, \"gzipped SFF\", prep_sffs_in_dir,\r\n self.gz_sff_dir, gz_output_dir, make_flowgram=False,\r\n convert_to_flx=False, use_sfftools=True,\r\n no_trim=True)\r\n\r\n shutil.rmtree(output_dir)\r\n shutil.rmtree(gz_output_dir)", "def test_good_input():\n\n out_file = 'unclustered.fa'\n if os.path.isfile(out_file):\n os.remove(out_file)\n\n try:\n rv, out = getstatusoutput(f'{prg} -c {cdhit} -p {proteins}')\n assert rv == 0\n assert out == ('Wrote 309 of 220,520 unclustered '\n 'proteins to \"unclustered.fa\"')\n\n assert os.path.isfile(out_file)\n\n seqs = list(SeqIO.parse(out_file, 'fasta'))\n\n assert len(seqs) == 309\n finally:\n if os.path.isfile(out_file):\n os.remove(out_file)", "def test_fileinuse (self):\t\t\n\t\t# open a file:\n\t\ttestfilepath = os.path.join (self.testfolder, \"fileinuse.txt\")\n\t\tf = open(testfilepath,\"a\") #opens file with name of \"test.txt\"\n\t\tf.write(\"This file is now opened and i'm writting on it \\n\")\n\t\tself.assertEqual (MD.fileinuse(testfilepath), True) # Checks a file that it is beign written.\n\t\tself.assertEqual (MD.folderinuse(self.testfolder), True) # Checks if any file inside the folder is beign used.\n\t\tf.close()\n\t\tself.assertEqual (MD.fileinuse(testfilepath), False) # Cheks a file that it is closed.\n\t\tself.assertEqual (MD.folderinuse(self.testfolder), False)", "def read_and_unpack(self, fmt):\n try:\n return unpack(\n self.byte_order + fmt,\n self.read(calcsize(self.byte_order + fmt)))\n except Exception as e:\n if e.args[0].startswith('unpack requires a buffer of'):\n raise EOFError(e)\n else:\n raise" ]
[ "0.6067853", "0.59570915", "0.5916047", "0.5873482", "0.5731206", "0.57067436", "0.5683966", "0.5675375", "0.5667967", "0.5606461", "0.5593845", "0.557636", "0.556104", "0.5553911", "0.55134106", "0.5493101", "0.5419356", "0.5418688", "0.5415278", "0.5410945", "0.53708446", "0.53554213", "0.5353923", "0.5341025", "0.5335815", "0.5308719", "0.5305879", "0.529503", "0.5286936", "0.52552974", "0.52549446", "0.522892", "0.522241", "0.52190894", "0.5208856", "0.519914", "0.51810575", "0.51706123", "0.5170023", "0.5160514", "0.5157703", "0.51474035", "0.51467794", "0.51281923", "0.5126071", "0.5094816", "0.5073266", "0.50692946", "0.5055498", "0.5044338", "0.5038003", "0.50298226", "0.50290805", "0.5028495", "0.50222665", "0.50205314", "0.5020005", "0.50121886", "0.5010824", "0.49968317", "0.4980761", "0.49775398", "0.4975638", "0.49726763", "0.49697733", "0.49649665", "0.49615636", "0.49597025", "0.49581894", "0.4937652", "0.49274033", "0.49250996", "0.49224308", "0.49157324", "0.49020436", "0.4899269", "0.48950526", "0.48834905", "0.48825037", "0.486239", "0.48560664", "0.48537058", "0.48506728", "0.4850396", "0.48503083", "0.48493025", "0.48431367", "0.48429972", "0.48422137", "0.48411658", "0.48378643", "0.48365957", "0.48299414", "0.48230642", "0.48139456", "0.48126745", "0.48100168", "0.4807779", "0.48005205", "0.47998807" ]
0.48357132
92
Tests gotoField if there is a mismatch between MCP and guider.
def test_goto_field_cartridge_mismatch(self): sopTester.updateModel('guider', TestHelper.guiderState['bossLoaded']) mcpState = TestHelper.mcpState['boss_science'] mcpState.update({'instrumentNum': [15]}) sopTester.updateModel('mcp', mcpState) cmdState = self.actorState.gotoField cmdState.reinitialize(self.cmd) masterThread.goto_field(self.cmd, cmdState, myGlobals.actorState) self._check_cmd(0, 14, 0, 0, finish=True, didFail=True)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_goto_field_apogee_no_guider(self):\n cmdState = self.actorState.gotoField\n cmdState.reinitialize(self.cmd)\n cmdState.doGuider = False\n self._goto_feld_apogee(3, 11, 0, 0, cmdState)", "def test_goto_field_apogee(self):\n cmdState = self.actorState.gotoField\n cmdState.reinitialize(self.cmd)\n self._goto_feld_apogee(13, 46, 0, 0, cmdState)", "def test_goto_field_boss_flat_on_fails(self):\n sopTester.updateModel('mcp', TestHelper.mcpState['all_off'])\n cmdState = self.actorState.gotoField\n cmdState.reinitialize(self.cmd)\n self.cmd.failOn = 'mcp ff.on'\n self._goto_field_boss(16, 71, 0, 1, cmdState, didFail=True, finish=True)", "def test_goto_field_boss_guider(self):\n sopTester.updateModel('mcp', TestHelper.mcpState['all_off'])\n sopTester.updateModel('guider', TestHelper.guiderState['guiderOnDecenter'])\n cmdState = self.actorState.gotoField\n cmdState.reinitialize(self.cmd)\n cmdState.doSlew = False\n cmdState.doHartmann = False\n cmdState.doCalibs = False\n cmdState.arcTime = 0\n cmdState.flatTime = 0\n self._goto_field_boss(9, 37, 0, 0, cmdState)", "def test_goto_field_apogee_no_slew(self):\n cmdState = self.actorState.gotoField\n cmdState.reinitialize(self.cmd)\n cmdState.doSlew = False\n self._goto_feld_apogee(9, 37, 0, 0, cmdState)", "def test_goto_field_boss_ffs_open_fails(self):\n sopTester.updateModel('mcp', TestHelper.mcpState['all_off'])\n cmdState = self.actorState.gotoField\n cmdState.reinitialize(self.cmd)\n self.cmd.failOn = 'mcp ffs.open'\n self._goto_field_boss(21, 102, 1, 1, cmdState, didFail=True, finish=True)", "def test_goto_field_apogee_no_slew_decenter_off(self):\n sopTester.updateModel('mcp', TestHelper.mcpState['all_off'])\n sopTester.updateModel('guider', TestHelper.guiderState['guiderOnDecenter'])\n cmdState = self.actorState.gotoField\n cmdState.reinitialize(self.cmd)\n cmdState.doSlew = False\n self._goto_feld_apogee(9, 37, 0, 0, cmdState)", "def test_goto_field_boss_hartmann_blue_fails(self):\n\n sopTester.updateModel('mcp', TestHelper.mcpState['all_off'])\n sopTester.updateModel('hartmann', TestHelper.hartmannState['blue_fails'])\n\n cmdState = self.actorState.gotoField\n cmdState.reinitialize(self.cmd)\n\n self._goto_field_boss(12, 37, 0, 0, cmdState, didFail=True, finish=True)", "def test_goto_field_apogee_bypass_gangToCart(self):\n self._prep_bypass('gangToCart', clear=True)\n cmdState = self.actorState.gotoField\n cmdState.reinitialize(self.cmd)\n self._goto_feld_apogee(13, 44, 4, 0, cmdState)", "def test_goto_field_boss_calibs(self):\n sopTester.updateModel('mcp', TestHelper.mcpState['all_off'])\n cmdState = self.actorState.gotoField\n cmdState.reinitialize(self.cmd)\n cmdState.doSlew = False\n cmdState.doHartmann = False\n cmdState.doGuider = False\n self._goto_field_boss(10, 57, 0, 0, cmdState)", "def test_goto_field_apogee_no_slew_shutter_open(self):\n sopTester.updateModel('apogee', TestHelper.apogeeState['B_open'])\n cmdState = self.actorState.gotoField\n cmdState.reinitialize(self.cmd)\n cmdState.doSlew = False\n self._goto_feld_apogee(10, 37, 0, 0, cmdState)", "def test_defining_only_or_defer_on_nonexistant_fields_fails(self):", "def test_goto_field_boss_slew(self):\n sopTester.updateModel('mcp', TestHelper.mcpState['all_off'])\n cmdState = self.actorState.gotoField\n cmdState.reinitialize(self.cmd)\n cmdState.doGuider = False\n cmdState.doHartmann = False\n cmdState.doCalibs = False\n cmdState.arcTime = 0\n cmdState.flatTime = 0\n self._goto_field_boss(3, 26, 0, 0, cmdState)", "def _validator_target(self, field, value):\n if not 
REG.match(value):\n self._error(field, \"{} is not a valid target\".format(value))", "def verifyField(self, pv, field, reference):\n full_pv = pv + \".\" + field\n if (caget(full_pv) != reference):\n msg = \"ERROR: \" + full_pv + \" not equal to \" + str(reference)\n raise Exception(__name__ + msg)\n\n return self.__g.SUCCESS", "def test_mutate_field(self):\n # Test adding a field\n with self.assertRaises(ValueError):\n self.email.add_field('', '')\n\n self.email.add_field(self.key, self.regex)\n\n found_key = False\n found_regex = r''\n for field in self.email.fields:\n if field['key'] == self.key:\n found_key = True\n found_regex = field['regex']\n\n self.assertTrue(found_key)\n self.assertEqual(found_regex, self.regex)\n\n # Test getting a field\n with self.assertRaises(LookupError):\n self.email.get_field('')\n\n field = self.email.get_field(self.key)\n self.assertEqual(\n field, {'key': self.key, 'regex': self.regex, 'value': []})\n\n # Test removing a field\n with self.assertRaises(LookupError):\n self.email.remove_field('')\n\n self.email.remove_field(self.key)\n\n found_key = False\n found_regex = r''\n for field in self.email.fields:\n if field['key'] == self.key:\n found_key = True\n found_regex = field['regex']\n\n self.assertFalse(found_key)\n self.assertNotEqual(found_regex, self.regex)", "def test_goto_field_boss_hartmann(self):\n sopTester.updateModel('mcp', TestHelper.mcpState['all_off'])\n cmdState = self.actorState.gotoField\n cmdState.reinitialize(self.cmd)\n cmdState.doSlew = False\n cmdState.doCalibs = False\n cmdState.arcTime = 0\n cmdState.flatTime = 0\n cmdState.doGuider = False\n self._goto_field_boss(5, 29, 0, 0, cmdState)", "def test_invalid_field_access(self):\r\n out = self.c.post(self.url, {'op': 'delete hints', 'field': 'all your private data'})\r\n print out\r\n self.assertTrue('an invalid field was accessed' in out.content)", "def test_field_id_lt(self):\n field = '<6'\n ref_idx = [0]\n self.res=self.run_task(infile=self.rawfile,field=field,calmode=self.calmode,outfile=self.outname,outform='ASAP')\n self.assertEqual(self.res,None,\n msg='Any error occurred during calibration')\n self._compare_with_analytic(self.outname, self.line, self.baseline, ref_idx)", "def test_51(self):\n assert 'False' == Api.requestBlock('test-51', CustomFields=True)", "def test_field_id_gt(self):\n field = '>7'\n ref_idx = [3]\n self.res=self.run_task(infile=self.rawfile,field=field,calmode=self.calmode,outfile=self.outname,outform='ASAP')\n self.assertEqual(self.res,None,\n msg='Any error occurred during calibration')\n self._compare_with_analytic(self.outname, self.line, self.baseline, ref_idx)", "def test_select_field():", "def ask_for_field(self, row, col):\n field = self.map.fields[row][col]\n # return the field kind, team, and if there is an entity or not\n return field.passable, field.team, field.entity is not None", "def checkField(fieldset, text=True):\n if text:\n print \"\\nFieldset contains the following fields:\"\n for i in range(len(fieldset.fields)):\n print fieldset.fields[i].name\n\n ulon = fieldset.U.grid.lon\n ulat = fieldset.U.grid.lat\n udep = fieldset.U.grid.depth\n vlon = fieldset.V.grid.lon\n vlat = fieldset.V.grid.lat\n vdep = fieldset.V.grid.depth\n\n if text:\n if np.all(ulon == vlon):\n print \"longitudes are the same for U and V\"\n else:\n print \"longitudes are not the same for U and V. 
Note that not all functions will work as intended.\"\n if np.all(ulat == vlat):\n print \"latitudes are the same for U and V\"\n else:\n print \"latitudes are not the same for U and V. Note that not all functions will work as intended.\"\n if np.all(udep == vdep):\n print \"depths are the same for U and V\"\n else:\n print \"depths are not the same for U and V. Note that not all functions will work as intended.\"\n\n return np.all(ulon == vlon) and np.all(ulat == vlat) and np.all(udep == vdep)", "def check_field_name(field_name):\n\n error_checking.assert_is_string(field_name)\n if field_name in ALL_PREDICTOR_NAMES + ALL_TARGET_NAMES:\n return\n\n error_string = (\n '\\nField \"{0:s}\" is not valid predictor or target variable. Valid '\n 'options listed below:\\n{1:s}'\n ).format(field_name, str(ALL_PREDICTOR_NAMES + ALL_TARGET_NAMES))\n\n raise ValueError(error_string)", "def _is_ifgoto(self, words):\n if words[0] == 'if-goto':\n if len(words) != 2:\n raise SyntaxError(\"File line {}: Invalid number of arguments for C_IFGOTO command.\".format(self._file_line))\n return True\n else:\n return False", "def test_02_visit_again(self):", "def test_get_critical_from_existing_fields(self):\n name = 'generic_field'\n opts = {'names': (name, ), 'alt_field': '', 'computed': False}\n expected_field = self.form.fields.get(name, None)\n actual_name, actual_field = self.form.get_critical_field(opts['names'])\n self.assertEqual(name, actual_name)\n self.assertEqual(expected_field, actual_field)", "def assert_known_field(self, name):\n if not (name == self.id_field_name or self.is_known_field(name)):\n raise ChipsError(\"Unknown field in model %s [%s]\", self.__class__.__name__, name)", "def check():", "def _is_goto(self, words):\n if words[0] == 'goto':\n if len(words) != 2:\n raise SyntaxError(\"File line {}: Invalid number of arguments for C_GOTO command.\".format(self._file_line))\n return True\n else:\n return False", "def test_field_rules():", "def test(self):\n self.build()\n exe = self.getBuildArtifact(\"a.out\")\n self.runCmd(\"file \" + exe, CURRENT_EXECUTABLE_SET)\n\n lldbutil.run_to_source_breakpoint(\n self, '// Breakpoint for bitfield', lldb.SBFileSpec(\"main.c\"))\n\n self.expect(\"fr var a\", DATA_TYPES_DISPLAYED_CORRECTLY,\n patterns=[' = A$'])\n self.expect(\"fr var b\", DATA_TYPES_DISPLAYED_CORRECTLY,\n patterns=[' = B$'])\n self.expect(\"fr var c\", DATA_TYPES_DISPLAYED_CORRECTLY,\n patterns=[' = C$'])\n self.expect(\"fr var ab\", DATA_TYPES_DISPLAYED_CORRECTLY,\n patterns=[' = AB$'])\n self.expect(\"fr var ac\", DATA_TYPES_DISPLAYED_CORRECTLY,\n patterns=[' = A | C$'])\n self.expect(\"fr var all\", DATA_TYPES_DISPLAYED_CORRECTLY,\n patterns=[' = ALL$'])\n # Test that an enum that doesn't match the heuristic we use in\n # ClangASTContext::DumpEnumValue, gets printed as a raw integer.\n self.expect(\"fr var omega\", DATA_TYPES_DISPLAYED_CORRECTLY,\n patterns=[' = 7$'])\n # Test the behavior in case have a variable of a type considered\n # 'bitfield' by the heuristic, but the value isn't actually fully\n # covered by the enumerators.\n self.expect(\"p (enum bitfield)nonsense\", DATA_TYPES_DISPLAYED_CORRECTLY,\n patterns=[' = B | C | 0x10$'])\n\n # Break inside the main.\n bkpt_id = lldbutil.run_break_set_by_file_and_line(\n self, \"main.c\", self.line, num_expected_locations=1, loc_exact=True)\n self.runCmd(\"c\", RUN_SUCCEEDED)\n\n # The stop reason of the thread should be breakpoint.\n self.expect(\"thread list\", STOPPED_DUE_TO_BREAKPOINT,\n substrs=['stopped',\n 'stop reason = 
breakpoint'])\n\n # The breakpoint should have a hit count of 1.\n self.expect(\"breakpoint list -f\", BREAKPOINT_HIT_ONCE,\n substrs=[' resolved, hit count = 1'])\n\n # Look up information about the 'days' enum type.\n # Check for correct display.\n self.expect(\"image lookup -t days\", DATA_TYPES_DISPLAYED_CORRECTLY,\n substrs=['enum days {',\n 'Monday',\n 'Tuesday',\n 'Wednesday',\n 'Thursday',\n 'Friday',\n 'Saturday',\n 'Sunday',\n 'kNumDays',\n '}'])\n\n enum_values = ['-4',\n 'Monday',\n 'Tuesday',\n 'Wednesday',\n 'Thursday',\n 'Friday',\n 'Saturday',\n 'Sunday',\n 'kNumDays',\n '5']\n\n # Make sure a pointer to an anonymous enum type does crash LLDB and displays correctly using\n # frame variable and expression commands\n self.expect(\n 'frame variable f.op',\n DATA_TYPES_DISPLAYED_CORRECTLY,\n substrs=[\n 'ops *',\n 'f.op'],\n patterns=['0x0+$'])\n self.expect(\n 'frame variable *f.op',\n DATA_TYPES_DISPLAYED_CORRECTLY,\n substrs=[\n 'ops',\n '*f.op',\n '<parent is NULL>'])\n self.expect(\n 'expr f.op',\n DATA_TYPES_DISPLAYED_CORRECTLY,\n substrs=[\n 'ops *',\n '$'],\n patterns=['0x0+$'])\n self.expect(\n 'expr *f.op',\n DATA_TYPES_DISPLAYED_CORRECTLY,\n substrs=['error:'],\n error=True)\n\n bkpt = self.target().FindBreakpointByID(bkpt_id)\n for enum_value in enum_values:\n self.expect(\n \"frame variable day\",\n 'check for valid enumeration value',\n substrs=[enum_value])\n lldbutil.continue_to_breakpoint(self.process(), bkpt)", "def test_login_field(self):\n field = self.record.find('field[@name=\\'login\\']')\n self.assertEqual(field.text, 'adt', 'Incorrect login Field')", "def test_47(self):\n assert 'False' == Api.requestBlock('test-47', CustomFields=True)", "def test_check_source_8(self):\n self.src1._organism_host_genus = \"Gordonia\"\n import_genome.check_source(self.src1, self.eval_flags,\n host_genus=\"Mycobacterium\")\n count = count_status(self.src1, \"error\", \"warning\")\n self.assertEqual(count, 1)", "def op_jump_preconditions(self,piece):\n\n # Flag must be FREE or JUMP\n if(self.next_move == self.CAPT or self.next_move == self.ADDPIECE_1 or self.next_move == self.ADDPIECE_2):\n return False\n\n # Check if the piece is in the next pieces (deals with obligatory jumps)\n if(self.next_move == self.JUMP and piece not in self.next_pieces):\n return False\n\n return True", "def test_get_field_state_comparisons_bad_comp_state(self):\r\n self.assertRaises(ValueError, get_field_state_comparisons,\r\n self.dist_matrix_header, self.dist_matrix,\r\n self.mapping_header, self.mapping, self.field,\r\n ['T0', 'Fast'])\r\n self.assertRaises(ValueError, get_field_state_comparisons,\r\n self.dist_matrix_header, self.dist_matrix,\r\n self.mapping_header, self.mapping, self.field,\r\n ['Fast', 'T0'])", "def trivialFieldChecker(self, event):\n\n self.widget_insert_page_item[event.widget].is_good_flag = True", "def checkField(fromFieldType, toFieldType, delimiter):\n\n if fromFieldType == \"String\":\n if not toFieldType == \"String\":\n arcpy.AddError(\"Copy To Field must be of type text when Read From Field is of type text.\")\n else:\n if not toFieldType == \"String\":\n if delimiter != \"\":\n arcpy.AddError(\"Copy To Field must be of type text when Read From Field is of type numeric or date and you are using a delimiter.\")\n\n if delimiter == \"\":\n if fromFieldType == \"SmallInteger\":\n if not toFieldType in [\"Integer\", \"SmallInteger\", \"Float\", \"Double\"]:\n if toFieldType == \"Date\":\n arcpy.AddError(\"Copy To Field must be of type text.\")\n\n if fromFieldType == 
\"Integer\":\n if toFieldType in [\"SmallInteger\", \"Integer\", \"Float\", \"Double\", \"Date\"]:\n arcpy.AddError(\"Copy To Field must be of type text.\")\n\n else:\n if fromFieldType in [\"Float\", \"Double\" , \"Date\"]:\n if toFieldType in [\"Integer\", \"SmallInteger\", \"Float\", \"Double\" , \"Date\"]:\n arcpy.AddError(\"Copy To Field must be of type text.\")", "def test_should_name_field(self):\n self.assertIn(\"name\", self.fields)", "def test_single_field_success(self, client):\n field_dependency = client.single_field_dependency(9)\n assert field_dependency == self.test_field_dep", "def _guider_start(self, nCall, nInfo, nWarn, nErr, finish=False, didFail=False):\n cmdState = self.actorState.gotoField\n cmdState.reinitialize(self.cmd)\n result = masterThread.guider_start(self.cmd, cmdState, myGlobals.actorState, 'gotoField')\n self.assertEqual(result, not didFail)\n self._check_cmd(nCall, nInfo, nWarn, nErr, finish, didFail=didFail)", "def test_get_curr_bc_added_field_bc_and_added_field_no_hit(self):\r\n\r\n curr_bc = \"\"\r\n ids_bcs_added_field = {(\"\", \"1\"): \"s1\",\r\n (\"\", \"2\"): \"s2\", (\"\", \"3\"): \"s3\"}\r\n fasta_label = \"123ABC region=1 length=255\"\r\n all_bcs = [\"\"]\r\n barcode_type = 0,\r\n disable_bc_correction = False\r\n added_demultiplex_field = 'region'\r\n corrected_bc, num_errors, added_field =\\\r\n get_curr_bc_added_field(curr_bc, ids_bcs_added_field, fasta_label,\r\n all_bcs, barcode_type, disable_bc_correction, added_demultiplex_field)\r\n\r\n expected_corrected_bc = \"\"\r\n expected_num_errors = 0\r\n expected_added_field = \"1\"\r\n\r\n self.assertEqual(corrected_bc, expected_corrected_bc)\r\n self.assertEqual(num_errors, expected_num_errors)\r\n self.assertEqual(added_field, expected_added_field)", "def test_check_source_9(self):\n self.src1._host_host_genus = \"Gordonia\"\n import_genome.check_source(self.src1, self.eval_flags,\n host_genus=\"Mycobacterium\")\n count = count_status(self.src1, \"error\", \"warning\")\n self.assertEqual(count, 1)", "def required_field(variable_to_test: any, err_string: str) -> None:\n if variable_to_test is None:\n print('\\n' + err_string + '\\n')\n sys.exit(1)", "def test_region_check(self):\n reference = {'region': 'reference'}\n target = {'region': 'target'}\n\n # Check that IOError is raised for nonmatching regions\n self.assertRaises(IOError, librad_drift.RadiometricDrift.check_fields, reference, target)\n\n # Check no error raised if regions match\n librad_drift.RadiometricDrift.check_fields(reference, reference)", "def test_field_value_exact(self):\n field = 'M30'\n ref_idx = [2]\n self.res=self.run_task(infile=self.rawfile,field=field,calmode=self.calmode,outfile=self.outname,outform='ASAP')\n self.assertEqual(self.res,None,\n msg='Any error occurred during calibration')\n self._compare_with_analytic(self.outname, self.line, self.baseline, ref_idx)", "def test_48(self):\n assert 'False' == Api.requestBlock('test-48', CustomFields=True)", "def _validate_fields(self, change_fields):\n pass", "def test_49(self):\n assert 'False' == Api.requestBlock('test-49', CustomFields=True)", "def is_field(self, proof = True):\n return True", "def check_general(self, gb, gr):\n gb = General(\"BLUE\")\n gr = General(\"RED\")\n # Look to see if the generals are in the same column\n \n gr_row = self.ind(new_pos)[0]\n gr_col = self.ind(new_pos)[1]\n gb_row = self.ind(cur_pos)[0]\n gb_col = self.ind(cur_pos)[1]", "def test_init_get_critical_for_needed(self):\n # needed_names = [nf for nf in ('country_display', 
'country_flag') if nf not in self.form.base_fields]\n # for name in needed_names: name, field = self.get_critical_field(name, name)\n # original_get_critical_field = self.form.get_critical_field\n # self.form.get_critical_field = self.get_critical_field_signal\n print(\"================ TEST INIT GET CRITICAL FOR NEEDED ==================\")\n print(self.form.get_critical_field.__name__)\n # print(getattr(self, 'get_critical_call', 'NOT FOUND'))\n # print(getattr(self.form, 'get_critical_call', 'NOT FOUND'))\n name = 'country_display'\n expected = {'names': name, 'alt_name': name}\n field = self.form.fields.get(name, None) or self.form.computed_fields(name, None)\n response = self.form.get_critical_field(name, name)\n actual = getattr(self, 'get_critical_call', 'NOT FOUND')\n print(\"----------------------------------------\")\n print(response)\n print(expected)\n print(actual)\n # self.assertDictEqual(expected, actual)\n self.assertEqual((name, field), response)\n\n # self.get_critical_field = original_get_critical_field", "def test_step_gol():\n test_field = torch.tensor([[0, 1, 1, 1], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]])\n\n # Checking that the game works properly\n assert torch.all(torch.eq(step(test_field), torch.tensor([[0, 1, 1, 1],\n [0, 0, 1, 0],\n [0, 0, 0, 0],\n [0, 0, 1, 0]])))\n return", "def test_callable_name_get_critical_field(self):\n special = self.get_generic_name\n name, field = self.form.get_critical_field(special)\n expected_name = special()\n expected_field = self.form.fields[expected_name]\n self.assertEqual(expected_name, name)\n self.assertEqual(expected_field, field)", "def test_fields_effort_default_reference_unit_invalid(self, _mock_check):\n field = EffortField(\n time_units={\"minute\": (\"minute\", \"minutes\")},\n default_reference_unit=\"invalid\",\n )\n\n errors = field.check()\n self.assertEqual(len(errors), 1)\n\n error = errors[0]\n self.assertEqual(error.msg, \"'invalid' is not a valid time unit.\")\n self.assertEqual(error.obj, field)\n self.assertEqual(error.id, \"fields.E1013\")", "def test_field_code(self):\n inv_search = \"subject:p\"\n spi_search = \"f f p\"\n self._compare_searches(inv_search, spi_search)", "def test_raises_on_missing_needed_fields(self):\n test_name = \"impossible_creature_not_present\"\n self.form.constructor_fields = [*self.form.constructor_fields, test_name]\n message = \"The fields for email, username, and constructor must be set in fields. 
\"\n self.assertNotIn(test_name, self.form.base_fields)\n with self.assertRaisesMessage(ImproperlyConfigured, message):\n self.form.confirm_required_fields()", "def test_no_flag_handle_flag_field(self):\n original_flag_name = self.form.USERNAME_FLAG_FIELD\n self.form.USERNAME_FLAG_FIELD = \"This is not a valid field name\"\n expected = None\n actual = self.form.handle_flag_field(self.form.name_for_email, self.form.name_for_user)\n\n self.assertEqual(expected, actual)\n self.form.USERNAME_FLAG_FIELD = original_flag_name", "def test_raise_missing_flag_field(self):\n original_data = self.form.data\n original_fields = self.form.fields\n original_flag = self.form.USERNAME_FLAG_FIELD\n original_cleaned_data = getattr(self.form, 'cleaned_data', None)\n original_errors = getattr(self.form, '_errors', None)\n self.form.data = original_data.copy()\n self.form.fields = original_fields.copy()\n self.form.USERNAME_FLAG_FIELD = 'Not a valid field name'\n self.form.cleaned_data = {self.form.name_for_user: 'test_username', self.form.name_for_email: 'test_email'}\n # self.form._errors = ErrorDict() if original_errors is None else original_errors.copy()\n self.form._errors = None if original_errors is None else original_errors.copy()\n\n with self.assertRaises(ImproperlyConfigured):\n self.form.configure_username_confirmation()\n\n self.form.data = original_data\n self.form.fields = original_fields\n self.form.USERNAME_FLAG_FIELD = original_flag\n self.form.cleaned_data = original_cleaned_data\n self.form._errors = original_errors\n if original_cleaned_data is None:\n del self.form.cleaned_data\n if original_errors is None:\n del self.form._errors", "def test_entities__Field__1():\n zope.interface.verify.verifyObject(IField, Field())", "def goto(vehicle, dNorth, dEast):\n goto_function = vehicle.simple_goto # can be changed\n currentLocation = vehicle.location.global_relative_frame\n targetLocation = get_location_metres(currentLocation, dNorth, dEast)\n targetDistance = get_distance_metres(currentLocation, targetLocation)\n goto_function(targetLocation)\n\n #Stop action if we are no longer in guided mode.\n while vehicle.mode.name == \"GUIDED\": \n remainingDistance = get_distance_metres(vehicle.location.global_relative_frame, targetLocation)\n #print \"Distance to target: \", remainingDistance\n if remainingDistance <= shared.WP_RADIUS: #Just below target, in case of undershoot.\n #print \"Reached target\"\n break;\n\n time.sleep(0.5)", "def condition_forward_checking(csp, var) :\n return False", "def condition_forward_checking(csp, var) :\n return False", "def checkFieldMappings(self, param):\n field_mappings = param.value\n over_fields = []\n fields_warning = ('The following visible field name(s) are' +\n ' over 10 characters and will be shortened' +\n ' automatically by ArcGIS: ')\n for idx, val in enumerate(range(field_mappings.count)):\n if field_mappings.getVisible(idx) == 'VISIBLE':\n field = field_mappings.getNewName(idx)\n if len(field) > 10:\n over_fields.append(field)\n if over_fields:\n param.setWarningMessage(fields_warning + \", \".join(over_fields))\n else:\n param.clearMessage()", "def test_model_custom_field_editing_attribute_missing(self):\n\n try:\n error = False\n\n # GIVEN invalid model field definition\n # WHEN model gets executed in the system\n class TestTestModel(models.Model):\n name = AppModelCharField(max_length=256, blank=True, null=True)\n\n except Exception as e:\n msg = e.args[0]\n error = True\n\n # THEN error should be raised\n self.assertTrue(error)\n\n # AND clear error 
description is present\n ref_msg = 'Field editing statuses are missing for AppModelCharField; called from TestTestModel'\n self.assertEqual(ref_msg, msg)", "def test_handle_flag_error(self):\n pass", "def _WaitForField(self, field):\n self._GaiaWebviewContext().WaitForJavaScriptCondition(\n \"document.getElementById({{ field }}) != null && \"\n \"!document.getElementById({{ field }}).hidden\",\n field=field, timeout=20)", "def test_fields_effort_default_reference_unit_success(self, _mock_check):\n field = EffortField(\n time_units={\"minute\": (\"minute\", \"minutes\")},\n default_reference_unit=\"minute\",\n )\n\n errors = field.check()\n self.assertEqual(len(errors), 0)", "def test_raises_if_missed_fields(self):\n name = 'second'\n self.form.called_handle_modifiers = False\n remove = {'remove_field': name}\n self.form.handle_modifiers({}, [], **remove)\n self.assertNotIn(name, self.form.fields)\n self.assertIn(name, self.form.hold_field)\n message = \"Some unassigned fields, perhaps some added during handle_modifiers. \"\n with self.assertRaisesMessage(ImproperlyConfigured, message):\n self.form.make_fieldsets(add_field=name)\n self.form.called_handle_modifiers = False", "def check_returnable_fields(fields, result):\n returnable_fields = get_returnable_fields(result, verbose=False)\n for field in fields:\n if field not in returnable_fields:\n err_str = \"The field %s is not a returnable field for \" % (field)\n err_str += \"result %s\" % (result)\n raise ValueError(err_str)", "def test_check_source_11(self):\n self.src1._organism_host_genus = \"Mycobacterio\"\n import_genome.check_source(self.src1, self.eval_flags,\n host_genus=\"Mycobacterium\")\n count = count_status(self.src1, \"error\")\n self.assertEqual(count, 0)", "def test_parse_case_field_00(input_, expected):\n actual = regex.match_case_field(input_)\n assert actual == expected", "def _assert_reg_field(self, extra_fields_setting, expected_field):\n # Add in fields that are always present\n self._populate_always_present_fields(expected_field)\n\n # Retrieve the registration form description\n with override_settings(REGISTRATION_EXTRA_FIELDS=extra_fields_setting):\n response = self.client.get(self.url)\n self.assertHttpOK(response)\n\n # Verify that the form description matches what we'd expect\n form_desc = json.loads(response.content.decode('utf-8'))\n\n actual_field = None\n for field in form_desc[\"fields\"]:\n if field[\"name\"] == expected_field[\"name\"]:\n actual_field = field\n break\n\n self._assert_fields_match(actual_field, expected_field)", "def test_name_field(self):\n field = self.record.find('field[@name=\\'name\\']')\n self.assertEqual(field.text, 'GUH-ADT', 'Incorrect Name Field')", "def test_init_valid_field(self):\n fields = {'Conn Note': {'offset': 0,\n 'length': 20}}\n parser = top.Parser(fields=fields)\n received = parser.get_fields()\n expected = fields\n msg = 'Fields initialisation property setter/getter error.'\n self.assertEqual(received, expected, msg)", "def link_check(form, field):\n if form.registrable.data and len(field.data)==0:\n raise validators.ValidationError('link should is required when the forum is registrable')", "def test_check_source_10(self):\n self.src1._lab_host_host_genus = \"Gordonia\"\n import_genome.check_source(self.src1, self.eval_flags,\n host_genus=\"Mycobacterium\")\n count = count_status(self.src1, \"error\", \"warning\")\n self.assertEqual(count, 1)", "def test_field_value_pattern(self):\n field = 'M*'\n ref_idx = [0,1,2]\n 
self.res=self.run_task(infile=self.rawfile,field=field,calmode=self.calmode,outfile=self.outname,outform='ASAP')\n self.assertEqual(self.res,None,\n msg='Any error occurred during calibration')\n self._compare_with_analytic(self.outname, self.line, self.baseline, ref_idx)", "def check(self) -> None:", "def _test(self, c):\n\treturn self.UNRESOLVED\t\t# Placeholder", "def test_50(self):\n assert 'False' == Api.requestBlock('test-50', CustomFields=True)", "def test_kyc_post_legal_board_member(self):\n pass", "def inspect_melons(self,passed):\n if passed == True:\n self.passed_inspection = True\n # return True", "def validate_field(self, fieldname):\n fieldname = self.__class__.FIELD_ALIAS.get(fieldname, fieldname)\n v = self._data[fieldname]\n t = self._field_or_default_datatype(fieldname, v)\n gfapy.Field._validate_gfa_field(v, t, fieldname)", "def test_raise_error_unknown_field():\n\n options = {'fields': ['kHello']}\n\n with pytest.raises(KeyError) as excinfo:\n process.Packager(options)\n excinfo.match('Field ([a-zA-Z].*) not found in file list.')", "def test_field_id_range(self):\n field = '6~8'\n ref_idx = [1,2,3]\n self.res=self.run_task(infile=self.rawfile,field=field,calmode=self.calmode,outfile=self.outname,outform='ASAP')\n self.assertEqual(self.res,None,\n msg='Any error occurred during calibration')\n self._compare_with_analytic(self.outname, self.line, self.baseline, ref_idx)", "def test_get_curr_bc_added_field_bc_and_added_field(self):\r\n\r\n curr_bc = \"GGCAGCACTTGT\"\r\n ids_bcs_added_field = {(\"AACTCGTCGATG\", \"123\"): \"s1\",\r\n (\"AGCAGCACTTGT\", \"123\"): \"s2\", (\"ACAGAGTCGGCT\", \"ABCD\"): \"s3\"}\r\n fasta_label = \"123ABC region=1 length=255\"\r\n all_bcs = [\"AACTCGTCGATG\", \"AGCAGCACTTGT\", \"ACAGAGTCGGCT\"]\r\n barcode_type = \"golay_12\",\r\n disable_bc_correction = False\r\n added_demultiplex_field = 'run_prefix'\r\n corrected_bc, num_errors, added_field =\\\r\n get_curr_bc_added_field(curr_bc, ids_bcs_added_field, fasta_label,\r\n all_bcs, barcode_type, disable_bc_correction, added_demultiplex_field)\r\n\r\n expected_corrected_bc = \"AGCAGCACTTGT\"\r\n expected_num_errors = 1\r\n expected_added_field = \"123\"\r\n\r\n self.assertEqual(corrected_bc, expected_corrected_bc)\r\n self.assertEqual(num_errors, expected_num_errors)\r\n self.assertEqual(added_field, expected_added_field)", "def test_get_field_state_comparisons_no_comp_states(self):\r\n self.assertRaises(ValueError, get_field_state_comparisons,\r\n self.dist_matrix_header, self.dist_matrix,\r\n self.mapping_header, self.mapping, self.field,\r\n [])", "def test_make_form_field():", "def test_single_field_failure(self, client):\n with pytest.raises(CastorException) as e:\n client.single_field_dependency(2)\n assert str(e.value) == \"404 Entity not found.\"", "def test_clean_country_flag(self):\n # country_flag = self.cleaned_data.get('country_flag', None)\n # field = self.fields.get(self.country_field_name, None)\n # if not field and hasattr(self, 'computed_fields'):\n # field = self.computed_fields.get(self.country_field_name, None)\n # if field.initial == self.cleaned_data.get(self.country_field_name, None)\n pass", "def test_select_var_failed_if_bad_var_name(self):\n with patch('esmvalcore.cmor._fixes.fix.Fix.get_fixes',\n return_value=[]):\n with self.assertRaises(ValueError):\n fix_metadata(\n cubes=[\n self._create_mock_cube('not_me'),\n self._create_mock_cube('me_neither')\n ],\n short_name='short_name',\n project='CMIP6',\n dataset='model',\n mip='mip',\n )", "def test_27(self):\n assert 
'False' == Api.requestBlock('test-27')", "def test_field_errors(self):\n # For username field\n response = self.client.post(reverse('users:login'), data={\n 'username': 'username_that_not_exist',\n 'password': '11111111',\n })\n error = BeautifulSoup(response.content, 'html.parser').find('p', 'username-error').text\n\n self.assertEqual(error, 'User with this username doesn\\'t exist.')\n\n # For password field\n response = self.client.post(reverse('users:login'), data={\n 'username': 'temp1',\n 'password': '11111111',\n })\n error = BeautifulSoup(response.content, 'html.parser').find('p', 'non-field-error').text\n\n self.assertEqual(error, 'Enter correct password.')", "def test_field_id_exact(self):\n field = '6'\n ref_idx = [1]\n self.res=self.run_task(infile=self.rawfile,field=field,calmode=self.calmode,outfile=self.outname,outform='ASAP')\n self.assertEqual(self.res,None,\n msg='Any error occurred during calibration')\n self._compare_with_analytic(self.outname, self.line, self.baseline, ref_idx)", "def test_entities__Entity__getField__2(entity):\n assert IDummy['dummy2'] == entity.getField('dummy2')", "def check_grue(self, tile):\n if tile[2] == 'grue':\n if self.lab.inventory > 0:\n self.lab.fire()\n print 'Lighted match'", "def test_fields(self):\n form = self._get_form(data=None)\n self.assertEquals(len(form.fields), 4)\n self.assertTrue('tests_url' in form.fields)\n self.assertTrue('repo_url' in form.fields)\n self.assertTrue('pkg_type' in form.fields)\n self.assertTrue('tags' in form.fields)" ]
[ "0.67638075", "0.6552739", "0.6286361", "0.6170494", "0.6125492", "0.5995649", "0.59449685", "0.5715474", "0.5527284", "0.5455693", "0.5430143", "0.54134613", "0.530795", "0.530067", "0.5265416", "0.5247163", "0.52226955", "0.5134902", "0.5128405", "0.51222634", "0.5118506", "0.507991", "0.5075934", "0.50527847", "0.5018151", "0.50139004", "0.50080955", "0.5004106", "0.49996462", "0.49810272", "0.49631077", "0.49490714", "0.49488705", "0.49436623", "0.49411398", "0.4933822", "0.49048606", "0.4903577", "0.4901436", "0.48565638", "0.48545098", "0.4847694", "0.4844615", "0.4840407", "0.48328698", "0.48232615", "0.48063648", "0.4801461", "0.47966552", "0.47862187", "0.47787786", "0.477527", "0.47694856", "0.4769171", "0.47661033", "0.4760826", "0.47604647", "0.47596452", "0.4757198", "0.47549513", "0.47540453", "0.4751612", "0.47429374", "0.474126", "0.474126", "0.47311273", "0.47303915", "0.4725758", "0.4721384", "0.47154784", "0.4711148", "0.47098354", "0.4702116", "0.4690037", "0.46880516", "0.46824446", "0.4680683", "0.46739206", "0.4670181", "0.46625304", "0.46610245", "0.46421143", "0.46420065", "0.46413144", "0.46402544", "0.46368027", "0.46341586", "0.46294355", "0.4628066", "0.46272087", "0.46271235", "0.46258572", "0.46227115", "0.46120226", "0.46105298", "0.46054244", "0.4604083", "0.4601052", "0.4600611", "0.45975533" ]
0.7532309
0
Helper for boss science tests
def _do_boss_science(self, nCall, nInfo, nWarn, nErr, nExp=1): self._update_cart(11, 'BOSS') cmdState = self.actorState.doBossScience cmdState.reinitialize(self.cmd) cmdState.nExp = nExp masterThread.do_boss_science(self.cmd, cmdState, myGlobals.actorState) self._check_cmd(2, nInfo, nWarn, nErr, True)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def testBeliefs1sk(self):", "def test_apply_endorsements(self):", "def test_theft_and_stealing(self):", "def healthcare():", "def testBeliefs2sk(self):", "def test_T01():", "def test_alien_data(self):", "def test_households_in_admin_unit(self):", "def unitary_test():", "def test_get_waivers(league):\n pass", "def _test(self):", "def _test(self):", "def _test(self):", "def _test(self):", "def _test(self):", "def test_art_from_taste_space(self):", "def tests():", "def test_let(self):", "def test_bed(self):\n #TODO write bed tests", "def test(self):\n pass", "def test_get_boat(self):\n pass", "def _test(self):\n pass", "def _test(self):\n pass", "def _test(self):\n pass", "def test():", "def test():", "def test_observatory(lasco):\n assert lasco.observatory == \"SOHO\"", "def test_dummy():", "def exercise_b2_70():\r\n pass", "def test_4_4_1_1(self):\n pass", "def test_create_unexpected_problem(self):\n pass", "def test_4():", "def test_get_insumo(self):", "def sth():", "def test_title(names):", "def exercise_b2_106():\r\n pass", "def test_rules():", "def exercise_b2_53():\r\n pass", "def test(self):", "def test(self):", "def test_get_scenario(self):\n pass", "def exercise_b2_52():\r\n pass", "def test_subsystems(self):\n pass", "def test_get_goal(self):\n pass", "def test(self):\n raise NotImplementedError", "def test_03_visit_special(self):", "def test_T2():", "def test_T2():", "def test_T3():", "def test_T3():", "def test_T4():", "def test_T4():", "def exercise_b2_69():\r\n pass", "def exercise_b2_107():\r\n pass", "def test_5():", "def test_get_solution(self):\n pass", "def test():\n pass", "def test_do_boss_science(self):\n sopTester.updateModel('mcp', TestHelper.mcpState['boss_science'])\n nExp = 2\n self._do_boss_science(nExp, 35, 0, 0, nExp=nExp)", "def test_get_game(self):\n pass", "def test_submit_for_endorsement(self):", "def getTestResults():", "def test_guess_nutrition_by_dish_name(self):\n pass", "def exercise_b2_56():\r\n pass", "def test_something():", "def test_T1():", "def test_T1():", "def test_quick_answer(self):\n pass", "def test_observatory(eit_map):\n assert eit_map.observatory == \"SOHO\"", "def test_get_by_name2(self):\n pass", "def test_get_details7(self):\n pass", "def exercise_b2_98():\r\n pass", "def test_arc_smear(self):", "def exercise_b2_95():\r\n pass", "def test_3():", "def test_T0():", "def test_get_by_name1(self):\n pass", "def test_kyc_get_legal(self):\n pass", "def test_predictor():", "def exercise_b2_27():\r\n pass", "def test_get_scenarios(self):\n pass", "def test_basic_execution(self):", "def test_get_systems(self):\n pass", "def test_initialization_of_homework_result_homework():\n assert result_1.homework == oop_hw", "def test_initialization_of_homework_result_solution():\n assert result_1.solution == \"I have done this hw\"", "def exercise_b2_113():\r\n pass", "def test_create_system_entire(self):\n pass", "def test_01_lighting(self):", "def test_get1(self):\n pass", "def describe():", "def test_create_from_pear(self):\n pass", "def test_create_boat(self):\n pass", "def testNSESanityChecks(self):\n self.assertEqual(100, self.c.get_species_richness())\n self.assertEqual(67, self.c2.get_species_richness())", "def test_gtf(self):\n #TODO write bed tests", "def runtest(self):", "def test_initialization_of_homework_result_author():\n assert result_1.author == good_student", "def inner_test():\n pass", "def inner_test():\n pass", "def test_get_game_boxscore(self):\n pass", "def test_get_run(self):\n pass", "def test_if(self):", "def test_require():" ]
[ "0.6812872", "0.6686834", "0.6662029", "0.66596895", "0.6609597", "0.655939", "0.6549792", "0.6543334", "0.65167713", "0.6514505", "0.6439195", "0.6439195", "0.6439195", "0.6439195", "0.6439195", "0.64386874", "0.63926923", "0.6362536", "0.632096", "0.6316661", "0.6280583", "0.62787473", "0.62787473", "0.62787473", "0.6250387", "0.6250387", "0.6239162", "0.62319994", "0.62277293", "0.62227726", "0.6182341", "0.61744124", "0.6174265", "0.6170803", "0.6145204", "0.6141071", "0.6135324", "0.6134568", "0.61317664", "0.61317664", "0.61289835", "0.6128308", "0.6122706", "0.61205626", "0.6111555", "0.60926104", "0.6090884", "0.6090884", "0.6090731", "0.6090731", "0.6079427", "0.6079427", "0.60735434", "0.6072674", "0.6068866", "0.6065172", "0.6060398", "0.6054843", "0.60480833", "0.6043743", "0.60152835", "0.60117936", "0.6011772", "0.6001377", "0.5985093", "0.5985093", "0.5969813", "0.59656733", "0.5951016", "0.59473294", "0.5946892", "0.5941503", "0.5936585", "0.5926208", "0.59256935", "0.59243804", "0.591835", "0.5904382", "0.5899195", "0.5892244", "0.5885949", "0.5884395", "0.58831674", "0.5878359", "0.5873925", "0.5871371", "0.58698547", "0.58581173", "0.5856655", "0.5848433", "0.5845336", "0.58447963", "0.58409107", "0.58359194", "0.5834048", "0.58330595", "0.58330595", "0.5828992", "0.58145076", "0.58131313", "0.58125865" ]
0.0
-1
One call per requested exposure
def test_do_boss_science(self): sopTester.updateModel('mcp', TestHelper.mcpState['boss_science']) nExp = 2 self._do_boss_science(nExp, 35, 0, 0, nExp=nExp)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def endexposureloop(self):\n self.max_exposures = self.current_exposure", "def observeField(target, exposure):\n\n status = 2\n real_exposure = exposure + np.random.normal(0.0, 20.0)\n realSN2 = target['DESsn2'] + np.random.uniform(0.0, 1.0)\n\n return status, real_exposure, realSN2", "def expose(self):\n\n ## Determine type of exposure (exp, series, stack)\n exptype = str(self.exptypeComboBox.currentText())\n mode = self.modedict[exptype]\n\n ## Get exposure parameters\n if mode == \"bias\":\n exptime = 0.0\n else:\n exptime = self.exptimeSpinBox.value()\n imcount = self.imstackSpinBox.value()\n seqnum = self.imnumSpinBox.value()\n mintime = self.minexpSpinBox.value()\n maxtime = self.maxexpSpinBox.value()\n step = self.tstepSpinBox.value()\n\n ## Determine filter kwargs\n if self.filterToggleButton.isChecked():\n kwargs = {'filter_name' : str(self.filterComboBox.currentText())}\n else:\n kwargs = {'monowl' : self.monoSpinBox.value()}\n\n if self.testimCheckBox.isChecked():\n title = 'test'\n else:\n title = str(self.imtitleLineEdit.text())\n\n ## Build filepath\n filepath = os.path.join(str(self.imfilenameLineEdit.text()),title)\n \n ## Check if single exposure\n if exptype in [\"Exposure\", \"Dark\", \"Bias\"]:\n\n ## Perform exposure\n self.logger.info(\"Starting {0}s {1} image.\".format(exptime, exptype))\n self.image_start.emit(1)\n\n try:\n filename = exposure.im_acq(mode, filepath, exptime, seqnum, **kwargs)\n self.image_taken.emit(1)\n except subprocess.CalledProcessError:\n self.logger.exception(\"Error in executable {0}_acq. Image not taken.\".format(mode))\n except OSError:\n self.logger.exception(\"Executable {0}_acq not found. Image not taken\".format(mode))\n except IOError:\n self.logger.exception(\"File already exits. Image not taken.\")\n else:\n self.logger.info(\"Exposure {0} finished successfully.\".format(filename))\n self.seqnum_inc.emit(seqnum)\n subprocess.Popen(['ds9', '-mosaicimage', 'iraf', filename,\n '-zoom', 'to', 'fit', '-cmap', 'b'])\n\n ## Check if a stack of exposures of same type\n elif exptype in [\"Exposure Stack\", \"Dark Stack\", \"Bias Stack\"]:\n\n total = seqnum + imcount\n self.logger.info(\"Starting {0}s {1} stack.\".format(exptime, exptype))\n self.image_start.emit(imcount)\n\n try:\n for i in range(seqnum, total):\n self.logger.info(\"Starting image {0} of {1}.\".format(i+1-seqnum, imcount))\n filename = exposure.im_acq(mode, filepath, exptime, i, **kwargs)\n self.logger.info(\"Exposure {0} finished successfully.\".format(filename))\n self.image_taken.emit(i+1-seqnum)\n self.seqnum_inc.emit(i)\n except subprocess.CalledProcessError:\n self.logger.exception(\"Error in executable {0}_acq. Image not taken.\".format(mode))\n except OSError:\n self.logger.exception(\"Executable {0}_acq not found. Image not taken.\".format(mode))\n except IOError:\n self.logger.exception(\"File already exists. Image not taken.\")\n else:\n self.logger.info(\"Exposure stack finished successfully.\")\n subprocess.Popen(['ds9', '-mosaicimage', 'iraf', filename, '-zoom', 'to', 'fit', '-cmap', 'b'])\n \n ## Check if a series of exposures of increase exposure time\n elif exptype in [\"Exposure Series\", \"Dark Series\"]:\n\n ## Parameter checks\n if mintime > maxtime:\n self.logger.warning(\"Minimum time must be less than Maximum time. Series not started.\")\n return\n elif step <= 0:\n self.logger.warning(\"Time step must be greater than 0. 
Series not started.\")\n return\n\n ## Construct array of exposure times\n t = mintime\n time_array = []\n while t <= maxtime:\n time_array.append(t)\n t += step\n \n ## Perform series\n self.logger.info(\"Starting {0} series with mintime {1}, maxtime {2}, and step {3}.\".format(exptype, mintime, maxtime, step))\n self.image_start.emit(len(time_array))\n \n try:\n for i, time in enumerate(time_array):\n self.logger.info(\"Starting {0}s {1} image.\".format(time, mode))\n filename = exposure.im_acq(mode, filepath, time, seqnum, **kwargs)\n self.logger.info(\"Exposure {0} finished successfully.\".format(filename))\n self.image_taken.emit(i+1)\n except subprocess.CalledProcessError:\n self.logger.exception(\"Error in executable {0}_acq. Image not taken.\".format(mode))\n except OSError:\n self.logger.exception(\"Executable {0}_acq not found. Image not taken.\".format(mode))\n except IOError:\n self.logger.exception(\"File already exists. Image not taken.\")\n else:\n self.logger.info(\"Exposure series finished successfully.\")\n subprocess.Popen(['ds9', '-mosaicimage', 'iraf', filename, '-zoom', 'to', 'fit', '-cmap', 'b'])\n self.seqnum_inc.emit(seqnum)", "def exp(self, exposure_time):\n print(f\"exp: {exposure_time}\")\n self.device_control.exposure = exposure_time\n yield", "def expose(self, cmd, expTime, expType):\n\n if not expType:\n expType = 'test'\n if cmd:\n cmd.inform('exposureState=\"exposing\"')\n if expType not in ('bias', 'test') and expTime > 0:\n time.sleep(expTime + self._exposureOverheadTime())\n\n if cmd:\n cmd.inform('exposureState=\"reading\"')\n\n f = pyfits.open('/home/chyan/mhs/data/mcs/schmidt_fiber_snr400_rmod71.fits')\n image = f[0].data\n # image = numpy.random.normal(self.biasLevel,\n # scale=self.readNoise,\n # size=self.imageSize).astype('u2')\n\n if expType != 'test':\n time.sleep(self._readoutTime())\n return image", "def exposure(frameType, expTime):\n\n blobEvent.clear() \n\n # set the specified frame type\n if frameType.lower() == 'light':\n ccd_frame[0].s = PyIndi.ISS_ON\n ccd_frame[1].s = PyIndi.ISS_OFF\n ccd_frame[2].s = PyIndi.ISS_OFF\n ccd_frame[3].s = PyIndi.ISS_OFF \n indiclient.sendNewSwitch(ccd_frame)\n elif frameType.lower() == 'bias':\n ccd_frame[0].s = PyIndi.ISS_OFF\n ccd_frame[1].s = PyIndi.ISS_ON\n ccd_frame[2].s = PyIndi.ISS_OFF\n ccd_frame[3].s = PyIndi.ISS_OFF \n indiclient.sendNewSwitch(ccd_frame)\n elif frameType.lower() == 'dark':\n ccd_frame[0].s = PyIndi.ISS_OFF\n ccd_frame[1].s = PyIndi.ISS_OFF\n ccd_frame[2].s = PyIndi.ISS_ON\n ccd_frame[3].s = PyIndi.ISS_OFF \n indiclient.sendNewSwitch(ccd_frame)\n elif frameType.lower() == 'flat':\n ccd_frame[0].s = PyIndi.ISS_OFF\n ccd_frame[1].s = PyIndi.ISS_OFF\n ccd_frame[2].s = PyIndi.ISS_OFF\n ccd_frame[3].s = PyIndi.ISS_ON \n indiclient.sendNewSwitch(ccd_frame)\n\n # set the value for the next exposure\n ccd_exposure[0].value=expTime\n\n indiclient.sendNewNumber(ccd_exposure)\n\n # wait for the exposure\n blobEvent.wait()\n\n for blob in ccd_ccd1:\n # pyindi-client adds a getblobdata() method to IBLOB item\n # for accessing the contents of the blob, which is a bytearray in Python\n image_data=blob.getblobdata()\n\n # write the byte array out to a FITS file\n global imgNum\n global imgName\n imgNum += 1\n fileName = fileDir+'raw-'+str(imgNum).zfill(8)+'.fits'\n f = open(fileName, 'wb')\n f.write(image_data)\n f.close()\n imgName = fileName\n \n return fileName", "def runPass(self, exposure_range, rate):\n r = rospy.Rate(rate)\n for i, exposure in enumerate(exposure_range):\n if 
rospy.is_shutdown():\n break\n\n self.current_exposure = exposure\n self.client.update_configuration(\n {\"exposure\": self.current_exposure})\n r.sleep()\n\n finished = (i >= (len(exposure_range)-1))\n if finished:\n optimal_exposure = max(self.scores, key=self.scores.get)\n self.reset()\n return optimal_exposure # an optimal exposure has been found\n else:\n return -1", "def call(self):", "def get_exposure(exposure_id,b_mean,b_sd,c_mean,c_sd,non_rate,dist_type,mortalities):#id in db\n\te_id \t\t= int(long(exposure_id))\n\texposure_outcomes = DBHelper.exposure_outcome\n\toutcome_ids \t= DBHelper.exposure_outcome.get(e_id)\n\n\tsamples_rr \t= DBHelper.samples_rr.get(e_id)\n\tsamples_pop \t= DBHelper.samples_pop.get(e_id)\n\trisks \t\t= DBHelper.risks.get(e_id)\n\tmeasure \t= DBHelper.measures.get(e_id)\n\tdist_type \t= get_dist_type(e_id)\n\n\t#get population distribution \n\tpopDistribution = PopDistribution(DBHelper.age_group_num,non_rate,b_mean,b_sd,c_mean,c_sd,samples_pop,dist_type)\n\n\t#get outcomes\n\toutcomes = []\n\tfor o_id in outcome_ids:\n\t\t# mortality\n\t\tm_mortality = mortalities.get(2*o_id)\n\t\tf_mortality = mortalities.get(2*o_id+1)\n\t\t# risks\n\t\tm_risks = risks.get(2*o_id)\n\t\tf_risks = risks.get(2*o_id+1)\n\t\t# outcome name\n\t\tname = DBHelper.get_outcome_name(o_id)\n\t\t# limit estimates\n\t\tlle = DBHelper.exposure_outcome.get(e_id).get(o_id)[0]\n\t\tule = DBHelper.exposure_outcome.get(e_id).get(o_id)[1]\n\t\t# outcome\n\t\toutcome = PrimeOutcome(name,o_id,m_mortality,f_mortality,samples_rr,m_risks,f_risks,lle,ule,measure,e_id) \n\t\toutcomes.append(outcome)\n\n\texposure = PrimeExposure(mortalities,outcome_ids,samples_rr,samples_pop,outcomes,popDistribution)\n\treturn exposure", "def hit(self):", "def _generate_exposure(self, expstart, number):\n\n index_number = number - 1 # for zero indexing\n\n filename = '{:04d}_raw.fits'.format(number)\n\n exp_gen = ExposureGenerator(self.detector, self.grism, self.NSAMP,\n self.SAMPSEQ, self.SUBARRAY,\n self.planet, filename, expstart)\n\n if not self.spatial_scan:\n self.sample_rate = 1 * u.year # high number reverts to read times\n\n _, sample_mid_points, sample_durations, read_index = \\\n exp_gen._gen_scanning_sample_times(self.sample_rate)\n\n time_array = (sample_mid_points + expstart).to(u.day)\n\n if self.transmission_spectroscopy:\n star_norm_flux = self.generate_lightcurves(time_array)\n planet_depths = 1 - star_norm_flux\n else:\n planet_depths = None\n\n # x shifts - linear shift with exposure, second exposure shifted by\n # x_shifts, direct image and first exp will match.\n x_ref = self._try_index(self.x_ref, index_number)\n y_ref = self._try_index(self.y_ref, index_number)\n sky_background = self._try_index(self.sky_background, index_number)\n\n # X and Y Shifts\n x_ref += self.x_shifts * index_number\n y_ref += self.y_shifts * index_number\n x_jitter = self.x_jitter\n y_jitter = self.y_jitter\n\n if self._visit_trend:\n scale_factor = self._visit_trend.get_scale_factor(index_number)\n else:\n scale_factor = None\n\n if self.spatial_scan:\n exp_frame = exp_gen.scanning_frame(\n x_ref, y_ref, x_jitter, y_jitter,\n self.wl, self.stellar_flux, planet_depths,\n self.scan_speed, self.sample_rate, sample_mid_points,\n sample_durations, read_index, ssv_generator=self.ssv_gen,\n noise_mean=self.noise_mean, noise_std=self.noise_std,\n add_flat=self.add_flat, add_dark=self.add_dark,\n scale_factor=scale_factor, sky_background=sky_background,\n cosmic_rate=self.cosmic_rate,\n 
add_gain_variations=self.add_gain_variations,\n add_non_linear=self.add_non_linear,\n clip_values_det_limits=self.clip_values_det_limits,\n add_read_noise=self.add_read_noise,\n add_stellar_noise=self.add_stellar_noise,\n add_initial_bias=self.add_initial_bias,\n progress_bar=self.progess,\n threads=self.threads\n )\n else:\n exp_frame = exp_gen.staring_frame(\n x_ref, y_ref, x_jitter, y_jitter,\n self.wl, self.stellar_flux, planet_depths,\n sample_mid_points, sample_durations, read_index,\n noise_mean=self.noise_mean, noise_std=self.noise_std,\n add_flat=self.add_flat, add_dark=self.add_dark,\n scale_factor=scale_factor, sky_background=sky_background,\n cosmic_rate=self.cosmic_rate,\n add_gain_variations=self.add_gain_variations,\n add_non_linear=self.add_non_linear,\n clip_values_det_limits=self.clip_values_det_limits,\n add_read_noise=self.add_read_noise,\n add_stellar_noise=self.add_stellar_noise,\n add_initial_bias=self.add_initial_bias,\n progress_bar=self.progess,\n threads=self.threads\n )\n\n exp_frame.generate_fits(self.outdir, filename, ldcoeffs=self.ldcoeffs)\n\n return exp_frame", "def tcs_exposure_request(image_type, duration = 0, number = 1):\n\n\tvalid_types = ['THERMAL','DARK', 'BIAS', 'FLAT','OBJECT']\n\tvalid = image_type in valid_types\n\n\tif valid:\n\t\timage_type = image_type.lower()\n\t\tif image_type == 'dark':\n\t\t\timage_type = 'thermal'\n\n\t\tif number < 1:\n\t\t\tlogger.error('Invalid number of exposures requested')\n\t\t\trespond = set_err_codes.STATUS_CODE_EXPOSURE_NOT_STARTED\n\t\t\treturn respond\n\n\t\tif duration <0:\n\t\t\tlogger.error('Invalid exposure time requested')\n\t\t\trespond = set_err_codes.STATUS_CODE_EXPOSURE_NOT_STARTED\n\t\t\treturn respond\n\n\t\tcommand_str = 'expose ' + image_type\n\t\tif number != 1:\n\t\t\tcommand_str += ' '+str(number)\n\t\tif image_type != 'bias':\n\t\t\tcommand_str += ' ' + str(duration)\n\t\t\n\t\ttry:\n\t\t\ttcs_respond = send_command(command_str)\n\t\t\n\t\texcept:\n\t\t\trespond = set_err_codes.STATUS_CODE_EXPOSURE_NOT_STARTED\n\t\telse:\n\t\t\t\n\t\t\tcam_temp = get_camera_status()[2]\n\t\t\t#if good_response and cam_temp>-20:\n\t\t\tif float(cam_temp)>-20:\n\t\t\t\trespond = set_err_codes.STATUS_CODE_CCD_WARM\n\t\n\t\t\telse:\n\t\t\t\trespond = set_err_codes.STATUS_CODE_OK\n\t\t\t\n\t\treturn respond\n\n\telse:\n\t\tlogger.error('Invalid image type provided to exposure request '+str(\n\t\t\t\timage_type))\n\t\tprint('Invalid image type provided to exposure request'+str(\n\t\t\timage_type))", "def run_observation(self):\n\n self._generate_direct_image() # to calibrate x_ref and y_ref\n\n num_frames = len(self.exp_start_times)\n progress = Progress(num_frames)\n self.progess = progress\n\n progress_line = 'Generating frames 0/{} done'.format(num_frames)\n progress.print_status_line(progress_line)\n progress.progress_line = progress_line\n\n for i, start_time in enumerate(self.exp_start_times):\n filenum = i + 1\n self._generate_exposure(start_time, filenum)\n\n progress.increment()\n progress_line = 'Generating frames {}/{} done'.format(filenum,\n num_frames)\n progress.print_status_line(progress_line)\n\n # so it can be retreived by exposure_generator\n progress.progress_line = progress_line", "def exposureCallback(self, config):\n rospy.loginfo('Set exposure: {}'.format(config['exposure']))", "def exposure(cls, *args):\n return cls.path_finder('exposure', *args)", "def exposure(cls, *args):\n return cls.path_finder('exposure', *args)", "def _do_expose_loop(self, fitsfile, seconds):\n log.debug(f\"Starting 
expose loop with {self.max_exposures} exposures\")\n while (self.current_exposure is not None and \n self.current_exposure < self.max_exposures):\n self.current_exposure += 1\n self.Expose(fitsfile, seconds)\n while self.process and self.process.poll() is None:\n sleep(5)\n if not self.process or self.process.returncode != 0:\n break\n \n self.current_exposure = None\n self.max_exposures = None", "def exposure():\n def r(x):\n return x/6e4\n\n def w(x):\n return int(x*6e4)\n return r, w", "def exposure(self):\n\n # define a range of declination to evaluate the\n # exposure at\n self.declination = np.linspace(-np.pi/2, np.pi/2, self.num_points)\n\n m = np.asarray([m_dec(d, self.params) for d in self.declination])\n \n # normalise to a maximum at 1\n self.exposure_factor = (m / m_dec(-np.pi/2, self.params))\n\n # find the point at which the exposure factor is 0\n self.limiting_dec = Angle((self.declination[m == 0])[0], 'rad')", "def select_exposure(self):\n exp1_selected = self.exp1_radio.isChecked()\n\n if self.recording_sequence:\n self.record_sequence() # stop current recording\n\n if exp1_selected: # then exp1\n ifi_ndx = self.exp1_ifi_select.currentIndex()\n self.camera.set_exposure(self.exp1_select.currentIndex(), ifi_ndx)\n else:\n ifi_ndx = self.exp2_ifi_select.currentIndex()\n self.camera.set_exposure(self.exp2_select.currentIndex(), ifi_ndx)\n\n temp = list(self.dpar.iwindow_toggle_save)\n self.dpar.iwindow_toggle_save = list(self.dpar.iwindow[0])\n self.dpar.iwindow[0] = temp\n self._update_scrollbars()\n\n self.rec_seq_button.setEnabled(ifi_ndx > 0)\n\n et = np.int(np.round(self.camera.actual_exposure_time_ms))\n self.write_to_log('Exposure %d ms' % et)", "def apply(self):", "def access():", "def selectOptimalExposureCallback(self):\n rospy.loginfo('Starting automatic exposure selection algorithm')\n\n # Start searching for the optimal exposure\n best_exp_pass1 = self.runPass(range(100, 27100, 3000), rate=self.rate)\n # first pass, find coarse exposure\n if best_exp_pass1 > 0:\n rospy.loginfo('First pass: success!')\n rospy.loginfo(\n 'Best exposure in first pass: {}'.format(best_exp_pass1))\n\n # second pass, refine the exposure\n exp_range = [int(exp) for exp in np.linspace(\n 0.9*best_exp_pass1, 1.1*best_exp_pass1, 10)]\n best_exp_pass2 = self.runPass(exp_range, rate=self.rate)\n\n if best_exp_pass2 > 0:\n rospy.loginfo('Second pass: success!')\n rospy.loginfo(\n 'Best exposure in second pass: {}'.format(best_exp_pass2))\n optimal_exposure = best_exp_pass2\n else:\n rospy.logerr(\n 'Second pass: failure\\nSetting the best exposure found in first pass')\n optimal_exposure = best_exp_pass1\n\n self.client.update_configuration({\"exposure\": optimal_exposure})\n self.reset()\n\n else:\n rospy.logerr(\n 'First pass: failure\\nCould not auto-set the exposure')", "def expose(self, cmd):\n\n expType = cmd.cmd.keywords[0].name\n if expType in ('bias', 'test'):\n expTime = 0.0\n else:\n expTime = cmd.cmd.keywords[\"expTime\"].values[0]\n\n filename, image = self._doExpose(cmd, expTime, expType)\n cmd.finish('exposureState=done')", "def request_access(self):\n pass", "def exp_scan(self, exposure_time_list):\n self.generic_scan(self.exp, exposure_time_list)", "def __call__(self):\n if grinder.runNumber == 0: self.initialSleep()\n (param1, param2) = self.getParam()\n self.request1(param1, param2)", "def set_exposure(self, exposure):\n self.logger.info(f'Setting exposure to {exposure}')\n self._driver.ExposureTime.SetValue(exposure)", "def __call__(self,sensation,reward=None):\n raise 
NYI", "def apply(self):\r\n return", "def apply(self):\r\n return", "def apply(self):\r\n return", "def apply(self):\r\n return", "def apply(self):\r\n return", "def explore_action():\n # def get_action(o, noise_scale):\n # a = ac.act(torch.as_tensor(o, dtype=torch.float32))\n # a += noise_scale * np.random.randn(act_dim)\n # return np.clip(a, -act_limit, act_limit)\n raise NotImplementedError", "def toggle_exposure(self):\n\n checked1 = self.exp1_radio.isChecked()\n if checked1:\n self.exp2_radio.setChecked(True)\n else:\n self.exp1_radio.setChecked(True)\n self.select_exposure()", "def apply(self) -> None:", "def apply(self) -> None:", "def expose(self):\n if self.camera is None: # test mode -- immediately return test image\n print(\"NO SPECTRAL CAMERA FOUND -- USING TEST DATA\")\n self.filename = \"example_fits_files/Mooi\"\n return\n\n exposure_time = self.time.get()\n try:\n self.exposure_time = float(exposure_time)\n except:\n message = \"Exposure time \\\"{0}\\\" cannot be converted to floating point number\".format(exposure_time)\n messagebox.showerror(\"Error\", message)\n raise ValueError(message)\n filename = \"spectra/{0}\".format(timestamp())\n self.camera.spectrum(self.exposure_time, filename)\n self.filename = filename", "def run(self, exposure, sources):\n with self.distortionContext(exposure, sources) as bbox:\n results = self.astrometry(exposure, sources, bbox=bbox)\n\n if results.matches:\n self.refitWcs(exposure, sources, results.matches)\n\n return results", "def __call__(self, frame_num):\n # propagate and set the density\n self.img.set_array(\n np.abs(self.quant_sys.propagate(10)) ** 2\n )\n return self.img,", "def on_start(self):\r\n self.start_whole_exposure()", "def apply(self):\n pass", "def apply(self):\n pass", "def falcon():", "def apply(self, request):\n raise NotImplementedError(u\"%s: Method not implemented\",\n self.__class__.__name__)", "def experiment(self) -> Any:", "def _step(self, action):\n \n obs, reward, done, info = self.env.step(action)\n\n \n advice=self.generateAdvice()[1]\n\n obs = {\n \"image\": obs,\n \"advice\": advice\n }\n\n\n \n\n\n\n return obs, reward, done, info", "def collect_data(self,sensation,action,reward,next_sensation):\n pass", "async def expose(obj, exptime, outfile, overwrite):\n\n async with obj['camera_system'] as fli:\n\n log.debug('starting camera exposures ... ')\n exposures = await asyncio.gather(*[camera.expose(exptime)\n for camera in fli.cameras],\n return_exceptions=False)\n\n log.debug('writing images to disk ... 
')\n writers = []\n for exposure in exposures:\n if outfile:\n outfile = outfile.format(camera=exposure.camera)\n writers.append(exposure.write(filename=outfile,\n overwrite=overwrite))\n else:\n writers.append(exposure.write(overwrite=overwrite))\n\n await asyncio.gather(*writers, return_exceptions=True)", "def next_point(self):\n if self.verbose:\n print(\"Computing acquisition function...\")\n if self.acquisition_function == 'cb':\n acq, pred = acqfunc.confidence_bound(\n self.surrogate_model, self.X_full,\n alpha=self.alpha, beta=self.beta)\n elif self.acquisition_function == 'ei':\n acq, pred = acqfunc.expected_improvement(\n self.surrogate_model, self.X_full,\n self.X_sparse, xi=self.xi)\n elif self.acquisition_function == 'poi':\n acq, pred = acqfunc.probability_of_improvement(\n self.surrogate_model, self.X_full,\n self.X_sparse, xi=self.xi)\n elif isinstance(self.acquisition_function, types.FunctionType):\n acq, pred = self.acquisition_function(\n self.surrogate_model, self.X_full, self.X_sparse)\n else:\n raise NotImplementedError(\n \"Choose between 'cb', 'ei', and 'poi' acquisition functions or define your own\")\n self.gp_predictions.append(pred)\n if self.mask is None:\n indices_list = np.unravel_index(np.argsort(acq.ravel()), acq.shape)\n vals_list = acq[indices_list][::-1][:self.batch_size].tolist()\n indices_list = np.dstack(indices_list)[0][::-1][:self.batch_size].tolist()\n else:\n acq = self.mask*acq\n indices_list = np.unravel_index(np.argsort(acq.ravel()), acq.shape)\n vals_list = acq[indices_list]\n vals_list = vals_list[~np.isnan(vals_list)][::-1]\n indices_list = np.dstack(indices_list)[0]\n indices_list = indices_list[:len(vals_list)][::-1]\n vals_list = vals_list[:self.batch_size].tolist()\n indices_list = indices_list[:self.batch_size].tolist()\n if not self.batch_update:\n return vals_list, indices_list\n if self.batch_dscale is None:\n batch_dscale_ = self.surrogate_model.model.kernel.lengthscale.mean().item()\n else:\n batch_dscale_ = self.batch_dscale\n vals_list, indices_list = self.update_points(\n vals_list, indices_list, batch_dscale_)\n return vals_list, indices_list", "def factor_exposure(asset: Asset, risk_model_id: str, factor_name: str, *,\n source: str = None, real_time: bool = False, request_id: Optional[str] = None) -> pd.Series:\n risk_model = RiskModel(risk_model_id)\n factor = Factor(risk_model_id, factor_name)\n if factor.factor is None or risk_model_id != factor.risk_model_id:\n raise MqValueError('Requested factor not available in requested risk model')\n\n asset_gsid = asset.get_identifiers().get('GSID')\n\n # Establish date interval for data query\n dates = risk_model.get_dates()\n start_date = dt.datetime.strptime(min(dates), \"%Y-%m-%d\").date() if dates else None\n end_date = dt.datetime.strptime(max(dates), \"%Y-%m-%d\").date() if dates else None\n\n # Query data and append pull requested factor exposure\n all_exposures = []\n query_results = risk_model.get_data(\n measures=[Measure.Factor_Name, Measure.Universe_Factor_Exposure, Measure.Asset_Universe],\n start_date=start_date,\n end_date=end_date,\n assets=DataAssetsRequest(identifier=AssetUniverseIdentifier.gsid, universe=[asset_gsid])).get('results', [])\n for result in query_results:\n if result.get('date') in dates:\n exposures = result.get('assetData', {}).get('factorExposure', [])\n if exposures:\n all_exposures.append(\n {'date': result['date'],\n 'factorExposure': exposures[0].get(factor.factor.identifier)})\n\n # Create and return timeseries\n df = 
pd.DataFrame(all_exposures)\n df.set_index('date', inplace=True)\n df.index = pd.to_datetime(df.index)\n return _extract_series_from_df(df, QueryType.FACTOR_EXPOSURE)", "def _doExpose(self, cmd, expTime, expType):\n \n image = self.actor.camera.expose(cmd, expTime, expType)\n filename = self.getNextFilename(cmd)\n pyfits.writeto(filename, image, checksum=False, clobber=True)\n cmd.inform(\"filename=%s\" % (qstr(filename)))\n \n return filename, image", "def refresh_index_page_filter_by(request, exposure_sequence):", "def continue_with_exposure(self):\r\n # Allocate space to give to scan_until_abort, and name the two\r\n # rows appropriately.\r\n self.data_pair = self.cam.get_new_array(n_images=2)\r\n self.pump_probe_data = self.data_pair[0]\r\n self.probe_only_data = self.data_pair[1]\r\n # Keep track of which image will be updated next\r\n self.next_data_has_pump = True\r\n\r\n # Tell self.thread what to do when the camera has new images\r\n self.cam.new_images.connect(self.send_new_images)\r\n\r\n # Get the current array of wavelengths from cam\r\n self.wavelen_arr = self.cam.get_wavelen_array()\r\n\r\n # Queue a call to cam.scan_until_abort\r\n self.startAcq.emit(self.data_pair)\r\n\r\n # Tell listeners (plotting widgets) to start displaying data too\r\n self.startDisplay.emit()", "def any_image(self, index):\n self.__accessed_image[index] = True\n return self.__image(index)", "def __exp1_changed_callback(self, ndx):\n if self.recording_sequence:\n self.record_sequence()\n self.exp1_radio.setChecked(True)\n self.exp1_ifi_select.setCurrentIndex(0)\n self.camera.set_exposure(ndx, 0)\n self.rec_seq_button.setEnabled(False)\n et = np.int(np.round(self.camera.actual_exposure_time_ms))\n self.write_to_log('Exposure %d ms' % et)", "async def integrate(self, exposure_time=1):\n if not self.status == ControllerStatus.IDLE:\n raise ArchonError(\"Status must be IDLE to start integrating.\")\n\n await self.set_param(\"IntMS\", int(exposure_time * 1000))\n await self.set_param(\"Exposures\", 1)\n\n self.status = ControllerStatus.EXPOSING", "def _apphot_one(args):\n return apphot_one(*args)", "def run(self):\n self.cause_dispatch() # generate one dispatch before temporarily disabling CENTRAL_DISPATCH\n settings.DISPATCH_ENABLED = False\n bare_specdata_list = self._compute_bare_specdata_sweep()\n dressed_specdata = self._compute_dressed_specdata_sweep(bare_specdata_list)\n self._lookup = spec_lookup.SpectrumLookup(self, dressed_specdata, bare_specdata_list)\n settings.DISPATCH_ENABLED = True", "def __init__(self, exposure = -1):\n self.cam = None\n self.exposure = exposure\n\n self._set_camera()", "def single_step(self, *args):\n e = args[0]\n if self.verbose:\n print(\"\\nExploration step {} / {}\".format(\n e+1, self.exploration_steps))\n # train with seeded data\n if e == 0:\n self.surrogate_model.train()\n # calculate acquisition function and get next query points\n vals, inds = self.next_point()\n if not self.batch_update:\n inds, vals = self.checkvalues(inds, vals)\n # evaluate function\n self.evaluate_function(inds)\n # update posterior\n self.update_posterior()\n # store indices and values\n if isinstance(vals, float):\n self.indices_all.append(inds)\n self.vals_all.append(vals)\n else:\n self.indices_all.extend(inds)\n self.vals_all.extend(vals)\n return", "def __call__(self):\n return self.shoot()", "def run(self) -> None:\n self.cause_dispatch() # one dispatch before temp. 
disabling CENTRAL_DISPATCH\n settings.DISPATCH_ENABLED = False\n bare_specdata_list = self._compute_bare_specdata_sweep()\n dressed_specdata = self._compute_dressed_specdata_sweep(bare_specdata_list)\n self._lookup = spec_lookup.SpectrumLookup(\n self, dressed_specdata, bare_specdata_list\n )\n settings.DISPATCH_ENABLED = True", "def run_single_camera(cam):\n\n try:\n # Retrieve TL device nodemap and print device information\n #nodemap_tldevice = cam.GetTLDeviceNodeMap()\n\n #result &= print_device_info(nodemap_tldevice)\n\n # Initialize camera\n cam.Init()\n\n # Retrieve GenICam nodemap\n nodemap = cam.GetNodeMap()\n exposures=[2000,4000,8000,16000]\n index=0\n if cam.ExposureAuto.GetAccessMode() != PySpin.RW:\n print(\"Unable to disable automatic exposure. Aborting...\")\n return False\n node_acquisition_mode = PySpin.CEnumerationPtr(nodemap.GetNode(\"AcquisitionMode\"))\n if not PySpin.IsAvailable(node_acquisition_mode) or not PySpin.IsWritable(node_acquisition_mode):\n print(\"Unable to set acquisition mode to continuous (enum retrieval). Aborting...\")\n return False\n\n # Retrieve entry node from enumeration node\n node_acquisition_mode_continuous = node_acquisition_mode.GetEntryByName(\"Continuous\")\n if not PySpin.IsAvailable(node_acquisition_mode_continuous) or not PySpin.IsReadable(node_acquisition_mode_continuous):\n print(\"Unable to set acquisition mode to continuous (entry retrieval). Aborting...\")\n return False\n\n acquisition_mode_continuous = node_acquisition_mode_continuous.GetValue()\n\n node_acquisition_mode.SetIntValue(acquisition_mode_continuous)\n\n print(\"Acquisition mode set to continuous...\")\n\n cam.ExposureAuto.SetValue(PySpin.ExposureAuto_Off)\n '''\n # Set maximum width\n #\n # *** NOTES ***\n # Other nodes, such as those corresponding to image width and height,\n # might have an increment other than 1. In these cases, it can be\n # important to check that the desired value is a multiple of the\n # increment.\n #\n # This is often the case for width and height nodes. However, because\n # these nodes are being set to their maximums, there is no real reason\n # to check against the increment.\n if cam.Width.GetAccessMode() == PySpin.RW and cam.Width.GetInc() != 0 and cam.Width.GetMax != 0:\n cam.Width.SetValue(FRAME_WIDTH)\n print(\"Width set to %i...\" % cam.Width.GetValue())\n\n else:\n print(\"Width not available...\")\n result = False\n\n # Set maximum height\n #\n # *** NOTES ***\n # A maximum is retrieved with the method GetMax(). A node's minimum and\n # maximum should always be a multiple of its increment.\n if cam.Height.GetAccessMode() == PySpin.RW and cam.Height.GetInc() != 0 and cam.Height.GetMax != 0:\n cam.Height.SetValue(FRAME_HEIGHT)\n print(\"Height set to %i...\" % cam.Height.GetValue())\n\n else:\n print(\"Height not available...\")\n result = False\n '''\n print(\"Automatic exposure disabled...\")\n #node_acquisition_framerate = PySpin.CFloatPtr(nodemap.GetNode(\"AcquisitionFrameRate\"))\n\n # if not PySpin.IsAvailable(node_acquisition_framerate) and not PySpin.IsReadable(node_acquisition_framerate):\n # print(\"Unable to retrieve frame rate. 
Aborting...\")\n # return False\n\n # framerate_to_set = node_acquisition_framerate.GetValue()\n\n # print(\"Frame rate to be set to %d...\" % framerate_to_set)\n canvas=np.zeros((FRAME_HEIGHT*2,FRAME_WIDTH*2,3), np.uint8)\n while True:\n exposure=exposures[index]\n \n configure_exposure(cam, exposure)\n # Acquire images\n err, img,width,height = acquire_images(cam, nodemap)\n if err < 0:\n return err\n\n \n img = img.GetData().reshape(height,width,3)\n\n half_height = int(height/2)\n half_width = int(width/2)\n half_frame_height = int(FRAME_HEIGHT/2)\n half_frame_width = int(FRAME_WIDTH/2)\n \n img = img[half_height-half_frame_height:half_height+half_frame_height,half_width-half_frame_width:half_width+half_frame_width]\n #smallimg=cv2.resize(img,(int(FRAME_WIDTH/2),int(FRAME_HEIGHT/2)))\n if index==0:\n #top left\n canvas[0:FRAME_HEIGHT,0:FRAME_WIDTH]=img\n elif index==1:\n #top right\n canvas[0:FRAME_HEIGHT,FRAME_WIDTH:FRAME_WIDTH*2]=img\n elif index==2:\n #bot left\n canvas[FRAME_HEIGHT:FRAME_HEIGHT*2,0:FRAME_WIDTH]=img\n else:\n #bot right\n canvas[FRAME_HEIGHT:FRAME_HEIGHT*2,FRAME_WIDTH:FRAME_WIDTH*2]=img\n index+=1\n if index>=len(exposures):\n index=0\n\n cv2.imshow(\"frame\",canvas)\n if cv2.waitKey(1) &0xff ==ord('q'):\n #stop the feed the 'q'\n break\n cv2.destroyAllWindows()\n # Deinitialize camera\n cam.DeInit()\n\n except PySpin.SpinnakerException as ex:\n print(\"Error: %s\" % ex)\n result = False", "def one(self):", "def perform(self):\n pass", "def refresh_counterfactual_url_json(request, is_json):\t\n\tif is_json != 'json':\n\t\treturn\n\n #json_file = '/home/zhou/Downloads/jsons/only/json_7.json'\n json_file = '/home/zhou/Downloads/jsons/compound/json_9.json'\n\t#json_file = '/Users/jiaozi/Downloads/jsons/compound/json_2.json' #json file address\n exposure_sequence = read_json(json_file)#list of exposures{mean,sd,non_rate}, read json content into memory\n\n\tprimeCoordinator.get_counterfactual_compound_exposures(exposure_sequence)\n\t\n\n\t'''\n\t\tFor interface output data format, please refer to refresh_output_counterfactual_mortality. When a user clicks on age/gender/outcome, he can view mortalities in different formats\n It refreshes final compound mortality for output, it should be the counterfactual mortality of the final exposure.\n In order to show mortality by age/outcome/gender, there should be three lists.\n 1. Show by age/population_subset \n All information to present on a webpage is stored in variable 'output_all_mortality_exposure_age', the format of this variable is [{'population_subset_id':1,'age_group':'15-19','b_mortality_sum_db':757,'c_mrtality_sum':341}] \n 2. Show by outcome \n All information to present on a webpage is stored in variable 'output_all_mortality_exposure_outcome', the format of this variable is [{'o_id':1,'name':'Stroke','b_mortality_sum_db':24757,'c_mrtality_sum':23415}] \n 3. 
Show by gender \n All information to present on a webpage is stored in variable 'output_all_mortality_exposure_gender', the format of this variable is [{'gender':'male','b_mortality_sum_db':24757,'c_mrtality_sum':415}] \n\n\t'''\n\t#filter_by\t= request.POST['filter-by'] \n\tfilter_by\t= request.GET\n\tby\t\t= request.POST\n\tprint filter_by\n\tprint by\n\n\t# get the data in the interface\n\tb_output_mortality \t= primeCoordinator.output_baseline_mortality\n\tb_output_mortality_num \t= primeCoordinator.output_baseline_mortality_num\n\tb_total_mortality \t= primeCoordinator.output_baseline_mortality_total\n\tc_output_mortality \t= primeCoordinator.output_counterfactual_mortality\n\tc_output_mortality_num \t= primeCoordinator.output_counterfactual_mortality_num\n\tc_total_mortality \t= primeCoordinator.output_counterfactual_mortality_total\n\ttotal_population\t= primeCoordinator.output_total_population\n\ttotal_death_averted\t= str(int(round(primeCoordinator.output_total_death_averted))) # int\n\t#total_death_averted\t= str(primeCoordinator.output_total_death_averted) # decimale\n\ttotal_death_baseline\t= str(primeCoordinator.output_total_death_baseline)\n\n\t'''\n\t\tThis is the outputs when user click outcome\n\t'''\n\tall_mortality_exposure\t= primeCoordinator.output_all_mortality_exposure_outcome # [{'outcome_id':outcome_id,'name':outcome name,'baseline_death':100, 'counterfactual_death':20},{}] \n\n\t'''\n\t\tThis is the output when user click age\n\t'''\n\tall_mortality_age\t= primeCoordinator.output_all_mortality_age # [{'age_group_id':age_group_id,'age_group':age_group,'baseline_death':100, 'counterfactual_death':20},{}] \n\n\t'''\n\t\tThis is the outputs when user click gender \n\t'''\n\tall_mortality_gender\t= primeCoordinator.output_all_mortality_gender# [{'gender':'male','baseline_death':100, 'counterfactual_death':20},{'gender':'female','baseline_death':100, 'counterfactual_death':20}] \n\n\n\t#transmit the parameters\n\ttemplate = loader.get_template('primemodel/index.html')\n\tpara_view = {\n\t\t\t'b_output_mortality_num':\tb_output_mortality_num,\n\t\t\t'b_total_mortality':\t\tb_total_mortality,\n\t\t\t'c_output_mortality_num':\tc_output_mortality_num,\n\t\t\t'c_total_mortality':\t\tc_total_mortality,\n\t\t\t'total_population':\t\ttotal_population,\n\t\t\t'total_death_averted':\t\ttotal_death_averted,\n\t\t\t'total_death_baseline':\t\ttotal_death_baseline,\n\t\t\t'all_mortality_exposure':\tall_mortality_exposure\n\t\t\t}\n\n\t#context to transmit the parameters to show\n\tcontext = Context(para_view)\n\tresponse = template.render(context)\n\treturn HttpResponse(response)", "def process(self):", "def process(self):", "def process(self):", "def run():\n return estimate(0,1,0)", "def _request_and_measure(self, count):\n for i in range(count):\n self.rate_measurer.update_rate()\n\n def handle_request_error(result):\n self.rate_measurer.request_failed(result)\n write_failure(result)\n\n for i in range(self.request_rate):\n d = self.control_service.move_dataset(self.dataset_node.uuid,\n self.dataset_id)\n self.rate_measurer.request_sent()\n d.addCallbacks(self.rate_measurer.response_received,\n errback=handle_request_error)", "def pick_action(self):\n if self.exploration_mode == 'time':\n self.acq_func.exploration_rate = self.exploration_rate(self.duration + 1)\n elif self.exploration_mode == 'samples':\n self.acq_func.exploration_rate = self.exploration_rate(len(self.rounds) + 1)\n\n fid, x = optim.pick_acquisition_mf(acq_func=self.acq_func,\n optimizer=self.aux_optimizer,\n 
gammas=self.gammas,\n x_init=self.aux_x_init)\n rmean, rsd = self.acq_func.predict_mf(fid=fid, x=x)\n\n # Undo negation of objective function so as to not confuse user\n if self.mode == 'min':\n rmean = -rmean\n\n rospy.loginfo('Next sample (%d, %s) with beta %f and predicted reward %f +- %f',\n fid,\n str(x), self.acq_func.exploration_rate,\n rmean,\n rsd)\n return fid, x", "def run(self):\n\n # TODO: Logic to get data, enforce request limits, and filter out duplicates", "def _process(self, activity):", "def fire(self):", "def callee(calls):\n calls.append(1)", "def __init__(self):\n self.state = Idle\n self.currExposure = Exposure(None, None, None)\n self.expTimer = Timer()\n self.readTimer = Timer()\n self.expBegin = None\n self.expAccumulated = None\n self.setConfig(CameraConfig())\n self._expName = \"\"\n self.failExposure = False # for unittesting", "def autoExposure(this, **kargs):\n\t\t\n\t\t# Arguments\n\t\tframes = kargs.get('frames', 4)\n\t\tstart = kargs.get('start', -10)\n\t\tend = kargs.get('start', -3)\n\t\t\n\t\tmax = 0\n\t\tv = start\n\t\tprint 'Auto Exposition starting...'\n\t\t\n\t\tfor i in range(start, end):\n\t\t\tthis.setProp('exposure', i)\n\t\t\tfor j in range(frames): this.getFrame()\n\t\t\t\n\t\t\te = imEntropy(this.frame)\n\t\t\tif e > max:\n\t\t\t\tmax = e\n\t\t\t\tv = i\n\t\t\n\t\tthis.setProp('exposure', v)\n\t\tfor j in range(frames): this.getFrame()\n\t\tprint 'Exposure Calibrated: %i / Entropy: %.4f' % (v, max)", "def set_exposure(self, expo):\n if expo == 0:\n self.exposure = 0\n elif expo == 1:\n self.exposure = min(9, self.exposure+1)\n elif expo == -1:\n self.exposure = max(-9, self.exposure-1)\n self.drone.set_exposure(self.exposure)\n log.info(f\"EXPOSURE {self.exposure}\")", "def expose_test(self):\n with self.lock:\n self.dark = 1\n self.tstart = time.time()\n self.timestamp = time.strftime(\"%Y-%m-%dT%H:%M:%S\", time.localtime(self.tstart))\n imagesize = (self.expArea[3] - self.expArea[1],\n self.expArea[2] - self.expArea[0])\n self.data = np.ones(shape=imagesize, dtype=np.uint16)\n self.tend = time.time()", "def perform(self):\n return", "def perform(self):\n return", "def __call__(self, population, context):\n pass", "def __call__():", "def __call__():", "def __call__():", "def __call__():", "def __call__():", "def process_exposure_product(self, exp_product, pool_name=\" \", asn_file=\" \"):\n # Find all the member types in the product\n members_by_type = defaultdict(list)\n for member in exp_product[\"members\"]:\n members_by_type[member[\"exptype\"].lower()].append(member[\"expname\"])\n\n # Get the science member. Technically there should only be\n # one. 
We'll just get the first one found.\n science = members_by_type[\"science\"]\n if len(science) != 1:\n self.log.warning(\n \"Wrong number of science files found in {}\".format(exp_product[\"name\"])\n )\n self.log.warning(\" Using only first one.\")\n science = science[0]\n\n self.log.info(\"Working on input %s ...\", science)\n if isinstance(science, datamodels.DataModel):\n input = science\n else:\n input = datamodels.open(science)\n\n # Record ASN pool and table names in output\n input.meta.asn.pool_name = pool_name\n input.meta.asn.table_name = asn_file\n\n input = self.dark_current(input)\n input = self.normalize(input)\n\n self.log.info(\"Finished processing product {}\".format(exp_product[\"name\"]))\n return input", "def capture(self) -> int:", "def run(self, req):\n\n if(self.isROS):\n featureList = []\n self.fov = self.geoPathToGPD(req.fov)\n else:\n self.fov = self.pickleToGPD(req)\n\n\n \n response = []\n \n featuresInView = gpd.sjoin(self.featureDataframe, self.fov, op='within') \n for index, feature in featuresInView.iterrows():\n if(self.isROS):\n response.append(enc_feature_msg(feature[\"name\"], feature[\"longitude\"],feature[\"latitude\"], feature[\"fid\"]))\n else:\n response.append((feature[\"name\"], feature[\"longitude\"],feature[\"latitude\"], feature[\"fid\"]))\n if(self.isROS):\n return enc_query_srvResponse(response)\n return response", "def call(self, *args, **kwargs):", "def get_exposure_value():\n validate(request.json, 'exposureValueRequestSchema', 'swagger/getExposureValue.yml')\n logging.info(\"get_exposure_value({0})\".format(request.json))\n return database.get_exposure_value(loc=request.json['loc'],\n stime=ExposureUtil.to_timestamp(request.json['stime']),\n etime=ExposureUtil.to_timestamp(request.json['etime']),\n tres=request.json['tres'],\n tstat=request.json['tstat'])", "def exposuretime(self):\n _, = self.exposuretimes\n return _", "def _generate_direct_image(self):\n filename = '0000_flt.fits'\n\n di_start_JD = (self.exp_start_times[0] - 1 * u.min).to(u.day)\n di_exp_gen = ExposureGenerator(self.detector, self.grism, self.NSAMP,\n self.SAMPSEQ, self.SUBARRAY,\n self.planet, filename, di_start_JD)\n\n try: # assume that its a list not a single value\n x_ref = self.x_ref[0]\n except TypeError:\n x_ref = self.x_ref\n\n try: # assume that its a list not a single value\n y_ref = self.y_ref[0]\n except TypeError:\n y_ref = self.y_ref\n\n exp = di_exp_gen.direct_image(x_ref, y_ref)\n exp.generate_fits(self.outdir, '0000_flt.fits')", "def healthcare():", "def command(self, value):\n for ii in range(0, len(exposure_mode_names)):\n if value == exposure_mode_names[ii]: break\n self.tcp_comms.tcp_params.exposureMode = ii\n self.tcp_comms.send_exposure_mode(self.tcp_comms.tcp_params.exposureMode)", "def ExposeLoop(self, nexposures, fitsfile, seconds=5):\n if self.process and self.process.poll() is None:\n raise RuntimeError(\"A process is already running\")\n if self.exposethread and self.exposethread.is_alive():\n raise RuntimeError(\"An exposure loop is already running\")\n\n self.current_exposure = 0\n self.max_exposures = nexposures\n self.exposethread = Thread(target=self._do_expose_loop,\n args=(fitsfile, seconds))\n self.exposethread.start()", "def _get_one(self,url):\n pass" ]
[ "0.6202363", "0.5730469", "0.5720542", "0.5674125", "0.5648648", "0.56126845", "0.55802995", "0.5563538", "0.55593634", "0.5544014", "0.5505167", "0.5467155", "0.54338604", "0.5406679", "0.53846276", "0.53846276", "0.53756344", "0.531871", "0.5311451", "0.5309933", "0.5305943", "0.5292303", "0.5289628", "0.5213631", "0.5211134", "0.52088344", "0.5206163", "0.51909417", "0.5164779", "0.5163428", "0.5163428", "0.5163428", "0.5163428", "0.5163428", "0.5157655", "0.51499236", "0.5139993", "0.5139993", "0.51379657", "0.51303023", "0.5121835", "0.51122135", "0.51093787", "0.51093787", "0.508679", "0.5080306", "0.5057563", "0.50528455", "0.50525856", "0.5040895", "0.50380754", "0.5034372", "0.50332034", "0.5030125", "0.5027548", "0.5016878", "0.5010914", "0.49993348", "0.49954835", "0.4983248", "0.49682274", "0.49614328", "0.49602866", "0.49552783", "0.49537838", "0.4952609", "0.4948855", "0.4910469", "0.4884281", "0.4884281", "0.4884281", "0.4872846", "0.4870585", "0.4864314", "0.48594034", "0.4857065", "0.48569065", "0.48536173", "0.48495597", "0.48374608", "0.48334485", "0.4833361", "0.48126543", "0.48126543", "0.48125893", "0.48073483", "0.48073483", "0.48073483", "0.48073483", "0.48073483", "0.4795321", "0.4791037", "0.47902206", "0.47769552", "0.4776758", "0.47711435", "0.47647318", "0.4761584", "0.47576076", "0.47557303", "0.47480124" ]
0.0
-1
Helper for apogee science tests
def _do_apogee_science(self, nCall, nInfo, nWarn, nErr, ditherPairs=4): self._update_cart(1, 'APOGEE') cmdState = self.actorState.doApogeeScience cmdState.reinitialize(self.cmd) cmdState.ditherPairs = ditherPairs masterThread.do_apogee_science(self.cmd, cmdState, myGlobals.actorState) self._check_cmd(nCall, nInfo, nWarn, nErr, True)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def testBeliefs1sk(self):", "def test_alien_data(self):", "def test_art_from_taste_space(self):", "def test_theft_and_stealing(self):", "def test_T01():", "def testBeliefs2sk(self):", "def _test(self):", "def _test(self):", "def _test(self):", "def _test(self):", "def _test(self):", "def tests():", "def test_predictor():", "def test_T3():", "def test_T3():", "def unitary_test():", "def test_T4():", "def test_T4():", "def test_quick_answer(self):\n pass", "def test(self):\n pass", "def test_4():", "def test():", "def test():", "def test_let(self):", "def test_4_4_1_1(self):\n pass", "def _test(self):\n pass", "def _test(self):\n pass", "def _test(self):\n pass", "def getTestResults():", "def test_01_lighting(self):", "def test_get_art_info(self):\n pass", "def test_get_insumo(self):", "def test_households_in_admin_unit(self):", "def test(self):", "def test(self):", "def test_arc_smear(self):", "def test_T2():", "def test_T2():", "def test_T0():", "def test_apply_endorsements(self):", "def test_create_unexpected_problem(self):\n pass", "def substantiate():", "def test_5():", "def test_dummy():", "def test_T1():", "def test_T1():", "def test_get_solution(self):\n pass", "def test():\n pass", "def test_get_waivers(league):\n pass", "def test_something():", "def test_3():", "def test_g_asignar_rol(self):", "def test_03_visit_special(self):", "def test_analytical_vs_numerical():\n pass", "def test_rules():", "def test_training(self):\n\t\tpass", "def test_get_details7(self):\n pass", "def exercise_b2_70():\r\n pass", "def test_get_goal(self):\n pass", "def test_basic_execution(self):", "def exercise_b2_53():\r\n pass", "def test_single_issue():\n pass", "def test_if(self):", "def test_get_game_boxscore(self):\n pass", "def exercise_b2_106():\r\n pass", "def runtest(self):", "def test_kyc_get_legal(self):\n pass", "def test_gtf(self):\n #TODO write bed tests", "def exercise_b2_69():\r\n pass", "def test_machine_learning():", "def test(self):\n raise NotImplementedError", "def exercise_b2_107():\r\n pass", "def healthcare():", "def exercise_b2_113():\r\n pass", "def test_active_inference_SPM_1b(self):", "def test_get_game(self):\n pass", "def test_get1(self):\n pass", "def test_title(names):", "def exercise_b2_56():\r\n pass", "def test_1():", "def test_get_food(self):\n pass", "def test_convert():", "def exercise_b2_52():\r\n pass", "def test_initialization_of_homework_result_solution():\n assert result_1.solution == \"I have done this hw\"", "def test_nothing(self):", "def test_get_scenario(self):\n pass", "def test(self):\n # -- Test --\n\n # (1)\n\n # (2)\n\n # (3)\n\n # (4)\n # -- Test --", "def test_generate_all_testing(self):\n pass", "def test_get2(self):\n pass", "def test_visualize_recipe_nutrition(self):\n pass", "def test_01_visit(self):", "def test_kyc_post_legal(self):\n pass", "def test_get_run(self):\n pass", "def test_2():", "def inner_test():\n pass", "def inner_test():\n pass", "def test_get_scenarios(self):\n pass", "def test_observatory(lasco):\n assert lasco.observatory == \"SOHO\"", "def test_uparforvarg(self):", "def test_outE_traverals(self):\r\n results = self.blake.outE()\r\n assert len(results) == 1\r\n assert self.blake_in_theoretics in results", "def test_testing():\n Pendulum = pu.Pendulum()\n ans = Pendulum.dummytest()\n assert ans" ]
[ "0.6974807", "0.6933129", "0.6889396", "0.6887534", "0.6827801", "0.67861944", "0.67435616", "0.67435616", "0.67435616", "0.67435616", "0.67435616", "0.6690207", "0.6631977", "0.66018003", "0.66018003", "0.6595975", "0.6593147", "0.6593147", "0.65639585", "0.65451413", "0.65322465", "0.6526747", "0.6526747", "0.6505568", "0.6486779", "0.64841115", "0.64841115", "0.64841115", "0.6458555", "0.64535874", "0.6450602", "0.64448696", "0.64370126", "0.643546", "0.643546", "0.6431869", "0.6430605", "0.6430605", "0.6421754", "0.6408224", "0.63703156", "0.63650006", "0.6356629", "0.63480633", "0.6340758", "0.6340758", "0.6338208", "0.63330495", "0.63233906", "0.63228166", "0.6317575", "0.6311631", "0.6307398", "0.6287377", "0.6282867", "0.6281507", "0.6232615", "0.62198275", "0.6190656", "0.61890864", "0.6187388", "0.6172596", "0.61634064", "0.6159596", "0.6157667", "0.6153323", "0.61480534", "0.6139105", "0.61278695", "0.61247754", "0.60998374", "0.60986453", "0.6091511", "0.60884005", "0.6080704", "0.60776824", "0.60678023", "0.60564125", "0.60494256", "0.604927", "0.6047783", "0.60473233", "0.60296744", "0.60250133", "0.6023721", "0.60179156", "0.6017413", "0.59969765", "0.5994655", "0.599352", "0.5988078", "0.59868515", "0.59751683", "0.5962426", "0.59601253", "0.59601253", "0.59584135", "0.5953797", "0.5951116", "0.59443325", "0.59189427" ]
0.0
-1
open shutter, one call per exposure/dither moves
def test_do_apogee_science_4_pair_A_closed(self): sopTester.updateModel('mcp', TestHelper.mcpState['apogee_science']) sopTester.updateModel('apogee', TestHelper.apogeeState['A_closed']) self._do_apogee_science(13, 72, 0, 0, ditherPairs=4)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def do_line_scan_shutter_closed(self):\n scan = self.scan\n laser = self.devices[scan['laser']['name']]\n shutter = self.scan['shutter']\n ni_daq = self.devices['NI-DAQ']\n ni_daq.driver.digital_output(shutter['port'], False)\n if not isinstance(shutter['delay'], Q_):\n delay = Q_(shutter['delay'])\n else:\n delay = shutter['delay']\n delay = delay.m_as('s')\n if delay > 0:\n time.sleep(delay)\n ni_daq.driver.digital_output(shutter['port'], True)\n print('Pumped Scan')\n laser.driver.execute_sweep()\n approx_time_to_scan = (laser.params['stop_wavelength'] - laser.params['start_wavelength']) / laser.params['wavelength_speed']*laser.params['wavelength_sweeps']\n\n while laser.driver.sweep_condition != 'Stop':\n sleep(approx_time_to_scan.m/config.monitor_read_scan)\n ni_daq.driver.digital_output(shutter['port'], False)\n laser.driver.wavelength = scan['laser']['params']['start_wavelength']\n \n # Repeat with shutter closed to get reference scan\n \n ni_daq.driver.digital_output(shutter['port'], False)\n if not isinstance(shutter['delay'], Q_):\n delay = Q_(shutter['delay'])\n else:\n delay = shutter['delay']\n delay = delay.m_as('s')\n if delay > 0:\n time.sleep(delay)\n print('Reference Scan')\n laser.driver.execute_sweep()\n approx_time_to_scan = (laser.params['stop_wavelength'] - laser.params['start_wavelength']) / laser.params['wavelength_speed']*laser.params['wavelength_sweeps']\n while laser.driver.sweep_condition != 'Stop':\n sleep(approx_time_to_scan.m/config.monitor_read_scan)\n laser.driver.wavelength = scan['laser']['params']['start_wavelength']\n \n return True", "def sharpen_slide(self):\r\n std_input = self.horizontal.get() # Get the std defined by user\r\n c_input = self.vertical.get() # get the constant defined by the user\r\n img = self.master.images[-1] # Use the most recent displayed image for sharpening\r\n sharpened_image = sharpen.gaussian_unsharp_masking(img, std_input, c_input) # Apply unsharp masking on image\r\n self.master.display_image.display_image(img=sharpened_image) # display sharpened image\r\n self.master.images.append(sharpened_image) # Append the sharpened image on the stack\r", "def run(self):\n openShutter = True\n actuateXed = False\n image_type = \"PPUMP\"\n\n wl = float(self.eo_config.get(\"PPUMP_WL\", 550))\n meas_flux = self.measured_flux(wl)\n seqno = 0\n for tokens in self.instructions:\n exptime = float(tokens[1])\n nframes = int(tokens[2])\n shifts = int(tokens[3])\n for iframe in range(nframes):\n self.image_clears()\n self.bias_image(seqno)\n self.take_image(seqno, exptime, openShutter, actuateXed,\n image_type)\n seqno += 1", "def sync_shutter(self):\n shutter = self.scan['shutter']\n ni_daq = self.devices['NI-DAQ']\n ni_daq.driver.digital_output(shutter['port'], False)\n time.sleep(0.2)\n ni_daq.driver.digital_output(shutter['port'], True)\n time.sleep(0.2)\n ni_daq.driver.digital_output(shutter['port'], False)", "def testSharpen (name = \"smokey.gif\", degree = 50, amount = 20):\n image = Image(name)\n print(\"Close the image window to see the transformation\")\n image.draw()\n image2 = sharpen(image, degree, amount)\n image2.draw()", "def main():\n camera = picamera.PiCamera()\n camera.resolution = (RESOLUTIONX, RESOLUTIONY)\n camera.iso = 800\n time.sleep(2)\n while True:\n camera.capture('current-image.jpg')\n adapt_steering(navigation.get_xposition('current-image.jpg'))\n time.sleep(0.4)", "def shutter_pulse(self, width):\n step_name = 'Shutter Pulse'\n self.shutter.settings['shutter_open'] = True\n self.db_poll(step_name)\n 
print('Shutter open')\n t0 = time.time()\n t_lastlog = t0\n while True:\n if self.interrupt_measurement_called:\n self.shutter.settings['shutter_open'] = False\n break\n if time.time()-t0 > width:\n break\n time.sleep(0.001)\n if time.time() - t_lastlog > 0.2:\n # do some logging\n self.db_poll(step_name)\n t_lastlog = time.time()\n \n self.shutter.settings['shutter_open'] = False\n self.settings['steps_taken'] += 1\n print('Shutter closed')", "def measure_darks(det, shutter, quantity):\n yield from set_dark_frame()\n yield from bps.mv(shutter, \"close\")\n yield from _acquire_n_frames(det, quantity)", "def shar():\n pass", "def warmup():\n print camera.CoolerOFF()\n camera.status.update()", "def shutter(self, state):\n if self.status != \"not connected\":\n m = self.serial\n m.write(\"shutter \" + str(state) + \"\\r\\n\")\n r = m.read(100)\n if state == 'O':\n st = \"open\"\n else:\n st = \"closed\"\n result = \"out.monochrom: Shutter is \" + st\n return result\n else:\n pass", "def sample_damaging(image):\r\n return crease_image(blotch_image(image, 100, True), 10, False)", "def expose(self):\n if self.camera is None: # test mode -- immediately return test image\n print(\"NO SPECTRAL CAMERA FOUND -- USING TEST DATA\")\n self.filename = \"example_fits_files/Mooi\"\n return\n\n exposure_time = self.time.get()\n try:\n self.exposure_time = float(exposure_time)\n except:\n message = \"Exposure time \\\"{0}\\\" cannot be converted to floating point number\".format(exposure_time)\n messagebox.showerror(\"Error\", message)\n raise ValueError(message)\n filename = \"spectra/{0}\".format(timestamp())\n self.camera.spectrum(self.exposure_time, filename)\n self.filename = filename", "def setShutter( self, c, devChannel, state ):\n dev = 'shutter'\n self.validateDevChannel( dev, devChannel )\n self.validateInput( dev, state )\n channel = self.dcDict[dev]['devChannels'][devChannel]['channel']\n self.tryToSend( channel, state )", "def skywalker(this, **kargs):\n\t\t\n\t\t# Arguments\n\t\tbin = kargs.get('bin', this._BINARY)\n\t\toffshore = kargs.get('offshore', 5)\n\t\tminSize = kargs.get('minSize', 3)\n\t\tblur = kargs.get('blur', False)\n\t\t\n\t\tif blur: # Flou de test\n\t\t\tkernel = np.ones((3, 3), np.float32)/9\n\t\t\tbin = cv2.filter2D(bin, -1, kernel)\n\t\t\n\t\t# On duplique l'image pour le rendu final\n\t\tscan = EmptyFrom(bin, 3)\n\t\tscan[:,:,0] = scan[:,:,1] = scan[:,:,2] = bin\n\t\tthis._SCAN = scan\n\t\t\n\t\tstep = 0 # Compteur de pas dans le vide\n\t\tstart, end = None, None\n\t\t\n\t\t# Dimensions de l'image à scanner\n\t\tsize = D2Point(width(bin), height(bin))\n\t\tratio = size if minSize < 1 else 1\n\t\t\n\t\t# Scan pixel par pixel, en partant du bas\n\t\tfor v in xrange(int(size.y)-1, -1, -1):\n\t\t\tfor u in xrange(int(size.x)):\n\t\t\t\n\t\t\t\tif bin.item((v, u)): # Si un pixel != 0:\n\t\t\t\t\tscan[v,u] = [0, 0, 255] # Rouge.\n\t\t\t\t\tstep = 0 # On reset le jump\n\t\t\t\t\t\n\t\t\t\t\t# Si c'est le premier\n\t\t\t\t\tif not start:\n\t\t\t\t\t\tstart = D2Point(u, v)\n\t\t\t\t\t\tend = D2Point(u, v)\n\t\t\t\t\telse: # On trace\n\t\t\t\t\t\tend.x, end.y = u, v\n\t\t\t\t\n\t\t\t\telif end:\n\t\t\t\t\tif step < offshore:\n\t\t\t\t\t\tscan[v,u] = [0, 255, 255] # Jaune\n\t\t\t\t\t\tstep += 1 # On continue\n\t\t\t\t\telif abs((start - end)/ratio) < minSize:\n\t\t\t\t\t\tstart, end = None, None\n\t\t\t\t\telse: break\n\t\t\t\t# elif end: break\n\t\t\t###\n\t\t\tif end: break\n\t\t###\n\t\t\n\t\tif end: # Si on a trouvé une fin\n\t\t\t\n\t\t\t# Point médian = doigt\n\t\t\tresult = 
start % end\n\t\t\t\n\t\t\t# Visuel\n\t\t\tscan[:,result.x,:] = [0, 255, 0] # On trace une bande verte\n\t\t\tscan[result.y,:,:] = [0, 127, 0] # On trace une autre bande verte\n\t\t\t\n\t\t\t# Reformatage\n\t\t\tresult /= size-1 # On remet en ratio d'image\n\t\t\tresult.x = 1 - result.x # On inverse le côté de mesure\n\t\t\t\n\t\t\t# Stockage\n\t\t\tthis._DETECTED = result # On stocke le point détecté\n\t\t\tthis._BOTTOM = result.y == 1 # On clic ou bien ?\n\t\t\n\t\t# Si rien\n\t\telse:\n\t\t\tresult = None\n\t\t\tthis._BOTTOM = False\n\t\t\n\t\t# Tchao\n\t\treturn result", "def wfits(self, filename=None):\n with self.lock:\n dark = self.dark\n if not filename:\n if dark != 0:\n filename = self.getNextFilename(\"dark\")\n else:\n filename = self.getNextFilename(\"object\")\n with self.lock:\n if(self.data.size == 0):\n raise FliError(\"No image available\")\n hdu = pyfits.PrimaryHDU(self.data)\n hdr = hdu.header\n with self.lock:\n hdr.set('DATE', self.timestamp, 'exposure begin date')\n hdr.set('INSTRUME', self.devname, 'this instrument')\n hdr.set('SERIAL', self.devsn, 'serial number')\n hdr.set('EXPTIME', self.exptime, 'exposure time (ms)')\n hdr.set('VBIN', self.vbin, 'vertical binning')\n hdr.set('HBIN', self.hbin, 'horizontal binning')\n hdr.set('CCD-TEMP', self.temp, 'CCD temperature')\n if dark != 0:\n hdr.set('SHUTTER', 'CLOSE', 'shutter status')\n else:\n hdr.set('SHUTTER', 'OPEN', 'shutter status')\n hdr.set('CCDAREA', '[%d:%d,%d:%d]' % self.expArea, 'image area')\n hdu.writeto(filename, overwrite=True, checksum=True)\n with self.lock:\n self.filename = filename", "def start(self):\n self.frame = 0\n self._init_level(1)\n self.reward = 0\n self.pcontinue = 1\n self.ghost_speed = self.ghost_speed_init\n return self._make_image(), self.reward, self.pcontinue", "def __init__(\n self,\n win,\n outer_diam_pix,\n inner_diam_pix,\n bg_colour=(-1, -1, -1),\n line_colour=(+1, +1, +1),\n spot_colour=(-1, -1, -1),\n circle_edges=128,\n ):\n\n self._win = win\n self._outer_diam_pix = outer_diam_pix\n self._inner_diam_pix = inner_diam_pix\n self._circle_edges = circle_edges\n self._stim = {}\n\n self._stim[\"aperture\"] = psychopy.visual.Aperture(\n win=win,\n size=self._outer_diam_pix,\n nVert=self._circle_edges,\n shape=\"circle\",\n units=\"pix\",\n autoLog=False,\n )\n\n self._stim[\"aperture\"].enabled = False\n\n self._stim[\"bg\"] = psychopy.visual.Circle(\n win=self._win,\n radius=self._outer_diam_pix / 2.0,\n units=\"pix\",\n lineColor=None,\n fillColor=bg_colour,\n edges=self._circle_edges,\n autoLog=False,\n )\n\n self._stim[\"line\"] = psychopy.visual.Rect(\n win=self._win,\n size=(self._outer_diam_pix * 2, self._inner_diam_pix),\n units=\"pix\",\n lineWidth=0,\n lineColor=None,\n fillColor=line_colour,\n autoLog=False,\n )\n\n self._stim[\"spot\"] = psychopy.visual.Circle(\n win=self._win,\n radius=self._inner_diam_pix / 2.0,\n units=\"pix\",\n fillColor=spot_colour,\n edges=self._circle_edges,\n lineWidth=0,\n autoLog=False,\n )\n\n self.bg_colour = bg_colour\n self.line_colour = line_colour\n self.spot_colour = spot_colour", "def _generate_exposure(self, expstart, number):\n\n index_number = number - 1 # for zero indexing\n\n filename = '{:04d}_raw.fits'.format(number)\n\n exp_gen = ExposureGenerator(self.detector, self.grism, self.NSAMP,\n self.SAMPSEQ, self.SUBARRAY,\n self.planet, filename, expstart)\n\n if not self.spatial_scan:\n self.sample_rate = 1 * u.year # high number reverts to read times\n\n _, sample_mid_points, sample_durations, read_index = \\\n 
exp_gen._gen_scanning_sample_times(self.sample_rate)\n\n time_array = (sample_mid_points + expstart).to(u.day)\n\n if self.transmission_spectroscopy:\n star_norm_flux = self.generate_lightcurves(time_array)\n planet_depths = 1 - star_norm_flux\n else:\n planet_depths = None\n\n # x shifts - linear shift with exposure, second exposure shifted by\n # x_shifts, direct image and first exp will match.\n x_ref = self._try_index(self.x_ref, index_number)\n y_ref = self._try_index(self.y_ref, index_number)\n sky_background = self._try_index(self.sky_background, index_number)\n\n # X and Y Shifts\n x_ref += self.x_shifts * index_number\n y_ref += self.y_shifts * index_number\n x_jitter = self.x_jitter\n y_jitter = self.y_jitter\n\n if self._visit_trend:\n scale_factor = self._visit_trend.get_scale_factor(index_number)\n else:\n scale_factor = None\n\n if self.spatial_scan:\n exp_frame = exp_gen.scanning_frame(\n x_ref, y_ref, x_jitter, y_jitter,\n self.wl, self.stellar_flux, planet_depths,\n self.scan_speed, self.sample_rate, sample_mid_points,\n sample_durations, read_index, ssv_generator=self.ssv_gen,\n noise_mean=self.noise_mean, noise_std=self.noise_std,\n add_flat=self.add_flat, add_dark=self.add_dark,\n scale_factor=scale_factor, sky_background=sky_background,\n cosmic_rate=self.cosmic_rate,\n add_gain_variations=self.add_gain_variations,\n add_non_linear=self.add_non_linear,\n clip_values_det_limits=self.clip_values_det_limits,\n add_read_noise=self.add_read_noise,\n add_stellar_noise=self.add_stellar_noise,\n add_initial_bias=self.add_initial_bias,\n progress_bar=self.progess,\n threads=self.threads\n )\n else:\n exp_frame = exp_gen.staring_frame(\n x_ref, y_ref, x_jitter, y_jitter,\n self.wl, self.stellar_flux, planet_depths,\n sample_mid_points, sample_durations, read_index,\n noise_mean=self.noise_mean, noise_std=self.noise_std,\n add_flat=self.add_flat, add_dark=self.add_dark,\n scale_factor=scale_factor, sky_background=sky_background,\n cosmic_rate=self.cosmic_rate,\n add_gain_variations=self.add_gain_variations,\n add_non_linear=self.add_non_linear,\n clip_values_det_limits=self.clip_values_det_limits,\n add_read_noise=self.add_read_noise,\n add_stellar_noise=self.add_stellar_noise,\n add_initial_bias=self.add_initial_bias,\n progress_bar=self.progess,\n threads=self.threads\n )\n\n exp_frame.generate_fits(self.outdir, filename, ldcoeffs=self.ldcoeffs)\n\n return exp_frame", "def cmdMouse(self, dev):\n self.hitsMouses[dev] = False\n f = open(self.inputPath + dev, 'rb')\n while self.live:\n f.read(500) # 144 kan eigenlijk alles zijn, behalve absurbt hoge waarden..\n self.hitsMouses[dev] = True\n time.sleep(0.1)", "def effect_spread(self, distance):\n self.load()\n return self._new(self.im.effect_spread(distance))", "def go():\n ##########\n #\n # MB19284\n #\n ##########\n\n ##########\n # Kp-band reduction\n ##########\n\n target = 'mb19284'\n sci_files = ['i200822_a011{0:03d}_flip'.format(ii) for ii in range(2, 5+1)]\n sci_files += ['i200822_a012{0:03d}_flip'.format(ii) for ii in range(2, 25+1)]\n sky_files = ['i200822_a018{0:03d}_flip'.format(ii) for ii in range(2, 6+1)]\n refSrc = [917.75, 1033.5] # This is the target\n # Alternative star to try (bright star to bottom of target): [1015, 581.9]\n \n sky.makesky(sky_files, target, 'kp_tdOpen', instrument=osiris)\n data.clean(sci_files, target, 'kp_tdOpen', refSrc, refSrc, field=target, instrument=osiris)\n data.calcStrehl(sci_files, 'kp_tdOpen', field=target, instrument=osiris)\n data.combine(sci_files, 'kp_tdOpen', epoch, 
field=target,\n trim=0, weight='strehl', submaps=3, instrument=osiris)\n\n ##########\n #\n # KB200101\n #\n ##########\n\n ##########\n # Kp-band reduction\n ##########\n\n # -- If you have more than one position angle, make sure to\n # clean them seperatly.\n # -- Strehl and Ref src should be the pixel coordinates of a bright\n # (but non saturated) source in the first exposure of sci_files.\n # -- If you use the OSIRIS image, you must include the full filename in the list. \n target = 'kb200101'\n sci_files = ['i200822_a014{0:03d}_flip'.format(ii) for ii in range(2, 28+1)]\n sci_files += ['i200822_a015{0:03d}_flip'.format(ii) for ii in range(2, 5+1)]\n sci_files += ['i200822_a016{0:03d}_flip'.format(ii) for ii in range(2, 5+1)]\n sky_files = ['i200822_a017{0:03d}_flip'.format(ii) for ii in range(2, 6+1)]\n refSrc = [975, 1006] # This is the target\n # Alternative star to try (bright star to right of target): [1158, 994]\n \n sky.makesky(sky_files, target, 'kp_tdOpen', instrument=osiris)\n data.clean(sci_files, target, 'kp_tdOpen', refSrc, refSrc, field=target, instrument=osiris)\n data.calcStrehl(sci_files, 'kp_tdOpen', field=target, instrument=osiris)\n data.combine(sci_files, 'kp_tdOpen', epoch, field=target,\n trim=1, weight='strehl', submaps=3, instrument=osiris)", "def main():\n\n with its.device.ItsSession() as cam:\n\n props = cam.get_camera_properties()\n its.caps.skip_unless(its.caps.raw16(props) and\n its.caps.manual_sensor(props) and\n its.caps.read_3a(props) and\n its.caps.per_frame_control(props) and\n not its.caps.mono_camera(props))\n debug = its.caps.debug_mode()\n\n # Expose for the scene with min sensitivity\n exp_min, exp_max = props[\"android.sensor.info.exposureTimeRange\"]\n sens_min, _ = props[\"android.sensor.info.sensitivityRange\"]\n # Digital gains might not be visible on RAW data\n sens_max = props[\"android.sensor.maxAnalogSensitivity\"]\n sens_step = (sens_max - sens_min) / NUM_ISO_STEPS\n white_level = float(props[\"android.sensor.info.whiteLevel\"])\n black_levels = [its.image.get_black_level(i,props) for i in range(4)]\n # Get the active array width and height.\n aax = props[\"android.sensor.info.activeArraySize\"][\"left\"]\n aay = props[\"android.sensor.info.activeArraySize\"][\"top\"]\n aaw = props[\"android.sensor.info.activeArraySize\"][\"right\"]-aax\n aah = props[\"android.sensor.info.activeArraySize\"][\"bottom\"]-aay\n raw_stat_fmt = {\"format\": \"rawStats\",\n \"gridWidth\": aaw/IMG_STATS_GRID,\n \"gridHeight\": aah/IMG_STATS_GRID}\n\n e_test = []\n mult = 1.0\n while exp_min*mult < exp_max:\n e_test.append(int(exp_min*mult))\n mult *= EXP_MULT\n if e_test[-1] < exp_max * INCREASING_THR:\n e_test.append(int(exp_max))\n e_test_ms = [e / 1000000.0 for e in e_test]\n\n for s in range(sens_min, sens_max, sens_step):\n means = []\n means.append(black_levels)\n reqs = [its.objects.manual_capture_request(s, e, 0) for e in e_test]\n # Capture raw in debug mode, rawStats otherwise\n caps = []\n for i in range(len(reqs) / SLICE_LEN):\n if debug:\n caps += cam.do_capture(reqs[i*SLICE_LEN:(i+1)*SLICE_LEN], cam.CAP_RAW)\n else:\n caps += cam.do_capture(reqs[i*SLICE_LEN:(i+1)*SLICE_LEN], raw_stat_fmt)\n last_n = len(reqs) % SLICE_LEN\n if last_n == 1:\n if debug:\n caps += [cam.do_capture(reqs[-last_n:], cam.CAP_RAW)]\n else:\n caps += [cam.do_capture(reqs[-last_n:], raw_stat_fmt)]\n elif last_n > 0:\n if debug:\n caps += cam.do_capture(reqs[-last_n:], cam.CAP_RAW)\n else:\n caps += cam.do_capture(reqs[-last_n:], raw_stat_fmt)\n\n # Measure the mean of each 
channel.\n # Each shot should be brighter (except underexposed/overexposed scene)\n for i,cap in enumerate(caps):\n if debug:\n planes = its.image.convert_capture_to_planes(cap, props)\n tiles = [its.image.get_image_patch(p, 0.445, 0.445, 0.11, 0.11) for p in planes]\n mean = [m * white_level for tile in tiles\n for m in its.image.compute_image_means(tile)]\n img = its.image.convert_capture_to_rgb_image(cap, props=props)\n its.image.write_image(img, \"%s_s=%d_e=%05d.jpg\" % (NAME, s, e_test))\n else:\n mean_image, _ = its.image.unpack_rawstats_capture(cap)\n mean = mean_image[IMG_STATS_GRID/2, IMG_STATS_GRID/2]\n\n print \"ISO=%d, exposure time=%.3fms, mean=%s\" % (\n s, e_test[i] / 1000000.0, str(mean))\n means.append(mean)\n\n\n # means[0] is black level value\n r = [m[0] for m in means[1:]]\n gr = [m[1] for m in means[1:]]\n gb = [m[2] for m in means[1:]]\n b = [m[3] for m in means[1:]]\n\n pylab.plot(e_test_ms, r, \"r.-\")\n pylab.plot(e_test_ms, b, \"b.-\")\n pylab.plot(e_test_ms, gr, \"g.-\")\n pylab.plot(e_test_ms, gb, \"k.-\")\n pylab.xscale('log')\n pylab.yscale('log')\n pylab.title(\"%s ISO=%d\" % (NAME, s))\n pylab.xlabel(\"Exposure time (ms)\")\n pylab.ylabel(\"Center patch pixel mean\")\n matplotlib.pyplot.savefig(\"%s_s=%d.png\" % (NAME, s))\n pylab.clf()\n\n allow_under_saturated = True\n for i in xrange(1, len(means)):\n prev_mean = means[i-1]\n mean = means[i]\n\n if np.isclose(max(mean), white_level, rtol=SATURATION_TOL):\n print \"Saturated: white_level %f, max_mean %f\"% (white_level, max(mean))\n break;\n\n if allow_under_saturated and np.allclose(mean, black_levels, rtol=BLK_LVL_TOL):\n # All channel means are close to black level\n continue\n\n allow_under_saturated = False\n # Check pixel means are increasing (with small tolerance)\n channels = [\"Red\", \"Gr\", \"Gb\", \"Blue\"]\n for chan in range(4):\n err_msg = \"ISO=%d, %s, exptime %3fms mean: %.2f, %s mean: %.2f, TOL=%.f%%\" % (\n s, channels[chan],\n e_test_ms[i-1], mean[chan],\n \"black level\" if i == 1 else \"exptime %3fms\"%e_test_ms[i-2],\n prev_mean[chan],\n INCREASING_THR*100)\n assert mean[chan] > prev_mean[chan] * INCREASING_THR, err_msg", "def sharpen(im):\n kernel = np.array([[-1, -1, -1], [-1, 9, -1], [-1, -1, -1]])\n im = cv2.filter2D(im, -1, kernel)\n return im", "def surf_bright(im, coord, minrad=3.):\n r = minrad\n slist = []\n while r < 80:\n aperture = CircularAperture(coord, r=r)\n phot_tab = aperture_photometry(im, aperture)\n s = phot_tab['aperture_sum']\n sb = s/(np.pi * r**2)\n print(sb)\n r += 1", "def expose(self, cmd, expTime, expType):\n\n if not expType:\n expType = 'test'\n if cmd:\n cmd.inform('exposureState=\"exposing\"')\n if expType not in ('bias', 'test') and expTime > 0:\n time.sleep(expTime + self._exposureOverheadTime())\n\n if cmd:\n cmd.inform('exposureState=\"reading\"')\n\n f = pyfits.open('/home/chyan/mhs/data/mcs/schmidt_fiber_snr400_rmod71.fits')\n image = f[0].data\n # image = numpy.random.normal(self.biasLevel,\n # scale=self.readNoise,\n # size=self.imageSize).astype('u2')\n\n if expType != 'test':\n time.sleep(self._readoutTime())\n return image", "def sharpen(image_path, factor=3):\n BasicTransform.convert_image(image_path)\n\n with Image.open(image_path) as img:\n filter = ImageEnhance.Sharpness(img)\n new_image = filter.enhance(factor)\n new_image.save(image_path)", "def expand_slicer_aperture(system):\n\n # First of all, we need to find the Surface Number for the IMAGE SLICER\n N_surfaces = system.LDE.NumberOfSurfaces\n surface_names = {} # A dictionary of 
surface number -> surface comment\n for k in np.arange(1, N_surfaces):\n surface_names[k] = system.LDE.GetSurfaceAt(k).Comment\n # find the Slicer surface number\n try:\n # The naming convention for this surface has changed. Not the same for Nominal Design as Monte Carlos\n slicer_num = list(surface_names.keys())[list(surface_names.values()).index('Slicer Mirror')]\n except ValueError:\n slicer_num = list(surface_names.keys())[list(surface_names.values()).index('IFU ISA')]\n slicer = system.LDE.GetSurfaceAt(slicer_num)\n\n # Read Current Aperture Settings\n apt_type = slicer.ApertureData.CurrentType\n # print(\"Aperture type: \", apt_type)\n if apt_type == 4: # 4 is Rectangular aperture\n current_apt_sett = slicer.ApertureData.CurrentTypeSettings\n # print(\"Current Settings:\")\n x0 = current_apt_sett._S_RectangularAperture.XHalfWidth\n y0 = current_apt_sett._S_RectangularAperture.YHalfWidth\n # If the Y aperture hasn't been changed already, we change it here to 999 mm to get all rays through\n if y0 != 999:\n # Change Settings\n aperture_settings = slicer.ApertureData.CreateApertureTypeSettings(\n constants.SurfaceApertureTypes_RectangularAperture)\n aperture_settings._S_RectangularAperture.XHalfWidth = x0\n aperture_settings._S_RectangularAperture.YHalfWidth = 999\n slicer.ApertureData.ChangeApertureTypeSettings(aperture_settings)\n\n current_apt_sett = slicer.ApertureData.CurrentTypeSettings\n # Notify that we have successfully modified the aperture\n print(\"Changing aperture of surface: \", slicer.Comment)\n print(\"New Settings:\")\n print(\"X_HalfWidth = %.2f\" % current_apt_sett._S_RectangularAperture.XHalfWidth)\n print(\"Y_HalfWidth = %.2f\" % current_apt_sett._S_RectangularAperture.YHalfWidth)\n\n return", "def apply_sharpening_on(image):\n # Create kernel\n kernel = np.array([[0, -1, 0],\n [-1, 5, -1],\n [0, -1, 0]])\n\n # Sharpen image\n sharp_image = cv2.filter2D(image, -1, kernel)\n return sharp_image", "def on_run(self):\n self.set_illumination({'mode': 'breathe'})", "def main():\n NAME = os.path.basename(__file__).split(\".\")[0]\n\n with its.device.ItsSession() as cam:\n\n props = cam.get_camera_properties()\n\n white_level = float(props['android.sensor.info.whiteLevel'])\n black_levels = props['android.sensor.blackLevelPattern']\n idxs = its.image.get_canonical_cfa_order(props)\n black_levels = [black_levels[i] for i in idxs]\n\n # Expose for the scene with min sensitivity\n sens_min, sens_max = props['android.sensor.info.sensitivityRange']\n s_ae,e_ae,awb_gains,awb_ccm,_ = cam.do_3a(get_results=True)\n s_e_prod = s_ae * e_ae\n\n # Make the image brighter since the script looks at linear Bayer\n # raw patches rather than gamma-encoded YUV patches (and the AE\n # probably under-exposes a little for this use-case).\n s_e_prod *= 2\n\n # Capture raw frames across the full sensitivity range.\n NUM_SENS_STEPS = 15\n sens_step = int((sens_max - sens_min - 1) / float(NUM_SENS_STEPS))\n reqs = []\n sens = []\n for s in range(sens_min, sens_max, sens_step):\n e = int(s_e_prod / float(s))\n req = its.objects.manual_capture_request(s, e)\n req[\"android.colorCorrection.transform\"] = \\\n its.objects.float_to_rational(awb_ccm)\n req[\"android.colorCorrection.gains\"] = awb_gains\n reqs.append(req)\n sens.append(s)\n\n caps = cam.do_capture(reqs, cam.CAP_RAW)\n\n # A list of the (x,y) coords of the center pixel of a collection of\n # patches of a color checker chart. Each patch should be uniform,\n # however the actual color doesn't matter. 
Note that the coords are\n # relative to the *converted* RGB image, which is 1/2 x 1/2 of the\n # full size; convert back to full.\n img = its.image.convert_capture_to_rgb_image(caps[0], props=props)\n patches = its.image.get_color_checker_chart_patches(img, NAME+\"_debug\")\n patches = [(2*x,2*y) for (x,y) in sum(patches,[])]\n\n lines = []\n for (s,cap) in zip(sens,caps):\n # For each capture, compute the mean value in each patch, for each\n # Bayer plane; discard patches where pixels are close to clamped.\n # Also compute the variance.\n CLAMP_THRESH = 0.2\n planes = its.image.convert_capture_to_planes(cap, props)\n points = []\n for i,plane in enumerate(planes):\n plane = (plane * white_level - black_levels[i]) / (\n white_level - black_levels[i])\n for j,(x,y) in enumerate(patches):\n tile = plane[y/2-16:y/2+16:,x/2-16:x/2+16:,::]\n mean = its.image.compute_image_means(tile)[0]\n var = its.image.compute_image_variances(tile)[0]\n if (mean > CLAMP_THRESH and mean < 1.0-CLAMP_THRESH):\n # Each point is a (mean,variance) tuple for a patch;\n # for a given ISO, there should be a linear\n # relationship between these values.\n points.append((mean,var))\n\n # Fit a line to the points, with a line equation: y = mx + b.\n # This line is the relationship between mean and variance (i.e.)\n # between signal level and noise, for this particular sensor.\n # In the DNG noise model, the gradient (m) is \"S\", and the offset\n # (b) is \"O\".\n points.sort()\n xs = [x for (x,y) in points]\n ys = [y for (x,y) in points]\n m,b = numpy.polyfit(xs, ys, 1)\n lines.append((s,m,b))\n print s, \"->\", m, b\n\n # TODO: Clean up these checks (which currently fail in some cases).\n # Some sanity checks:\n # * Noise levels should increase with brightness.\n # * Extrapolating to a black image, the noise should be positive.\n # Basically, the \"b\" value should correspnd to the read noise,\n # which is the noise level if the sensor was operating in zero\n # light.\n #assert(m > 0)\n #assert(b >= 0)\n\n # Draw a plot.\n pylab.plot(xs, ys, 'r')\n pylab.plot([0,xs[-1]],[b,m*xs[-1]+b],'b')\n matplotlib.pyplot.savefig(\"%s_plot_mean_vs_variance.png\" % (NAME))\n\n # Now fit a line across the (m,b) line parameters for each sensitivity.\n # The gradient (m) params are fit to the \"S\" line, and the offset (b)\n # params are fit to the \"O\" line, both as a function of sensitivity.\n gains = [d[0] for d in lines]\n Ss = [d[1] for d in lines]\n Os = [d[2] for d in lines]\n mS,bS = numpy.polyfit(gains, Ss, 1)\n mO,bO = numpy.polyfit(gains, Os, 1)\n\n # Plot curve \"O\" as 10x, so it fits in the same scale as curve \"S\".\n pylab.plot(gains, [10*o for o in Os], 'r')\n pylab.plot([gains[0],gains[-1]],\n [10*mO*gains[0]+10*bO, 10*mO*gains[-1]+10*bO], 'b')\n pylab.plot(gains, Ss, 'r')\n pylab.plot([gains[0],gains[-1]], [mS*gains[0]+bS, mS*gains[-1]+bS], 'b')\n matplotlib.pyplot.savefig(\"%s_plot_S_O.png\" % (NAME))\n\n print \"\"\"\n /* Generated test code to dump a table of data for external validation\n * of the noise model parameters.\n */\n #include <stdio.h>\n #include <assert.h>\n double compute_noise_model_entry_S(int sens);\n double compute_noise_model_entry_O(int sens);\n int main(void) {\n int sens;\n for (sens = %d; sens <= %d; sens += 100) {\n double o = compute_noise_model_entry_O(sens);\n double s = compute_noise_model_entry_S(sens);\n printf(\"%%d,%%lf,%%lf\\\\n\", sens, o, s);\n }\n return 0;\n }\n\n /* Generated functions to map a given sensitivity to the O and S noise\n * model parameters in the DNG noise 
model.\n */\n double compute_noise_model_entry_S(int sens) {\n double s = %e * sens + %e;\n return s < 0.0 ? 0.0 : s;\n }\n double compute_noise_model_entry_O(int sens) {\n double o = %e * sens + %e;\n return o < 0.0 ? 0.0 : o;\n }\n \"\"\"%(sens_min,sens_max,mS,bS,mO,bO)", "def grass_drass():", "def flat(s='flat'):\n s = s.strip()[:80] #truncate to 80 char to fit in FITS header\n print camera.SetShutter(0)\n camera.status.imgtype = 'FLAT'\n camera.status.object = s\n camera.status.update()", "def __call__(self, frame_num):\n # propagate and set the density\n self.img.set_array(\n np.abs(self.quant_sys.propagate(10)) ** 2\n )\n return self.img,", "def opd_dither(cfg):\n \n setpoint_new = pi.getINDI('PLC.UBCSettings.PLSetpoint')\n file_number = pi.getINDI('NOMIC.CamInfo.FIndex') # Get initial file number\n offsets = np.array(cfg['nomic_dither_opd_pattern']) * 5.0 * 180.0 / np.pi # input in rad at 11um, but commandet offsets in deg in K band\n pi.setINDI('NOMIC.EditFITS.Keyword=spdthpos;Value=0.0;Comment=setpoint dither position (offset from nominal setpoint) in rad')\n while True:\n for i_step in range(0, len(cfg['nomic_dither_opd_pattern'])):\n file_number = file_number + cfg['nomic_dither_opd_ndits'][i_step]\n try:\n pi.evalINDI ('\"NOMIC.CamInfo.FIndex\" >= %d' % file_number, timeout=900) # wait for cfg['nomic_dither_opd_ndits'][i] new files to arrive\n except:\n info('OPD dither offset timed out (more than 15 min since last offset)')\n print(' or was interrupted.')\n print(' Make sure to restart OPD dither pattern.')\n return()\n setpoint_old = pi.getINDI('PLC.UBCSettings.PLSetpoint') # ask for current setpoint\n if (setpoint_old - setpoint_new) > 2.9: # if expected setpoint is more than ~0.01 rad (at 11um) different from current setpoint\n info('Setpoint has changed between two dither positions.')\n setpoint_new = setpoint_old + offsets[i_step] # determine new setpoint\n pi.setINDI('NOMIC.EditFITS.Keyword=spdthpos;Value=' + str(np.sum(offsets[:i_step+1])) + ';Comment=setpoint dither position (offset from nominal setpoint) in rad')\n pi.setINDI('PLC.PLSetpoint.PLSetpoint=' + str(setpoint_new) + ';forNAC=0') # send a dither offset", "def farm(w,h,status='harvest',loops=1):\n h1 = int(h/2) # var to get seeds from first half (or fewer) of rows\n food_key = 5 # hotbar key for food\n dirt_place_time = 4.5\n walk_time = 0.33\n wait_time = 0.7\n grow_time = 540 # time for plants to grow = 600s - (time to log out & in) \n \n ctrl.resetCamera('down')\n for loop in range(loops):\n for y in range(h):\n for x in range(w):\n # checks if char is hungry before moving, & eats food on food_key\n if img_obj.isHungry():\n ctrl.eatFood(key=food_key)\n if status == 'harvest':\n # picks up farm patch, loot seeds, places farm patch, continues \n if y < h1: \n ctrl.selectOption('down')\n time.sleep(dirt_place_time) # takes 3 sec + more time due to server lag\n ctrl.selectOption('right')\n ctrl.loot() \n \n ctrl.walk('down', walk_time * 2, jump=True)\n ctrl.selectOption('down')\n ctrl.clickLMB()\n time.sleep(dirt_place_time)\n \n ctrl.walk('up', walk_time, jump=True)\n \n else:\n ctrl.selectOption('right')\n for i in range(12): # takes 5 seconds to harvest, in 6 seconds, f key is pressed 12 times\n ctrl.loot()\n time.sleep(.5)\n \n time.sleep(wait_time) # delay to make sure it jumps to the next patch\n # plants seeds - assumes char is holding seeds\n ctrl.selectOption('right')\n for i in range(12): # takes 5 seconds to harvest, in 5.5 seconds, f key is pressed 11 times\n ctrl.loot()\n time.sleep(.5)\n 
time.sleep(wait_time)\n \n # moves char to next farm patch\n if x != (w-1): # if not on last farm patch of row\n if y % 2 == 0:\n direction = 'left'\n else:\n direction = 'right'\n ctrl.walk(direction, walk_time, jump=True)\n time.sleep(wait_time) # delay for server lag\n \n # moves char up to next row if not on last row\n if y != (h-1):\n ctrl.walk('up', walk_time, jump=True)\n \n # final loot loop to get remaining items\n # for i in range(10):\n # ctrl.loot()\n # time.sleep(.5)\n \n # returns char to bottom right farm patch\n if (h) % 2 != 0: # if height is odd, move char to right side\n for x in range(w-1):\n ctrl.walk('right', walk_time, jump=True)\n time.sleep(wait_time)\n for y in range(h-1):\n ctrl.walk('down', walk_time * 2, jump=True)\n time.sleep(wait_time)\n \n # returns to lobby, waits for plants to grow, logs in\n ctrl.returnToLobby()\n time.sleep(grow_time)\n ctrl.enterGame()\n # resets camera to be aligned facing North as camera will always be pointed North after logging in\n ctrl.walk('up', .1, jump=True) \n time.sleep(wait_time)\n ctrl.walk('down', .1, jump=True)\n \n ctrl.resetCamera('down')\n # when relogging, char is displaced sometimes, this checks if field is selected, if not it'll try to find it, if that fails the program exits\n if not (img_obj.checkObjTitle('field')):\n ctrl.walk('left', walk_time, jump=True)\n if not (img_obj.checkObjTitle('field')):\n ctrl.walk('up', walk_time, jump=True)\n if not (img_obj.checkObjTitle('field')):\n break", "def expose(self):\n\n ## Determine type of exposure (exp, series, stack)\n exptype = str(self.exptypeComboBox.currentText())\n mode = self.modedict[exptype]\n\n ## Get exposure parameters\n if mode == \"bias\":\n exptime = 0.0\n else:\n exptime = self.exptimeSpinBox.value()\n imcount = self.imstackSpinBox.value()\n seqnum = self.imnumSpinBox.value()\n mintime = self.minexpSpinBox.value()\n maxtime = self.maxexpSpinBox.value()\n step = self.tstepSpinBox.value()\n\n ## Determine filter kwargs\n if self.filterToggleButton.isChecked():\n kwargs = {'filter_name' : str(self.filterComboBox.currentText())}\n else:\n kwargs = {'monowl' : self.monoSpinBox.value()}\n\n if self.testimCheckBox.isChecked():\n title = 'test'\n else:\n title = str(self.imtitleLineEdit.text())\n\n ## Build filepath\n filepath = os.path.join(str(self.imfilenameLineEdit.text()),title)\n \n ## Check if single exposure\n if exptype in [\"Exposure\", \"Dark\", \"Bias\"]:\n\n ## Perform exposure\n self.logger.info(\"Starting {0}s {1} image.\".format(exptime, exptype))\n self.image_start.emit(1)\n\n try:\n filename = exposure.im_acq(mode, filepath, exptime, seqnum, **kwargs)\n self.image_taken.emit(1)\n except subprocess.CalledProcessError:\n self.logger.exception(\"Error in executable {0}_acq. Image not taken.\".format(mode))\n except OSError:\n self.logger.exception(\"Executable {0}_acq not found. Image not taken\".format(mode))\n except IOError:\n self.logger.exception(\"File already exits. 
Image not taken.\")\n else:\n self.logger.info(\"Exposure {0} finished successfully.\".format(filename))\n self.seqnum_inc.emit(seqnum)\n subprocess.Popen(['ds9', '-mosaicimage', 'iraf', filename,\n '-zoom', 'to', 'fit', '-cmap', 'b'])\n\n ## Check if a stack of exposures of same type\n elif exptype in [\"Exposure Stack\", \"Dark Stack\", \"Bias Stack\"]:\n\n total = seqnum + imcount\n self.logger.info(\"Starting {0}s {1} stack.\".format(exptime, exptype))\n self.image_start.emit(imcount)\n\n try:\n for i in range(seqnum, total):\n self.logger.info(\"Starting image {0} of {1}.\".format(i+1-seqnum, imcount))\n filename = exposure.im_acq(mode, filepath, exptime, i, **kwargs)\n self.logger.info(\"Exposure {0} finished successfully.\".format(filename))\n self.image_taken.emit(i+1-seqnum)\n self.seqnum_inc.emit(i)\n except subprocess.CalledProcessError:\n self.logger.exception(\"Error in executable {0}_acq. Image not taken.\".format(mode))\n except OSError:\n self.logger.exception(\"Executable {0}_acq not found. Image not taken.\".format(mode))\n except IOError:\n self.logger.exception(\"File already exists. Image not taken.\")\n else:\n self.logger.info(\"Exposure stack finished successfully.\")\n subprocess.Popen(['ds9', '-mosaicimage', 'iraf', filename, '-zoom', 'to', 'fit', '-cmap', 'b'])\n \n ## Check if a series of exposures of increase exposure time\n elif exptype in [\"Exposure Series\", \"Dark Series\"]:\n\n ## Parameter checks\n if mintime > maxtime:\n self.logger.warning(\"Minimum time must be less than Maximum time. Series not started.\")\n return\n elif step <= 0:\n self.logger.warning(\"Time step must be greater than 0. Series not started.\")\n return\n\n ## Construct array of exposure times\n t = mintime\n time_array = []\n while t <= maxtime:\n time_array.append(t)\n t += step\n \n ## Perform series\n self.logger.info(\"Starting {0} series with mintime {1}, maxtime {2}, and step {3}.\".format(exptype, mintime, maxtime, step))\n self.image_start.emit(len(time_array))\n \n try:\n for i, time in enumerate(time_array):\n self.logger.info(\"Starting {0}s {1} image.\".format(time, mode))\n filename = exposure.im_acq(mode, filepath, time, seqnum, **kwargs)\n self.logger.info(\"Exposure {0} finished successfully.\".format(filename))\n self.image_taken.emit(i+1)\n except subprocess.CalledProcessError:\n self.logger.exception(\"Error in executable {0}_acq. Image not taken.\".format(mode))\n except OSError:\n self.logger.exception(\"Executable {0}_acq not found. Image not taken.\".format(mode))\n except IOError:\n self.logger.exception(\"File already exists. 
Image not taken.\")\n else:\n self.logger.info(\"Exposure series finished successfully.\")\n subprocess.Popen(['ds9', '-mosaicimage', 'iraf', filename, '-zoom', 'to', 'fit', '-cmap', 'b'])\n self.seqnum_inc.emit(seqnum)", "def measure_flats(det, shutter, quantity, samStage, samPos):\n priorPosition = samStage.position\n yield from set_white_frame()\n yield from bps.mv(\n samStage, samPos,\n shutter, \"open\")\n yield from _acquire_n_frames(det, quantity)\n yield from bps.mv(samStage, priorPosition)", "def plasma_dose(self, width, power):\n print('Start plasma dose.')\n self.seren.settings['set_forward_power'] = power\n self.seren.settings['RF_enable'] = True\n time.sleep(width)\n self.seren.settings['RF_enable'] = False\n self.seren.settings['set_forward_power'] = 0\n print('Plasma dose finished.')", "def strut(self):\n self.fwd(left=50, right=50)\n for x in range(2):\n self.servo(1000)\n time.sleep(.1) \n self.servo(1500) # Look Straight\n time.sleep(1)\n self.servo(2000)\n time.sleep(.1)\n self.servo(1500)", "def dark(s='dark'):\n s = s.strip()[:80] #truncate to 80 char to fit in FITS header\n print camera.SetShutter(2)\n camera.status.imgtype = 'DARK'\n camera.status.object = s\n camera.status.update()", "def illustrate_sift(self):\n\n print 'passe illustrate sift in'\n\n work_dir = self.work_dir\n draw_keys_oriented(work_dir+'keys_im0.txt',\n work_dir+'input_0.orig.png',\n work_dir+'keys_im0.png')\n draw_keys_oriented(work_dir+'keys_im1.txt',\n work_dir+'input_1.orig.png',\n work_dir+'keys_im1.png')\n print 'passe illustrate sift in'\n\n for im in ['0','1']:\n for kypts in ['NES', 'DoGSoftThresh', 'ExtrInterp',\n 'ExtrInterpREJ', 'DoGThresh', 'OnEdgeResp',\n 'OnEdgeRespREJ']:\n draw_keys(work_dir+'extra_'+kypts+'_im'+im+'.txt',\n work_dir+'input_'+im+'.orig.png',\n work_dir+'extra_'+kypts+'_im'+im+'.png') \n\n draw_keys_oriented(work_dir+'extra_OriAssignedMULT_im'+im+'.txt',\n work_dir+'input_'+im+'.orig.png',\n work_dir+'extra_OriAssignedMULT_im'+im+'.png')\n\n return 1", "def smooth_stitch(*, input_dir, output_dir):\n image_paths = glob(os.path.join(input_dir, \"*.tif\"))\n if not image_paths:\n raise RuntimeError(\"%s does not contain any .tif file\" % (input_dir))\n\n # Get the profile and affine of some image as template for output image\n first_image = image_paths[0]\n with rasterio.open(first_image) as src:\n profile = src.profile.copy()\n src_res = src.res\n chip_size = src.width\n assert src.width == src.height\n\n with tempfile.TemporaryDirectory() as tmpdir:\n tmp_image_paths = generate_spline_window_chips(\n image_paths=image_paths, output_dir=tmpdir\n )\n\n # Get bounds from all images and build R-Tree index\n idx, (dst_w, dst_s, dst_e, dst_n) = build_bounds_index(tmp_image_paths)\n\n # Get affine transform for complete bounds\n logger.info(\"Output bounds: %r\", (dst_w, dst_s, dst_e, dst_n))\n output_transform = Affine.translation(dst_w, dst_n)\n logger.info(\"Output transform, before scaling: %r\", output_transform)\n\n output_transform *= Affine.scale(src_res[0], -src_res[1])\n logger.info(\"Output transform, after scaling: %r\", output_transform)\n\n # Compute output array shape. We guarantee it will cover the output\n # bounds completely. 
We need this to build windows list later.\n output_width = int(math.ceil((dst_e - dst_w) / src_res[0]))\n output_height = int(math.ceil((dst_n - dst_s) / src_res[1]))\n\n # Set width and height for output chips, and other attributes\n profile.update(width=chip_size, height=chip_size, tiled=True)\n\n windows = list(\n sliding_windows(chip_size, width=output_width, height=output_height)\n )\n logger.info(\"Num. windows: %d\", len(windows))\n\n for win, (i, j) in tqdm(windows):\n # Get window affine transform and bounds\n win_transform = rasterio.windows.transform(win, output_transform)\n win_bounds = rasterio.windows.bounds(win, output_transform)\n\n # Get chips that intersect with window\n intersect_chip_paths = [\n tmp_image_paths[i] for i in idx.intersection(win_bounds)\n ]\n\n if intersect_chip_paths:\n # Merge them with median method\n img = merge_chips(intersect_chip_paths, win_bounds=win_bounds)\n\n # Write output chip\n profile.update(transform=win_transform)\n output_path = os.path.join(output_dir, f\"{i}_{j}.tif\")\n\n os.makedirs(output_dir, exist_ok=True)\n with rasterio.open(output_path, \"w\", **profile) as dst:\n for i in range(img.shape[0]):\n dst.write(img[i, :, :], i + 1)", "def WarpOpen(wn, fn, ms = 15, Wres = 300, Hres = 300, M = 20, k_spr = 10, dt = 0.0125):\n MSG(\"WarpOpen\")\n cv2.namedWindow(wn)\n img = cv2.imread(fn,1)\n img = cv2.resize(img, (Wres,Hres))\n (h,w,cols) = img.shape\n L0 = list(img.flatten())\n warp.open(L0, M, w, h)\n warp.set_k_spr(k_spr)\n warp.set_dt(dt)\n return", "def swarpfilter(d, dir, directory, images, keys, filter, lamp, camera, done, output, type):\n filt = images.files_filtered(FWINAME=filter, FLSPECTR=lamp, CAMNAME=camera, HISTORY=done)\n files = [d + x for x in filt.tolist()]\n print(files)\n if files:\n swarp(files, output=directory + '/' + output + '.fits', celestial_type=type)", "def punched(self):\n if not self.dizzy:\n self.dizzy = 1\n self.original = self.image\n Chimp.count_punch += 1", "def expose_test(self):\n with self.lock:\n self.dark = 1\n self.tstart = time.time()\n self.timestamp = time.strftime(\"%Y-%m-%dT%H:%M:%S\", time.localtime(self.tstart))\n imagesize = (self.expArea[3] - self.expArea[1],\n self.expArea[2] - self.expArea[0])\n self.data = np.ones(shape=imagesize, dtype=np.uint16)\n self.tend = time.time()", "def capture(self, channel: LC):\n\n # check if gain information is available, if not, update first\n if \"d2d\" not in self.config:\n self.setup_d2d()\n self.update()\n\n # turn on the light\n self.light_control(channel, 1)\n\n # assemble the terminal command\n path_to_bright = os.getcwd() + \"/cam/tmp/bright.bmp\"\n path_to_dark = os.getcwd() + \"/cam/tmp/dark.bmp\"\n gain = self.config[\"d2d\"][channel][\"analog-gain\"] * self.config[\"d2d\"][channel][\"digital-gain\"]\n\n photo_cmd = \"raspistill -e bmp -w {} -h {} -ss {} -t 1000 -awb off -awbg {},{} -ag {} -dg {}\".format(self.settings.resolution[0], self.settings.resolution[1], self.settings.shutter_speed[channel], self.config[\"wb\"][channel][\"r\"], self.config[\"wb\"][channel][\"b\"], self.config[\"d2d\"][channel][\"analog-gain\"], self.config[\"d2d\"][channel][\"digital-gain\"])\n\n # run command and take bright and dark picture\n # start the bright image capture by spawning a clean process and executing the command, then waiting for the q\n p = mp.Process(target=photo_worker, args=(photo_cmd + \" -o {}\".format(path_to_bright),))\n try:\n p.start()\n p.join()\n except OSError:\n d_print(\"Could not start child process, out of memory\", 3)\n return 
(None, 0)\n # turn off the light\n self.light_control(channel, 0)\n # start the dark image capture by spawning a clean process and executing the command, then waiting for the q\n p = mp.Process(target=photo_worker, args=(photo_cmd + \" -o {}\".format(path_to_dark),))\n try:\n p.start()\n p.join()\n except OSError:\n d_print(\"Could not start child process, out of memory\", 3)\n return (None, 0)\n\n # load the images from file, perform dark frame subtraction and return the array\n bright = Image.open(path_to_bright)\n rgb = np.array(bright)\n if channel != LC.GROWTH:\n dark = Image.open(path_to_dark)\n rgb = cv2.subtract(rgb, np.array(dark))\n\n # if the time since last update is larger than a day, update the gains after the photo\n if time.time() - self.config[\"d2d\"][\"timestamp\"] > 3600*24:\n self.update()\n\n return (rgb, gain)", "def _do_expose_loop(self, fitsfile, seconds):\n log.debug(f\"Starting expose loop with {self.max_exposures} exposures\")\n while (self.current_exposure is not None and \n self.current_exposure < self.max_exposures):\n self.current_exposure += 1\n self.Expose(fitsfile, seconds)\n while self.process and self.process.poll() is None:\n sleep(5)\n if not self.process or self.process.returncode != 0:\n break\n \n self.current_exposure = None\n self.max_exposures = None", "def continue_with_exposure(self):\r\n # Allocate space to give to scan_until_abort, and name the two\r\n # rows appropriately.\r\n self.data_pair = self.cam.get_new_array(n_images=2)\r\n self.pump_probe_data = self.data_pair[0]\r\n self.probe_only_data = self.data_pair[1]\r\n # Keep track of which image will be updated next\r\n self.next_data_has_pump = True\r\n\r\n # Tell self.thread what to do when the camera has new images\r\n self.cam.new_images.connect(self.send_new_images)\r\n\r\n # Get the current array of wavelengths from cam\r\n self.wavelen_arr = self.cam.get_wavelen_array()\r\n\r\n # Queue a call to cam.scan_until_abort\r\n self.startAcq.emit(self.data_pair)\r\n\r\n # Tell listeners (plotting widgets) to start displaying data too\r\n self.startDisplay.emit()", "def fire_smelter(self):\n # Get the smelter\n screenshot = utils.take_screenshot()\n forge = screenshot[152:168, 168:184]\n\n # Check if the cold forge exists\n result = cv2.matchTemplate(forge, self.cold_forge_template, cv2.TM_CCORR_NORMED)\n max_val = cv2.minMaxLoc(result)[1]\n\n # Found cold forge, light it and wait\n if max_val > 0.9:\n pyautogui.moveTo(192, 159, 0.15)\n pyautogui.doubleClick()\n sleep(1.5)", "def spectate(self):\n pass", "def __init__(self, slit_width_xaxis, slit_height_zaxis):\n super(SwScreen, self).__init__()\n self.sw = self.create_instance()\n\n self.set_output_files(fwrite=0, f_angle=0) #write all, TODO: remove\n\n n_screen = 1\n i_screen = np.array([1, 0, 0, 0, 0, 0, 0, 0, 0, 0])\n i_abs = np.zeros(10)\n i_slit = np.zeros(10)\n i_stop = np.zeros(10)\n k_slit = np.zeros(10)\n thick = np.zeros(10)\n file_abs = np.array(['', '', '', '', '', '', '', '', '', ''])\n rx_slit = np.zeros(10)\n rz_slit = np.zeros(10)\n sl_dis = np.zeros(10)\n file_src_ext = np.array(['', '', '', '', '', '', '', '', '', ''])\n cx_slit = np.zeros(10)\n cz_slit = np.zeros(10)\n\n i_abs[0] = 0 # NO ABSORPTION\n i_slit[0] = 0 # APERTURING\n i_stop[0] = 0 # SLIT\n k_slit[0] = 0 # RECTANGULAR\n\n rx_slit[0] = slit_width_xaxis\n rz_slit[0] = slit_height_zaxis\n cx_slit[0] = 0.0\n cz_slit[0] = 0.0\n\n self.sw._oe.set_screens(n_screen,\n i_screen,\n i_abs,\n sl_dis,\n i_slit,\n i_stop,\n k_slit,\n thick,\n file_abs,\n rx_slit,\n 
rz_slit,\n cx_slit,\n cz_slit,\n file_src_ext)", "def test_healpix_to_point_source_cuts(healpix_disk_new):\n skyobj = healpix_disk_new\n skyobj.healpix_to_point()\n skyobj.select(max_brightness=0.9 * skyobj.stokes[0].max())", "def skyPixelsHPX(self,i, d,feedindex):\n\n # We store all the pointing information\n x = d['level1/spectrometer/pixel_pointing/pixel_ra'][feedindex,:][:,self.select_mask]\n x = x[:,0:self.datasizes[i]].flatten()\n y = d['level1/spectrometer/pixel_pointing/pixel_dec'][feedindex,:][:,self.select_mask]\n y = y[:,0:self.datasizes[i]].flatten()\n \n # convert to Galactic\n rot = hp.rotator.Rotator(coord=['C','G'])\n gb, gl = rot((90-y)*np.pi/180., x*np.pi/180.)\n\n pixels = hp.ang2pix(self.nside, gb, gl)\n return pixels", "def setup():\n wcs = galsim.TanWCS(\n galsim.AffineTransform(0.26, 0.05, -0.08, -0.24, galsim.PositionD(1024,1024)),\n #galsim.AffineTransform(0.26, 0., 0., 0.26, galsim.PositionD(1024,1024)),\n galsim.CelestialCoord(5 * galsim.arcmin, -25 * galsim.degrees)\n )\n\n # Make the image (copied from test_single_image in test_simple.py)\n image = galsim.Image(2048, 2048, wcs=wcs)\n\n # Where to put the stars.\n x_list = [ 123.12, 345.98, 567.25, 1094.94, 924.15, 1532.74, 1743.11, 888.39, 1033.29, 1409.31 ]\n y_list = [ 345.43, 567.45, 1094.32, 924.29, 1532.92, 1743.83, 888.83, 1033.19, 1409.20, 123.11 ]\n\n # Draw a Gaussian PSF at each location on the image.\n sigma = 1.3\n g1 = 0.23\n g2 = -0.17\n du = 0.09 # in arcsec\n dv = -0.07\n flux = 123.45\n psf = galsim.Gaussian(sigma=sigma).shear(g1=g1, g2=g2).shift(du,dv) * flux\n for x, y in zip(x_list, y_list):\n bounds = galsim.BoundsI(int(x-31), int(x+32), int(y-31), int(y+32))\n offset = galsim.PositionD(x-int(x)-0.5, y-int(y)-0.5)\n psf.drawImage(image=image[bounds], method='no_pixel', offset=offset)\n image.addNoise(galsim.GaussianNoise(rng=galsim.BaseDeviate(1234), sigma=1e-6))\n\n # Write out the image to a file\n image_file = os.path.join('output','test_stats_image.fits')\n image.write(image_file)\n\n # Write out the catalog to a file\n dtype = [ ('x','f8'), ('y','f8') ]\n data = np.empty(len(x_list), dtype=dtype)\n data['x'] = x_list\n data['y'] = y_list\n cat_file = os.path.join('output','test_stats_cat.fits')\n fitsio.write(cat_file, data, clobber=True)", "def sharpen(img, ker = (9,9), sigX=10.0):\n gaus = cv2.GaussianBlur(img, ker, sigX)\n unsharp = cv2.addWeighted(img, 1.5, gaus, -0.5, 0, img)\n return unsharp", "def exposure(frameType, expTime):\n\n blobEvent.clear() \n\n # set the specified frame type\n if frameType.lower() == 'light':\n ccd_frame[0].s = PyIndi.ISS_ON\n ccd_frame[1].s = PyIndi.ISS_OFF\n ccd_frame[2].s = PyIndi.ISS_OFF\n ccd_frame[3].s = PyIndi.ISS_OFF \n indiclient.sendNewSwitch(ccd_frame)\n elif frameType.lower() == 'bias':\n ccd_frame[0].s = PyIndi.ISS_OFF\n ccd_frame[1].s = PyIndi.ISS_ON\n ccd_frame[2].s = PyIndi.ISS_OFF\n ccd_frame[3].s = PyIndi.ISS_OFF \n indiclient.sendNewSwitch(ccd_frame)\n elif frameType.lower() == 'dark':\n ccd_frame[0].s = PyIndi.ISS_OFF\n ccd_frame[1].s = PyIndi.ISS_OFF\n ccd_frame[2].s = PyIndi.ISS_ON\n ccd_frame[3].s = PyIndi.ISS_OFF \n indiclient.sendNewSwitch(ccd_frame)\n elif frameType.lower() == 'flat':\n ccd_frame[0].s = PyIndi.ISS_OFF\n ccd_frame[1].s = PyIndi.ISS_OFF\n ccd_frame[2].s = PyIndi.ISS_OFF\n ccd_frame[3].s = PyIndi.ISS_ON \n indiclient.sendNewSwitch(ccd_frame)\n\n # set the value for the next exposure\n ccd_exposure[0].value=expTime\n\n indiclient.sendNewNumber(ccd_exposure)\n\n # wait for the exposure\n blobEvent.wait()\n\n for 
blob in ccd_ccd1:\n # pyindi-client adds a getblobdata() method to IBLOB item\n # for accessing the contents of the blob, which is a bytearray in Python\n image_data=blob.getblobdata()\n\n # write the byte array out to a FITS file\n global imgNum\n global imgName\n imgNum += 1\n fileName = fileDir+'raw-'+str(imgNum).zfill(8)+'.fits'\n f = open(fileName, 'wb')\n f.write(image_data)\n f.close()\n imgName = fileName\n \n return fileName", "def do_pixels(self):\n self.do_yurt_pixels()\n self.do_petal_strip_pixels()\n self.do_vase_pixels()", "def main():\n original = SimpleImage('images/mt-rainier.jpg')\n original.show()\n reflected = make_reflected('images/mt-rainier.jpg')\n reflected.show()", "def align(): # open EH and fast shutter\n\t#marAuxiliary.closeMarShield()\n\td2in()\n\td3in()\n\tsh('o')", "def cehs():\n\tcloseEHShutter()", "def endexposureloop(self):\n self.max_exposures = self.current_exposure", "def camera(self):\n self.spectrum = self.spectrum", "def do_stuff(self):\n self.create_tourism_raster()", "def flatcombine(dir='Flats/*/dark_subtracted/'):\n\n for d in glob(dir):\n\n directory = \"/\".join(d.split('/')[0:2]) + '/swarped'\n if not os.path.exists(directory):\n os.makedirs(directory)\n\n keys = ['OBJECT', 'CAMNAME', 'FWINAME', 'ITIME', 'OBSDATE', 'FLSPECTR', 'HISTORY']\n images = ImageFileCollection(d, keywords=keys, glob_include='d*.fits')\n\n swarpfilter(d, dir, directory, images, keys, filter='H', lamp='on', camera='narrow', \n done='Dark Subtracted', output='cKNarrowLampOnH', type='PIXEL')\n swarpfilter(d, dir, directory, images, keys, filter='H', lamp='off', camera='narrow', \n done='Dark Subtracted', output='cKNarrowLampOffH', type='PIXEL')\n swarpfilter(d, dir, directory, images, keys, filter='H', lamp='on', camera='wide', done='Dark Subtracted',\n output='cKWideLampOnH', type='PIXEL')\n swarpfilter(d, dir, directory, images, keys, filter='H', lamp='off', camera='wide', done='Dark Subtracted',\n output='cKWideLampOffH', type='PIXEL')\n swarpfilter(d, dir, directory, images, keys, filter='Ks', lamp='on', camera='narrow', done='Dark Subtracted',\n output='cKNarrowLampOnKs', type='PIXEL')\n swarpfilter(d, dir, directory, images, keys, filter='Ks', lamp='off', camera='narrow', done='Dark Subtracted',\n output='cKNarrowLampOffKs', type='PIXEL')\n swarpfilter(d, dir, directory, images, keys, filter='Ks', lamp='on', camera='wide', done='Dark Subtracted',\n output='cKWideLampOnKs', type='PIXEL')\n swarpfilter(d, dir, directory, images, keys, filter='Ks', lamp='off', camera='wide', done='Dark Subtracted',\n output='cKWideLampOffKs', type='PIXEL')\n swarpfilter(d, dir, directory, images, keys, filter='J', lamp='on', camera='narrow', done='Dark Subtracted',\n output='cNarrowLampOnJ', type='PIXEL')\n swarpfilter(d, dir, directory, images, keys, filter='J', lamp='off', camera='narrow', done='Dark Subtracted',\n output='cKNarrowLampOffJ', type='PIXEL')\n swarpfilter(d, dir, directory, images, keys, filter='J', lamp='on', camera='wide', done='Dark Subtracted',\n output='cKWideLampOnJ', type='PIXEL')\n swarpfilter(d, dir, directory, images, keys, filter='J', lamp='off', camera='wide', done='Dark Subtracted',\n output='cKWideLampOffJ', type='PIXEL')", "def spilloverEff(freq,fD, FFBW, dB_at_bw, feed_type):\n theta0 = fD2angle(fD,units='degrees')\n tt = 0.0\n dtt = 0.1\n theta = np.arange(0.0,180.0+dtt,dtt)\n g = feedPattern(freq, theta, FFBW, dB_at_bw, feed_type)\n theta = theta*math.pi/180.0\n\n # integrate over main beam\n gmb = np.where(theta < (theta0/2.0)*math.pi/180.0)\n kern = 
g[gmb]*np.sin(theta[gmb]) \n num = integ.trapz(kern,dx=dtt*math.pi/180.0)\n # integrate over full beam\n kern = g*np.sin(theta)\n den = integ.trapz(kern,dx=dtt*math.pi/180.0)\n \n n_spill = num/den\n return n_spill", "def testSmallSrc(self):\n fromWcs = afwGeom.makeSkyWcs(\n crpix=lsst.geom.Point2D(0, 0),\n crval=lsst.geom.SpherePoint(359, 0, lsst.geom.degrees),\n cdMatrix=afwGeom.makeCdMatrix(scale=1.0e-8*lsst.geom.degrees),\n )\n fromExp = afwImage.ExposureF(afwImage.MaskedImageF(1, 1), fromWcs)\n\n toWcs = afwGeom.makeSkyWcs(\n crpix=lsst.geom.Point2D(0, 0),\n crval=lsst.geom.SpherePoint(358, 0, lsst.geom.degrees),\n cdMatrix=afwGeom.makeCdMatrix(scale=1.1e-8*lsst.geom.degrees),\n )\n toExp = afwImage.ExposureF(afwImage.MaskedImageF(10, 10), toWcs)\n\n warpControl = afwMath.WarpingControl(\"lanczos3\")\n # if a bug described in ticket #2441 is present, this will raise an\n # exception:\n numGoodPix = afwMath.warpExposure(toExp, fromExp, warpControl)\n self.assertEqual(numGoodPix, 0)\n self.assertTrue(np.all(np.isnan(toExp.image.array)))\n self.assertTrue(np.all(np.isinf(toExp.variance.array)))\n noDataBitMask = afwImage.Mask.getPlaneBitMask(\"NO_DATA\")\n self.assertTrue(np.all(toExp.mask.array == noDataBitMask))", "def swingCapture(self, pulse, divider=2, count=5):\n pulse=int(pulse) #make sure is integer\n cap = cv2.VideoCapture(CAM0, cv2.CAP_DSHOW) # use camera to monitor the motor-mirror assemnbly \n frames=[] \n\n low, high=self._lo_hi_preprocess(CLK_WISE)\n low_antiClk, high_antiClk=self._lo_hi_preprocess(ANTI_CLK_W) # ret in C byte format already\n div=divider #divide pulses into groups\n for _ in range(count): #loop count\n sub_pulse=pulse//div\n for __ in range(div): #no of times to cap image\n for ___ in range(sub_pulse): # 1st clockwise direction\n self.objdll.USBIO_GPIOWrite(self.id, low, WRITE_EN) #;sleep(self.delay)#disable for highest motor speed\n self.objdll.USBIO_GPIOWrite(self.id, high, WRITE_EN) #; sleep(self.delay)#disable for highest motor speed\n ret, frame = cap.read() # Capture frame-by-frame \n frames.append(frame) # store per group\n\n for __ in range(div): \n for ___ in range(sub_pulse): # 1st anticlockwise direction\n self.objdll.USBIO_GPIOWrite(self.id, low_antiClk, WRITE_EN) #;sleep(self.delay)#disable for highest motor speed\n self.objdll.USBIO_GPIOWrite(self.id, high_antiClk, WRITE_EN) #; sleep(self.delay)#disable for highest motor speed\n ret, frame = cap.read() # Capture frame-by-frame \n frames.append(frame) # store per group\n\n\n \n cap.release()\n cv2.destroyAllWindows()\n return np.asarray(frames)", "def phot_aperture(input_file):\n #set the original directory\n original_path = os.getcwd()\n save_path = input_file['save_path']\n planet = input_file['exoplanet']\n #radii = np.arange(input_file['apertures'][0],input_file['apertures'][1],0.1)\n radii = np.array(input_file['apertures'])\n #change to save data reduction directory\n os.chdir(save_path)\n if not os.path.exists('phot_results'):\n os.makedirs('phot_results')\n tempo = time.time()\n print 'Starting aperture photometry'\n print 'Saving results on: '+save_path+'/phot_results/'\n \n #check the number of objects to make the photometry\n N_obj = len(input_file['pxpositions'])/2.\n print 'Number of objects = ',N_obj\n positions = [] #create the positions variable (X,Y) in pixels unit on the CCD\n for i in range(len(input_file['pxpositions'])):\n if i % 2 == 0: #if the number is a even (or not a odd), the turple is created\n 
positions.append((input_file['pxpositions'][i],input_file['pxpositions'][i+1]))\n print 'Radius from ',radii[0],' to ',radii[-1],'\\n'\n \n skysection = input_file['skysection']\n skysection[0] = int(skysection[0])\n skysection[1] = int(skysection[1])\n \n images = sorted(glob.glob('AB'+planet+'*.fits'))\n for radius in radii:\n flux_data = []\n for i in range(len(images)):\n im = fits.getdata(images[i],header=False)\n im = array(im,dtype='Float64')\n \n # ERROR\n #Traceback (most recent call last):\n # File \"ExoTRed.py\", line 105, in <module>\n # exotred.phot_aperture(input_file)\n # File \"./sources/ExoTRed_core.py\", line 637, in phot_aperture \n # File \"/home/walter/bin/anaconda3/envs/iraf27/lib/python2.7/site-packages/photutils/background/background_2d.py\", line 329, in __init__\n # self._calc_bkg_bkgrms()\n # File \"/home/walter/bin/anaconda3/envs/iraf27/lib/python2.7/site-packages/photutils/background/background_2d.py\", line 686, in _calc_bkg_bkgrms\n # bkg = self._interpolate_meshes(self._bkg1d)\n # File \"/home/walter/bin/anaconda3/envs/iraf27/lib/python2.7/site-packages/photutils/background/background_2d.py\", line 575, in _interpolate_meshes\n # f = ShepardIDWInterpolator(yx, data)\n # File \"/home/walter/bin/anaconda3/envs/iraf27/lib/python2.7/site-packages/photutils/utils/interpolation.py\", line 138, in __init__\n # raise ValueError('The number of values must match the number '\n # ValueError: The number of values must match the number of coordinates.\n\n # bkg = background.background_2d.Background2D(im,tuple(skysection))\n # bkg_data = bkg.background\n # bkg_rms = bkg.background_rms\n\n # phot_table = aperture_photometry(im - bkg_data, CircularAperture(positions, radius),\n # error=bkg_rms, method ='center')#,effective_gain=float(input_file['gain']))\n ####### SUBSTITUTE ROUTINE\n window = 100\n sky_size = im.shape\n sky_mean = float(np.median(im[int(skysection[1]-window):int(skysection[1]+window),int(skysection[0]-window):int(skysection[0]+window)]))\n bkg = np.random.poisson(sky_mean,sky_size)\n apertures = CircularAperture(positions, radius)\n phot_table = aperture_photometry(im, apertures, error=bkg)\n #######\n phot_table_flux = np.array([]) #saving results of aperture photometry\n for j in range(len(phot_table['aperture_sum'])):\n phot_table_flux = np.concatenate((phot_table_flux,np.array([phot_table['aperture_sum'][j]])),axis=0)\n phot_table_flux = np.concatenate((phot_table_flux,np.array([phot_table['aperture_sum_err'][j]])),axis=0)\n flux = np.concatenate((phot_table_flux,np.array([images[i]])),axis=0)\n # flux = [phot_table['aperture_sum'][0], phot_table['aperture_sum'][1],phot_table['aperture_sum_err'][0],\n # phot_table['aperture_sum_err'][1],images[i]]\n flux_data.append(flux)\n flux_data = DataFrame(flux_data)#,columns=['hoststar','refstar','hoststar_err','refstar_err','image'])\n flux_data.to_csv('./phot_results/'+planet+'_flux_radius_'+str(radius)+'.csv',index=False)\n use.update_progress((float(np.where(radii == radius)[0])+1.)/len(radii))\n print 'Time total = ',abs(time.time()-tempo)/60.,' minutes'\n os.chdir(original_path)", "def use_effect(self):\n if self.preview_name in FILTERS:\n photo = Image.open(self.path.url[1:])\n preview = photo.filter(FILTERS.get(self.preview_name))\n preview.save(self.path.url[1:])", "def main():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--interval\", \"-t\", type=int,\n help=\"length of time in seconds between images\",\n default=60)\n parser.add_argument(\"--images\", \"-n\", type=int, help=\"number 
of images\", default=1)\n parser.add_argument(\"--output\", \"-o\",\n help=\"path to output directory\", default=\"./\")\n parser.add_argument(\"--rotation\", \"-r\",\n help=\"degrees to rotate pi image\", default=0)\n parser.add_argument(\"--iso\", \"-i\", type=int, help=\"camera iso 0 is auto mode\", default=0)\n parser.add_argument(\"--shutter\", \"-s\", type=int,\n help=\"camera shutter speed in microseconds 0 is auto mode\", default=0)\n parser.add_argument(\"--exposure\", \"-e\", help=\"exposure compensation set between -25 and +25\", default=0)\n args = parser.parse_args()\n interval = int(args.interval)\n images = int(args.images)\n output = args.output\n rotation = int(args.rotation)\n iso = int(args.iso)\n shutter_speed = int(args.shutter)\n exposure_compensation = int(args.exposure)\n\n\n timelapse(int=interval,\n n=images,\n dir=output,\n rotation=rotation,\n iso=iso,\n shutter=shutter_speed,\n exposure=exposure_compensation)", "def run_shoe_rack_manipulation(self, debug=False, push_in_distance=0.00):\n\n print(\"\\n\\n--- Running Shoe Manipulation-------\\n\\n\")\n\n # self.taskRunner.callOnMain(self._poser_visualizer.visualize_result)\n\n if not self.check_category_goal_estimation_succeeded():\n return False\n\n # check that we really are doing mug\n category_manipulation_type = self.state.cache['category_manipulation_goal']['type']\n assert category_manipulation_type == CategoryManipulationType.SHOE_ON_RACK\n\n\n speed = self.graspingParams['speed']['fast']\n self.moveHome(speed=speed)\n\n result = self.state.cache['category_manipulation_goal']['result']\n T_W_fingertip = ros_numpy.numpify(result.T_world_gripper_fingertip)\n T_W_fingertip_vtk = transformUtils.getTransformFromNumpy(T_W_fingertip)\n\n grasp_data = GraspData.from_gripper_fingertip_frame(T_W_fingertip)\n grasp_data.gripper.params[\"hand_inner_diameter\"] = result.gripper_width\n grasp_data.gripper.params[\"hand_inner_diameter\"] = 0.07\n self.state.grasp_data = grasp_data\n\n # rotate the grasp to align with nominal\n params = self.getParamsForCurrentLocation()\n grasp_z_axis_nominal = np.array(params['grasp']['grasp_nominal_direction'])\n grasp_data.rotate_grasp_frame_to_nominal(grasp_z_axis_nominal)\n\n\n\n\n def vis_function():\n vis.updateFrame(T_W_fingertip_vtk, \"gripper fingertip frame\", scale=0.15, parent=self._vis_container)\n\n vis.updateFrame(grasp_data.grasp_frame, \"grasp frame\", scale=0.15, parent=self._vis_container)\n\n self.visualize_grasp(grasp_data)\n\n self.taskRunner.callOnMain(vis_function)\n\n # execute the grasp\n force_threshold_magnitude = 30\n object_in_gripper = self.execute_grasp(grasp_data, close_gripper=True, use_cartesian_plan=True, force_threshold_magnitude=force_threshold_magnitude, push_in_distance=0.04, ee_speed_m_s=0.1)\n\n\n if not object_in_gripper:\n print(\"grasp failed, returning\")\n return False\n\n print \"object_in_gripper:\", object_in_gripper\n\n T_goal_obs = self.state.cache['category_manipulation_goal'][\"T_goal_obs\"]\n T_W_G = self.state.cache['gripper_frame_at_grasp']\n\n\n\n pre_grasp_pose = self.state.cache['pre_grasp_ik_response'].joint_state.position\n pickup_speed = self.graspingParams['speed']['pickup']\n\n if not object_in_gripper:\n # open the gripper and back away\n self.gripperDriver.send_open_gripper_set_distance_from_current()\n self.robotService.moveToJointPosition(pre_grasp_pose,\n maxJointDegreesPerSecond=\n pickup_speed)\n return False\n\n # pickup the object\n self.robotService.moveToJointPosition(pre_grasp_pose,\n 
maxJointDegreesPerSecond=\n pickup_speed)\n\n # move home\n self.moveHome()\n\n # move to approach pose\n speed = self.graspingParams['speed']['fast']\n q_approach = np.array(self._stored_poses_director[\"left_table\"][\"shoe_approach\"])\n self.robotService.moveToJointPosition(q_approach, maxJointDegreesPerSecond=speed)\n\n\n # compute some poses\n T_goal_obs = ros_numpy.numpify(result.T_goal_obs) # 4 x 4 numpy matrix\n T_goal_obs_vtk = transformUtils.getTransformFromNumpy(T_goal_obs)\n object_manip = ObjectManipulation(T_goal_object=T_goal_obs_vtk, T_W_G=T_W_G)\n object_manip.compute_transforms()\n T_W_Gn_vtk = object_manip.T_W_Gn # gripper to world for place pose\n\n T_pre_goal_obs = ros_numpy.numpify(result.T_pre_goal_obs)\n T_pre_goal_obs_vtk = transformUtils.getTransformFromNumpy(T_pre_goal_obs)\n object_manip_approach = ObjectManipulation(T_goal_object=T_pre_goal_obs_vtk, T_W_G=T_W_G)\n object_manip_approach.compute_transforms()\n T_W_Gn_approach_vtk = object_manip_approach.T_W_Gn\n\n # move this down by push_in_distance\n\n pos, quat = transformUtils.poseFromTransform(T_W_Gn_approach_vtk)\n T_W_Gn_approach_vtk = transformUtils.transformFromPose(pos, quat)\n\n\n # now convert these to ee poses for running IK\n pos, quat = transformUtils.poseFromTransform(T_W_Gn_vtk)\n pos[2] -= push_in_distance\n T_W_Gn_vtk = transformUtils.transformFromPose(pos, quat)\n T_W_ee_vtk = self.getIiwaLinkEEFrameFromGraspFrame(T_W_Gn_vtk)\n T_W_ee = transformUtils.getNumpyFromTransform(T_W_ee_vtk)\n\n T_W_ee_approach_vtk = self.getIiwaLinkEEFrameFromGraspFrame(T_W_Gn_approach_vtk)\n T_W_ee_approach = transformUtils.getNumpyFromTransform(T_W_ee_approach_vtk)\n\n\n # place the object\n force_threshold_magnitude = 50 # shoes are heavy\n q_nom = np.array(self._stored_poses_director[\"Grasping\"][\"above_table_pre_grasp\"])\n q_nom = np.array(self._stored_poses_director[\"left_table\"][\"above_table_pre_grasp\"])\n code =self.execute_place_new(T_W_ee, T_W_ee_approach, q_nom=q_nom, use_cartesian_plan=True, force_threshold_magnitude=force_threshold_magnitude)\n\n print(\"\\n\\n--- Finished Shoe Manipulation-------\\n\\n\")\n\n self._shoe_manipulation_counter += 1\n\n return code", "def stitchSpectra(lamb_list,count_list, method=\"scale\", edgeremove=(0, 0), shiftToPositive=False, dlambda=None):\r\n rawData=np.array([np.array(lamb_list),np.array(count_list)])\r\n rawData=rawData.swapaxes(0,1)\r\n coefficients = []\r\n print(\"Removing edges for stitching:\", *edgeremove)\r\n omitRight = rawData[0].shape[1] - math.floor(rawData[0].shape[1] * edgeremove[1])\r\n print(\"Stitching index range is \", 0, omitRight)\r\n processed = np.array(rawData[0][:, 0:omitRight]) \r\n if dlambda is None:\r\n dlambda = math.fabs(processed[0, 1] - processed[0, 0]) ## lambda steps of first spectrum are kept\r\n for i, spec in enumerate(rawData[1:]):\r\n omitLeft = math.floor(spec.shape[1] * edgeremove[0])\r\n omitRight = spec.shape[1] - math.floor(spec.shape[1] * edgeremove[1])\r\n print(\"Stitching index range is \", omitLeft, omitRight)\r\n if i == len(rawData)-2:\r\n spec = np.array(spec[:, omitLeft:]) ## do not shorten last array at end\r\n else:\r\n spec = np.array(spec[:, omitLeft:omitRight]) # shorten middle arrays at both sides\r\n print(\"Stitching spectrum in range\", np.min(spec[0,]), np.max(spec[0,]))\r\n # calculate overlap\r\n overlap = (np.min(spec[0,]), np.max(processed[0,])) \r\n #lambdas = np.arange(*overlap, dlambda)\r\n #leftfun = interp1d(processed[0,], processed[1,])\r\n #rightfun = interp1d(spec[0,], 
spec[1,])\r\n left = np.mean(processed[1, processed[0,] > overlap[0]]) ##mean of counts of overlap\r\n right = np.mean(spec[1, spec[0,] < overlap[1]])\r\n if method == \"shift\":\r\n # calculate offset in overlap region\r\n offset = left - right\r\n print(\"Stitching offset %s in overlap\", offset, *overlap)\r\n # add shifted spectrum\r\n spec[1,] = spec[1,] + offset\r\n coefficients.append(offset)\r\n elif method == \"scale\":\r\n # calculate factor in overlap region\r\n factor = left/right\r\n print(\"Stitching factor\"+str(factor)+\" in overlap \", *overlap)\r\n spec[1,] = spec[1,] * factor\r\n coefficients.append(factor)\r\n processed = np.concatenate([processed, spec], axis=1)\r\n # interpolate data on grid\r\n interpolated = interp1d(processed[0,], processed[1,])\r\n lambdas = np.arange(processed[0, 0], processed[0, -1], dlambda)\r\n specdata = interpolated(lambdas)\r\n # shift above 0\r\n if shiftToPositive:\r\n minimum = np.min(specdata)\r\n if minimum < 0:\r\n specdata += math.fabs(minimum)\r\n \r\n return (lambdas,specdata,coefficients)", "def __call__(self):\n return self.shoot()", "def run_std_sift(self): \n for i in range(2):\n image = 'input_'+str(i)+'.png'\n label = 'im'+str(i)\n f = open(self.work_dir+'keys_'+label+'.txt','w')\n sift = self.run_proc(['sift_cli', image], stdout=f)\n self.wait_proc(sift, timeout=self.timeout)\n return 1", "def burning_ship(pixels, width, height, max_iterations, re_start, re_end, im_start, im_end, color_hue,\n color_saturation, color_intensity):\n\n for x in prange(0, width):\n for y in prange(0, height):\n c = complex((re_start + (x / width) * (re_end - re_start)),\n (im_start + (y / height) * (im_end - im_start)))\n z = 0.0j\n\n iterations = 0\n while (abs(z) < 4.0) and iterations < max_iterations:\n abs_z = complex(abs(z.real), abs(z.imag))\n z = abs_z * abs_z + c\n iterations += 1\n\n # Color smoothing\n smooth_iterations = iterations - math.log(math.log(z.real * z.real + z.imag * z.imag)) + 4.0\n\n if iterations >= max_iterations:\n pixels[x, y, 0] = 0\n pixels[x, y, 1] = 0\n pixels[x, y, 2] = 0\n else:\n pixels[x, y, 0] = 255 * (color_hue / 360)\n pixels[x, y, 1] = 255 * color_saturation\n pixels[x, y, 2] = 255 * min(color_intensity * smooth_iterations / max_iterations, 1)", "def main():\n # color = rb.Color.BLUE.value\n # move_to_color(color)\n infared_sensor()\n\n # WHITE/RED does not work same with the BLUE/GREEN going down", "def overlay(self):\n # retrieve header for photometry keywords\n # from current frame only\n hdr_str = self.run('fits header', via='get')\n\n # read it in to a fits header\n phdr = fits.Header()\n hdr = phdr.fromstring(hdr_str, sep='\\n')\n\n try:\n srcposx = hdr['SRCPOSX'] + 1\n srcposy = hdr['SRCPOSY'] + 1\n s1 = 'point({:f} {:f}) # ' \\\n 'point=x ' \\\n 'color=blue tag={{srcpos}} '\\\n 'text=SRCPOS'.format(srcposx, srcposy)\n self.run('regions', s1)\n except (KeyError, ValueError):\n pass\n try:\n stcentx = hdr['STCENTX'] + 1\n stcenty = hdr['STCENTY'] + 1\n photaper = hdr['PHOTAPER']\n photskap = [float(x) for x in hdr['PHOTSKAP'].split(',')]\n s1 = 'point({:f} {:f}) # ' \\\n 'point=x ' \\\n 'color=cyan tag={{srcpos}}'.format(stcentx, stcenty)\n self.run('regions', s1)\n s2 = 'circle({:f} {:f} {:f}) # ' \\\n 'color=cyan tag={{srcpos}}'.format(\n stcentx, stcenty, photaper)\n self.run('regions', s2)\n s3 = 'annulus({:f} {:f} {:f} {:f}) # ' \\\n 'color=cyan tag={{srcpos}} text=STCENT'.format(\n stcentx, stcenty, photskap[0], photskap[1])\n self.run('regions', s3)\n except (KeyError, ValueError):\n pass\n 
try:\n stcentx = hdr['STCENTX'] + 1\n stcenty = hdr['STCENTY'] + 1\n flux = hdr['STAPFLX']\n sky = hdr['STAPSKY']\n s1 = 'text({:f} {:f}) # color=cyan ' \\\n 'text=\"Flux={:.2f}, Sky={:.2f}\"'.format(\n stcentx, stcenty - 40, flux, sky)\n self.run('regions', s1)\n except (KeyError, ValueError):\n pass\n\n # try overlaying apertures as well\n try:\n self.overlay_aperture(hdr)\n except ValueError: # pragma: no cover\n # may be encountered with extensions with\n # unexpected WCSs\n pass", "def run(self,workspace):\n image_name = self.image_name.value\n cpimage = workspace.image_set.get_image(image_name)\n image = cpimage.pixel_data\n mask = cpimage.mask\n workspace.display_data.statistics = []\n level = int(self.atrous_level.value)\n\n wavelet = self.a_trous(1.0*image, level+1)\n wlevprod = wavelet[:,:,level-1] * 3.0\n\n spotthresh = wlevprod.mean() + float(self.noise_removal_factor.value) * wlevprod.std()\n tidx = wlevprod < spotthresh\n wlevprod[tidx] = 0\n\n wlevprod = self.circular_average_filter(wlevprod, int(self.smoothing_filter_size.value))\n wlevprod = self.smooth_image(wlevprod, mask)\n\n max_wlevprod = scipy.ndimage.filters.maximum_filter(wlevprod,3)\n maxloc = (wlevprod == max_wlevprod)\n twlevprod = max_wlevprod > float(self.final_spot_threshold.value)\n maxloc[twlevprod == 0] = 0\n \n labeled_image,object_count = scipy.ndimage.label(maxloc,\n np.ones((3,3),bool))\n\n unedited_labels = labeled_image.copy()\n # Filter out objects touching the border or mask\n border_excluded_labeled_image = labeled_image.copy()\n labeled_image = self.filter_on_border(image, labeled_image)\n border_excluded_labeled_image[labeled_image > 0] = 0\n \n # Relabel the image\n labeled_image,object_count = relabel(labeled_image)\n new_labeled_image, new_object_count = self.limit_object_count(\n labeled_image, object_count)\n if new_object_count < object_count:\n # Add the labels that were filtered out into the border\n # image.\n border_excluded_mask = ((border_excluded_labeled_image > 0) |\n ((labeled_image > 0) & \n (new_labeled_image == 0)))\n border_excluded_labeled_image = scipy.ndimage.label(border_excluded_mask,\n np.ones((3,3),bool))[0]\n object_count = new_object_count\n labeled_image = new_labeled_image\n \n # Make an outline image\n outline_image = cellprofiler.cpmath.outline.outline(labeled_image)\n outline_border_excluded_image = cellprofiler.cpmath.outline.outline(border_excluded_labeled_image)\n \n if self.show_window:\n statistics = workspace.display_data.statistics\n statistics.append([\"# of accepted objects\",\n \"%d\"%(object_count)])\n\n workspace.display_data.image = image\n workspace.display_data.labeled_image = labeled_image\n workspace.display_data.border_excluded_labels = border_excluded_labeled_image\n\n # Add image measurements\n objname = self.object_name.value\n measurements = workspace.measurements\n cpmi.add_object_count_measurements(measurements,\n objname, object_count)\n # Add label matrices to the object set\n objects = cellprofiler.objects.Objects()\n objects.segmented = labeled_image\n objects.unedited_segmented = unedited_labels\n objects.parent_image = image\n \n workspace.object_set.add_objects(objects,self.object_name.value)\n cpmi.add_object_location_measurements(workspace.measurements, \n self.object_name.value,\n labeled_image)\n if self.should_save_outlines.value:\n out_img = cpi.Image(outline_image.astype(bool),\n parent_image = image)\n workspace.image_set.add(self.save_outlines.value, out_img)", "def sim_image_ramp(det, im_slope, verbose=False, **kwargs):\n 
if verbose:\n _log.info('Generating image acquisition ramp...')\n\n return sim_dark_ramp(det, im_slope, ramp_avg_ch=None, verbose=False, **kwargs)", "def make_spectra(directory,frame):\n oober = st.short_oober(directory, frame=frame)\n #st.MakeVelocitySpectra(oober,frame)\n #st.MakeAccelSpectra(oober,frame)\n #st.MakeMagneticSpectra(oober,frame)\n st.MakeDensitySpectra(oober,frame)", "def punched(self):\n if not self.dizzy:\n self.dizzy= 1\n self.original= self.image", "def write_images(band,skypos,tranges,skyrange,write_cnt=False,write_int=False,write_rr=False,framesz=0,width=False,height=False,verbose=0,tscale=1000.,memlight=False,coadd=False,response=False,calpath='../cal/',clobber=False,retries=20):\n\t# No files were requested, so don't bother doing anything.\n\tif not (write_cnt or write_int or write_rr):\n\t\treturn\n\tcount,rr,intensity=create_images(band,skypos,tranges,skyrange,framesz=framesz,width=width,height=height,verbose=verbose,tscale=tscale,memlight=memlight,coadd=coadd,response=response,calpath=calpath,retries=retries)\n\n\t# Add a conditional so that this is only created for multi-frame images\n\ttbl = movie_tbl(band,tranges,framesz=framesz,verbose=verbose,retries=retries)\n\n\tif write_cnt:\n\t\thdu = pyfits.PrimaryHDU(count)\n\t\thdu = fits_header(band,skypos,tranges,skyrange,width=width,height=height,verbose=verbose,tscale=tscale,hdu=hdu,retries=retries)\n\t\thdulist = pyfits.HDUList([hdu,tbl])\n\t\tif verbose:\n\t\t\tprint 'Writing count image to '+str(write_cnt)\n\t\thdulist.writeto(write_cnt,clobber=clobber)\n\tif write_rr:\n\t\thdu = pyfits.PrimaryHDU(rr)\n\t\thdu = fits_header(band,skypos,tranges,skyrange,width=width,height=height,verbose=verbose,tscale=tscale,hdu=hdu,retries=retries)\n\t\thdulist = pyfits.HDUList([hdu,tbl])\n\t\tif verbose:\n\t\t\tprint 'Writing response image to '+str(write_rr)\n hdulist.writeto(write_rr,clobber=clobber)\n\tif write_int:\n\t\thdu = pyfits.PrimaryHDU(intensity)\n\t\thdu = fits_header(band,skypos,tranges,skyrange,width=width,height=height,verbose=verbose,tscale=tscale,hdu=hdu,retries=retries)\n\t\thdulist = pyfits.HDUList([hdu,tbl])\n\t\tif verbose:\n\t\t\tprint 'Writing intensity image to '+str(write_int)\n\t\thdulist.writeto(write_int,clobber=clobber)\n\n\treturn", "def psfphot(image, clobber=globclob, verbose=globver, pixtol=3.0,\n maxnpsf=5, interact=yes):\n\n # Defaults / constants\n psfmult=5.0 #standard factor (multiplied by fwhm to get psfradius)\n psfmultsmall=3.0 #similar to psfmult, adjusted for nstar and substar\n\n # Necessary package\n iraf.imutil()\n\n # Detect stars\n iqpkg.iqobjs(image, 3.0, 50000.0, wtimage=\"\", skyval=\"!MEDSKY\")\n\n root = image[:-5]\n [gain, rnoise, fwhm] = get_head(image, [\"GAIN\", \"READNOI\", \"SEEPIX\"])\n fwhm = float(fwhm); rnoise = float(rnoise)\n\n iraf.iterstat(image)\n \n # Saturation level\n if not check_head(image, \"SATURATE\"):\n saturate = 60000.0\n else:\n saturate = get_head(image, \"SATURATE\")\n \t \n # Update datapars and daopars\n iraf.datapars.fwhmpsf=fwhm\n iraf.datapars.sigma=iraf.iterstat.sigma\n iraf.datapars.datamin=iraf.iterstat.median-10*iraf.iterstat.sigma\n iraf.datapars.datamax=70000.0\n iraf.datapars.readnoise=rnoise\n iraf.datapars.epadu=gain \n iraf.daopars.psfrad=psfmult*fwhm\n iraf.daopars.fitrad=fwhm\n iraf.daopars.function=\"gauss,moffat15,moffat25,lorentz,penny1\"\n\n # coo file\n stars = Starlist(\"%s.stars\" % image)\n outf = open(\"%s.coo.1\" % image[:-5], \"w\")\n for star in stars:\n outf.write(\"%10.3f%10.3f\\n\" % (star.xval, star.yval))\n 
outf.close()\n\n #initial photometry\n iraf.daophot.phot(root,'default','default',aperture=fwhm,verify=no,\n verbose=verbose)\n\n iraf.datapars.datamax=30000.0\n iraf.pstselect(root,'default','default',maxnpsf,interactive=yes,\n verify=no,verbose=verbose)\n\n iraf.psf(root,'default','default','default','default','default',\n interactive=interact,verify=no,verbose=verbose)\n\n iraf.allstar(root,'default','default','default','default','default',\n verify=no,verbose=verbose)\n\n iraf.iterstat(\"%s.sub.fits\" % root)\n\n iraf.datapars.sigma=iraf.iterstat.sigma\n iraf.datapars.datamin=iraf.iterstat.median-10*iraf.iterstat.sigma\n\n iraf.datapars.datamax=70000.0\n iraf.daophot.phot(\"%s.sub.fits\" % root, \"SN.coo\", 'default', 'default',\n aperture=fwhm, verify=no, verbose=verbose)\n\n iraf.datapars.datamax=30000.0\n iraf.daopars.fitrad=fwhm*2.0\n iraf.allstar(\"%s.sub.fits\" % root, 'default', \"%s.psf.1.fits\" % root, \n 'default', 'default', 'default', verify=no, verbose=no)", "def get_sky(plate, mjd, output_path, verbose=False):\n tag = f'PLATE {plate:05d} MJD {mjd:05d} PATH {output_path}'\n if verbose:\n print('Starting {}'.format(tag))\n # Initialize output data.\n last_nexp = None\n plugmaps = []\n wlens = {'b': [], 'r': []}\n wdisps = {'b': [], 'r': []}\n fluxes = {'b': [], 'r': []}\n ivars = {'b': [], 'r': []}\n flats = {'b': [], 'r': []}\n rdnoises = {'b': [], 'r': []}\n masks = {'b': [], 'r': []}\n obskeys = ('EXPOSURE', 'TAI-BEG', 'EXPTIME', 'AZ', 'ALT', 'AIRMASS',\n 'PRESSURE', 'AIRTEMP',\n 'RDNOISE0', 'RDNOISE1', 'RDNOISE2', 'RDNOISE3')\n obsvals = {key: [] for key in obskeys}\n # Size of each amplifier in raw image pixels along (wlen, tracex) axes.\n ampsize = {'b': (2056, 2048), 'r': (2064, 2057)}\n # ampx[band] tabulates whether each wavelength index is readout by\n # amplifier 0/2 (=0) or 1/3 (=1).\n ampx = {'b': 1 * (np.arange(4112) >= 2056),\n 'r': 1 * (np.arange(4128) >= 2064)}\n # amplifer[band] is a function that takes a traceset as input an returns an\n # array that tabulates whether each wavelength index is readout by\n # amplifier 0-3.\n amplifier = {'b': lambda x: 2 * (x >= 2048) + ampx['b'],\n 'r': lambda x: 2 * (x >= 2057) + ampx['r']}\n # Scaling such that RMS = rdnoise_scale * RDNOISEn * neff.\n rdnoise_scale = (4 * np.pi) ** 0.25\n # Conversion from constant log-lambda pixels to wavelength ratio.\n wdisp_const = 1e-4 * np.log(10)\n # Allowed pixel mask bits.\n valid_mask = (1 << 32) - 1\n # Slices of valid data to save. 
These trim pixels at each end where\n # IVAR=0 or other serious pixel mask bits are often set.\n valid_slices = {'b': slice(767, 3299), 'r': slice(483, 3668) }\n # Initialize data access.\n finder = bossdata.path.Finder()\n mirror = bossdata.remote.Manager()\n # Loop over spectrographs.\n expected_fibers = []\n for specidx in 1, 2:\n # Load the list of science exposures used for this spectrograph's coadd.\n fiber = 500 * (specidx - 1) + 1\n spec_name = finder.get_spec_path(plate, mjd, fiber=fiber, lite=True)\n exposures = bossdata.spec.SpecFile(mirror.get(spec_name)).exposures\n for band in 'b', 'r':\n camera = '{}{}'.format(band, specidx)\n use = valid_slices[band]\n # Loop over science exposures for this camera.\n nexp = exposures.num_by_camera[camera]\n if not (last_nexp is None or nexp == last_nexp):\n print(f'Different nexp for {camera} {tag}')\n return None\n last_nexp = nexp\n for expidx in range(nexp):\n # Load this camera's spFrame file.\n name = exposures.get_exposure_name(expidx, camera, 'spFrame')\n path = mirror.get(finder.get_plate_path(plate, name))\n spFrame = bossdata.plate.FrameFile(path, calibrated=False)\n # Lookup this spectrograph's sky fibers.\n sky_name = binary_type('SKY ', 'ascii')\n fiberidx = np.where(\n spFrame.plug_map['OBJTYPE'] == sky_name)[0]\n if expidx == 0 and band == 'b':\n # Save plugmap metadata.\n plugmaps.append(spFrame.plug_map[\n ['FIBERID','RA','DEC','XFOCAL','YFOCAL']][fiberidx])\n if specidx == 2:\n plugmap = astropy.table.vstack(plugmaps)\n if specidx == 1 and band == 'b':\n # Record observation metadata.\n for key in obskeys:\n try:\n value = spFrame.header[key]\n except KeyError:\n value = -999 # invalid value for int/float types\n obsvals[key].append(value)\n # Load the sky fiber data.\n fibers = spFrame.plug_map['FIBERID'][fiberidx].data\n assert np.all(fiberidx == spFrame.get_fiber_offsets([fibers]))\n if expidx == 0 and band == 'b':\n expected_fibers.append(fibers)\n if verbose:\n print('Found {} sky fibers on spec{}: {}.'.format(\n len(fibers), specidx,\n ','.join([str(f) for f in fibers])))\n else:\n if not np.all(fibers == expected_fibers[specidx - 1]):\n print('Did not get expected fibers for {} exp {}'\n .format(camera, expidx))\n data = spFrame.get_valid_data(\n fibers, include_sky=True, include_wdisp=True, use_ivar=True,\n pixel_quality_mask=valid_mask)\n if verbose:\n print('Reading {} for exposure {} / {}...'\n .format(camera, expidx + 1, nexp))\n assert data.shape == (len(fibers), 2 * ampsize[band][0])\n mask = spFrame.get_pixel_masks(fibers)\n masks[band].append(mask[:, use])\n # Identify pixels with valid data.\n valid = ~data['ivar'].mask\n bad_fibers = ~np.any(valid, axis=1)\n if verbose and np.any(bad_fibers):\n print(' bad fibers: {}'.format(fibers[bad_fibers]))\n ivar = data['ivar'].data\n assert np.all(ivar[valid] > 0)\n ivars[band].append(ivar[:, use])\n # Load the superflat and trace vectors for sky fibers.\n superflat = spFrame.get_superflat(fibers)\n tracex = spFrame.hdulist[7].read()[fiberidx]\n # Load fiberflat and neff vectors from this camera's spFlat.\n name = exposures.get_exposure_name(expidx, camera, 'spFlat')\n path = mirror.get(finder.get_plate_path(plate, name))\n with fits.open(path) as spFlat:\n fiberflat = spFlat[0].data[fiberidx]\n neff = bossdata.plate.TraceSet(spFlat[3]).get_y()[fiberidx]\n if np.any(neff[valid] <= 0):\n print(f'WARNING: neff <= 0 for {camera} {expidx} {tag}')\n # Lookup the per-amplifier readnoise values.\n readnoises = np.array([\n spFrame.header['RDNOISE{}'.format(amp)]\n for amp 
in range(4)], dtype=np.float32)\n # Determine which amplifier (0-3) each pixel along the trace is\n # read out by and scale to RMS readnoise per wavelength pixel.\n amp = amplifier[band](tracex)\n rdnoise = rdnoise_scale * readnoises[amp] * neff\n rdnoises[band].append(rdnoise[:, use].astype(np.float32))\n # Combine the superflat and fiberflat.\n flat = superflat * fiberflat\n assert np.all(flat[valid] > 0)\n flats[band].append(flat[:, use])\n # Save wavelength solutions in angstroms.\n wlen = data['wavelength'].data\n wlens[band].append(wlen[:, use])\n # Save wavelength dispersions in angstroms.\n wdisp = data['wdisp'].data\n assert np.all(wdisp[valid] > 0)\n wdisp = wlen * np.expm1(wdisp_const * wdisp)\n wdisps[band].append(wdisp[:, use])\n # Save the combined flat-fielded sky models + residuals,\n # which might be negative due to readnoise.\n flux = data['flux'].data + data['sky'].data\n fluxes[band].append(flux[:, use])\n # Build observation metadata table.\n obslist = astropy.table.Table()\n for key in obskeys:\n obslist[key] = obsvals[key]\n # Build the output HDU list.\n hdus = fits.HDUList()\n cards = dict(PLATE=plate, MJD=mjd, NFIBERS=len(plugmap), NEXP=nexp)\n hdus.append(fits.PrimaryHDU(header=fits.Header(cards)))\n hdus.append(fits.table_to_hdu(obslist))\n hdus[-1].name = 'OBSLIST'\n hdus.append(fits.table_to_hdu(plugmap))\n hdus[-1].name = 'PLUGMAP'\n for band in 'b', 'r':\n Band = band.upper()\n # Combine arrays for each band and save an an image HDU.\n hdus.append(fits.ImageHDU(np.vstack(wlens[band]),\n name='{}WLEN'.format(Band)))\n hdus.append(fits.ImageHDU(np.vstack(wdisps[band]),\n name='{}WDISP'.format(Band)))\n hdus.append(fits.ImageHDU(np.vstack(rdnoises[band]),\n name='{}RDNOISE'.format(Band)))\n hdus.append(fits.ImageHDU(np.vstack(flats[band]),\n name='{}FLAT'.format(Band)))\n hdus.append(fits.ImageHDU(np.vstack(fluxes[band]),\n name='{}FLUX'.format(Band)))\n hdus.append(fits.ImageHDU(np.vstack(ivars[band]),\n name='{}IVAR'.format(Band)))\n hdus.append(fits.ImageHDU(np.vstack(masks[band]),\n name='{}MASK'.format(Band)))\n name = os.path.join(output_path, 'sky-{}-{}.fits'.format(plate, mjd))\n hdus.writeto(name, overwrite=True)\n print('Completed {}'.format(tag))\n return obslist", "def skycombine(dir = 'Objects'):\n \n if dir ==\"Objects\":\n dir = 'Objects/*/*/flat_corrected/'\n \n for d in glob(dir):\n \n directory = \"/\".join(d.split('/')[0:2]) + '/swarped'\n if not os.path.exists(directory):\n os.makedirs(directory)\n \n keys = ['OBJECTS', 'ITIME', 'FWINAME', 'OBSDATE', 'CAMNAME', 'HISTORY', 'FLSPECTR']\n images = ImageFileCollection(d, keywords = keys, glob_include = 'f*.fits')\n \n swarpfilter(d, dir, directory, images, keys, filter='H', lamp = '*', camera = 'narrow',\n done='Dark Subtracted', output='cKSkyNarrowH', type='EQUATORIAL')\n swarpfilter(d, dir, directory, images, keys, filter='H',lamp = '*', camera = 'wide', \n done='Dark Subtracted', output='cKSkyWideH', type='EQUATORIAL')\n swarpfilter(d, dir, directory, images, keys, filter='J',lamp = '*', camera = 'narrow', \n done='Dark Subtracted', output='cKSkyNarrowJ', type='EQUATORIAL')\n swarpfilter(d, dir, directory, images, keys, filter='J', lamp = '*',camera = 'wide', \n done='Dark Subtracted', output='cKSkyWideJ', type='EQUATORIAL') \n swarpfilter(d, dir, directory, images, keys, filter='Ks',lamp = '*', camera = 'narrow', \n done='Dark Subtracted', output='cKSkyNarrowKs', type='EQUATORIAL')\n swarpfilter(d, dir, directory, images, keys, filter='Ks',lamp = '*', camera = 'wide', \n done='Dark 
Subtracted', output='cKSkyWideKs', type='EQUATORIAL')\n swarpfilter(d, dir, directory, images, keys, filter='Lp',lamp = '*', camera = 'narrow', \n done='Dark Subtracted', output='cKSkyNarrowLp', type='EQUATORIAL')\n swarpfilter(d, dir, directory, images, keys, filter='Lp',lamp = '*', camera = 'wide', \n done='Dark Subtracted', output='cKSkyNarrowLp', type='EQUATORIAL')", "def main(s0, s1, s2, s3, delayFunc, dm, decimate = 0):\n\n\t# Ignore, testing\n\n\n\t# Get the delays (in seconds)\n\tprint(\"Finding time delays...\")\n\tdelays = delayFunc(absftop, -100. / 512, nchans, dm)\n\tprint(\"Delays (s):\")\n\tprint(delays)\n\tdelays /= sampleRate\n\tdelays = delays.astype(int)\n\n\t# Build up a \n\toutputLen = int(s0.shape[0] - np.max(delays))\n\tdataOut = np.ones((outputLen, nchans), dtype = np.int32)\n\tprint(delays)\n\n\t# Terribl approach to RFI\n\tzapchans = list(range(150,200)) + list(range(280, 290)) + list(range(305, 320)) + list(range(450, nchans))\n\n\t# SImple implementation that works, but isn't cache efficient\n\tprint(\"Forming Stokes I + Dedispersing... processing channel:\")\n\tfor i in range(nchans):\n\t\tif i in zapchans:\n\t\t\tcontinue\n\t\tprint(f\"{i} ({100 * i / min(430,nchans)} %)\", end = '\\r')\n\t\tdataOut[..., i] += np.square(s0[delays[i]: delays[i] + outputLen, i].astype(np.int32))\n\t\tdataOut[..., i] += np.square(s1[delays[i]: delays[i] + outputLen, i].astype(np.int32))\n\t\tdataOut[..., i] += np.square(s2[delays[i]: delays[i] + outputLen, i].astype(np.int32))\n\t\tdataOut[..., i] += np.square(s3[delays[i]: delays[i] + outputLen, i].astype(np.int32))\n\n\t# Cache efficient but untested\n\t\"\"\"\n\tprint(\"Forming Stokes I + Dedispersing... processing sample:\")\n\tblockCount = 128\n\tblockSize = int(dataOut.shape[0] / blockCount)\n\tfor i in range(blockCount):\n\t\tif i in zapchans:\n\t\t\tcontinue\n\t\tprint(f\"{i* blockSize} -> {(i + 1) * blockSize} ({100 * i / blockCount:07.3f} %)\", end = '\\r')\n\t\tdataOut[i * blockSize: (i+1) * blockSize, :] += np.square(s0[i * blockLength + delays[i]: (i + 1) * blockLength + delays[i] + outputLen, i].astype(np.int32))\n\t\tdataOut[i * blockSize: (i+1) * blockSize, :] += np.square(s1[i * blockLength + delays[i]: (i + 1) * blockLength + delays[i] + outputLen, i].astype(np.int32))\n\t\tdataOut[i * blockSize: (i+1) * blockSize, :] += np.square(s2[i * blockLength + delays[i]: (i + 1) * blockLength + delays[i] + outputLen, i].astype(np.int32))\n\t\tdataOut[i * blockSize: (i+1) * blockSize, :] += np.square(s3[i * blockLength + delays[i]: (i + 1) * blockLength + delays[i] + outputLen, i].astype(np.int32))\n\t\"\"\"\n\n\tif decimate:\n\t\tprint(\"Decimating...\")\n\t\trollingSum = np.cumsum(dataOut, axis = 0)\n\t\tdataOut = rollingSum[decimate::decimate, :] - rollingSum[:-decimate:decimate, :]\n\n\n\tprint(\"Plotting...\")\n\tplt.figure(figsize = (24,12))\n\tplt.imshow(dataOut.T, aspect = 'auto', vmax = np.percentile(dataOut, 95), vmin = np.percentile(dataOut, 33))\n\tplt.savefig(f'./debugfull_{datetime.datetime.now().isoformat()}.png')\n\n\tplt.figure(figsize = (24,12))\n\tplt.imshow(np.log10(dataOut.T), aspect = 'auto', vmax = np.log10(np.percentile(dataOut, 95)), vmin = np.log10(np.percentile(dataOut, 33)))\n\tplt.savefig(f'./debugfull2_{datetime.datetime.now().isoformat()}.png')\n\t\n\tplt.figure(figsize = (24,12))\n\td1 = dataOut[:, :100].sum(axis = 1)\n\td1 -= np.mean(d1, dtype = np.int64)\n\td2 = dataOut[:, 100:200].sum(axis = 1)\n\td2 -= np.mean(d2, dtype = np.int64)\n\td3 = dataOut[:, 200:300].sum(axis = 1)\n\td3 -= 
np.mean(d3, dtype = np.int64)\n\td4 = dataOut[:, 300:].sum(axis = 1)\n\td4 -= np.mean(d4, dtype = np.int64)\n\tplt.plot(d1, alpha = 0.3, label = '1')\n\tplt.plot(d2, alpha = 0.3, label = '2')\n\tplt.plot(d3, alpha = 0.3, label = '3')\n\tplt.plot(d4, alpha = 0.3, label = '4')\n\tplt.legend()\n\tplt.savefig(f'./debug_{datetime.datetime.now().isoformat()}.png')\n\n\tprint(\"Done!\")", "def activate(self):\n self.scriptedEffect.showEffectCursorInSliceView = False\n self.clippedMasterImageData = None", "def run_sift(self):\n paradic = self.cfg['param']['paradic']\n for i in range(2):\n image = 'input_'+str(i)+'.png'\n label = 'im'+str(i)\n f = open(self.work_dir+'keys_'+label+'.txt','w')\n sift = self.run_proc(['sift_cli', image, label,\n str(paradic['n_oct']),\n str(paradic['n_spo']),\n str(paradic['sigma_min']),\n str(paradic['delta_min']),\n str(paradic['sigma_in']),\n str(paradic['C_DoG']),\n str(paradic['C_edge']),\n str(paradic['n_bins']),\n str(paradic['lambda_ori']),\n str(paradic['t']),\n str(paradic['n_hist']),\n str(paradic['n_ori']),\n str(paradic['lambda_descr'])],\n stdout=f) \n self.wait_proc(sift, timeout=self.timeout) \n return 1", "def _generate_direct_image(self):\n filename = '0000_flt.fits'\n\n di_start_JD = (self.exp_start_times[0] - 1 * u.min).to(u.day)\n di_exp_gen = ExposureGenerator(self.detector, self.grism, self.NSAMP,\n self.SAMPSEQ, self.SUBARRAY,\n self.planet, filename, di_start_JD)\n\n try: # assume that its a list not a single value\n x_ref = self.x_ref[0]\n except TypeError:\n x_ref = self.x_ref\n\n try: # assume that its a list not a single value\n y_ref = self.y_ref[0]\n except TypeError:\n y_ref = self.y_ref\n\n exp = di_exp_gen.direct_image(x_ref, y_ref)\n exp.generate_fits(self.outdir, '0000_flt.fits')", "def bias(s='bias'):\n s = s.strip()[:80] #truncate to 80 char to fit in FITS header\n exptime(0.0)\n print camera.SetShutter(2)\n camera.status.imgtype = 'BIAS'\n camera.status.object = s\n camera.status.update()", "def _full_speed_rumble(self, images, duration):\n while duration > 0:\n self.microbit.display.show(images[0]) # pylint: disable=no-member\n time.sleep(0.04)\n self.microbit.display.show(images[1]) # pylint: disable=no-member\n time.sleep(0.04)\n duration -= 0.08", "def exposure():\n def r(x):\n return x/6e4\n\n def w(x):\n return int(x*6e4)\n return r, w", "def OFlow(testdirs, opath):\n\n\t# create directories if doesn't exist\n\tif not os.path.exists(opath):\n\t\tos.makedirs(opath)\n\n\tif not os.path.exists(opath+'/each_image'):\n\t\tos.makedirs(opath+'/each_image')\n\t\t\n\tif not os.path.exists(opath+'/masks'):\n\t\tos.makedirs(opath+'/masks')\n\t\t\n\tif not os.path.exists(opath+'/scaled_masks'):\n\t\tos.makedirs(opath+'/scaled_masks')\n\n\tdcount = 0\t# directory count\n\n\tfor d in testdirs:\n\n\t\th = d[-65:-1]\t# hash\n\n\t\t# create directory for each hash to save individual optical flow images separately\n\t\tif not os.path.exists(opath+'/each_image/'+h):\n\t\t\tos.makedirs(opath+'/each_image/'+h)\n\n\t\tprvs = cv2.imread(d + 'frame0000.png', 0)\t# previous image\n\n\t\ts = (prvs.shape[0], prvs.shape[1], 3)\t# hsv image shape\n\n\t\thsv = np.zeros(s, np.uint8)\n\t\thsv[...,1] = 255\n\t\t\n\t\tms = (prvs.shape[0], prvs.shape[1])\t\t# mask shape\n\t\t\n\t\tmask = np.zeros(ms, np.uint8)\n\t\tsum_mask = np.zeros(ms, np.uint8)\n\t\tscaled_mask = np.zeros(ms, np.uint8)\n\t\t\n\t\tprint(\"dir: \",dcount,\" dim: \",sum_mask.shape)\n\t\tdcount += 1\n\t\t\n\t\tflag = 0\n\n\t\tfor i in range(1,100):\n\t\t\tnxt = cv2.imread(d + 
'frame00'+str(i).zfill(2)+'.png', 0)\t# next image\n\t\t\tflow = cv2.calcOpticalFlowFarneback(prvs,nxt, None, 0.5, 3, 15, 3, 5, 1.2, 0)\n\t\t\tmag, ang = cv2.cartToPolar(flow[...,0], flow[...,1])\t# calculates magnitude and angles of 2D vectors\n\t\t\t\n\t\t\thsv[...,0] = ang*180/np.pi/2\n\t\t\t\n\t\t\thsv[...,2] = cv2.normalize(mag,None,0,255,cv2.NORM_MINMAX)\n\t\t\tbgr = cv2.cvtColor(hsv,cv2.COLOR_HSV2BGR)\n\t\t\tomg = cv2.cvtColor(bgr,cv2.COLOR_BGR2GRAY)\n\t#\t\tprint(omg.dtype)\n\t#\t\tprint(omg.shape)\n\t\t\t\n\t\t\tcv2.imshow('frame1',omg)\n\t\t\tom = Image.fromarray(omg)\n\t\t\tom.save(opath+'/each_image/'+h+'/frame00'+str(i).zfill(2)+'.png', 0)\n\t\t\t\n\t\t\tomg2 = omg\n\n\t\t\t# scaling with random hardcoded values\n\t\t\tfor r in range(omg2.shape[0]):\n\t\t\t\tfor c in range(omg2.shape[1]):\n\t\t\t\t\tif omg2[r][c] < 32:\n\t\t\t\t\t\tsum_mask[r][c] += 0\n\t\t\t\t\telif omg2[r][c] < 128:\n\t\t\t\t\t\tsum_mask[r][c] += 1\n\t\t\t\t\telse:\n\t\t\t\t\t\tsum_mask[r][c] += 2\n\n\t\t\tflag = cv2.waitKey(30) & 0xff\n\t\t\tif flag == 27:\t\t# press ESC to exit\n\t\t\t break\n\n\t\t\tprvs = nxt\n\n\t\t# generating mask based on sum_mask again based on random hardcoded values\n\t\tfor r in range(prvs.shape[0]):\n\t\t\tfor c in range(prvs.shape[1]):\n\t\t\t\tif sum_mask[r][c] > 50:\n\t\t\t\t\tmask[r][c] = 2\n\t\t\t\t\tscaled_mask[r][c] = 255\n\t\t\t\telif sum_mask[r][c] > 15:\n\t\t\t\t\tmask[r][c] = 1\n\t\t\t\t\tscaled_mask[r][c] = 128\n\t\tomask = Image.fromarray(mask)\n\t\tomask.save(opath+'/masks/'+h+'.png', 0)\n\t\tosmask = Image.fromarray(scaled_mask)\n\t\tosmask.save(opath+'/scaled_masks/'+h+'.png', 0)\n\n\tcv2.destroyAllWindows()", "def toggle_exposure(self):\n\n checked1 = self.exp1_radio.isChecked()\n if checked1:\n self.exp2_radio.setChecked(True)\n else:\n self.exp1_radio.setChecked(True)\n self.select_exposure()", "def __init__(self, width, height, framerate, max_dist, sv_path, \n record_time, saveimg, savepc, savebag):\n self.width = width\n self.height = height\n self.framerate = framerate\n self.max_dist = max_dist # filter\n self.sv_path = sv_path\n self.record_time = record_time\n self.saveimg = saveimg\n self.savepc = savepc\n self.savebag = savebag\n\n self.lane_detector = LaneDetector()\n self.pc = rs.pointcloud()\n self.colorizer = rs.colorizer()\n self.create_filter()", "def setProtectSurfaces():\n dislin.shlsur()", "def step(self, amt=1):\n \n # For checking if all the animations have their framse looked at\n #activewormind = [i for i, x in enumerate(self._idlelist) if x == False]\n #print \"Worm {} at {:5g}\".format(activewormind, 1000*(time.time() - starttime))\n # save times activated for each worm \n [self.timedata[i].append(1000*(time.time() - starttime)) for i, x in enumerate(self._idlelist) if x == False]\n \n #self._led.buffer = [0] * 480\n self._led.pixheights = [-100] * self._led.numLEDs\n #print type(self._led.buffer)\n for ledcopy in self._ledcopies:\n # self._led.buffer = map(ixor, self._led.buffer, ledcopy.buffer)\n # use pixheights but assume all buffers same size\n # print ledcopy.driver[0].pixheights\n for pix in range(self._led.numLEDs):\n #for ledcopy in self._ledcopies:\n if self._led.pixheights[pix] == ledcopy.driver[0].pixheights[pix]:\n for i in range(3):\n self._led.buffer[3*pix + i] ^= ledcopy.buffer[3*pix + i]\n elif self._led.pixheights[pix] < ledcopy.driver[0].pixheights[pix]:\n for i in range(3):\n self._led.buffer[3*pix + i] = ledcopy.buffer[3*pix + i]\n self._led.pixheights[pix] = ledcopy.driver[0].pixheights[pix] \n self._step += 1", 
"def run_observation(self):\n\n self._generate_direct_image() # to calibrate x_ref and y_ref\n\n num_frames = len(self.exp_start_times)\n progress = Progress(num_frames)\n self.progess = progress\n\n progress_line = 'Generating frames 0/{} done'.format(num_frames)\n progress.print_status_line(progress_line)\n progress.progress_line = progress_line\n\n for i, start_time in enumerate(self.exp_start_times):\n filenum = i + 1\n self._generate_exposure(start_time, filenum)\n\n progress.increment()\n progress_line = 'Generating frames {}/{} done'.format(filenum,\n num_frames)\n progress.print_status_line(progress_line)\n\n # so it can be retreived by exposure_generator\n progress.progress_line = progress_line", "def fireRandom():\n#the increment and the A1-H8 define the possible colors the pixels can be\n\tincrement = 20\n A1 = Color(255, 0, 0)\n B2 = Color(255, increment, 0)\n C3 = Color(255, increment * 2, 0)\n D4 = Color(255, increment * 3, 0)\n E5 = Color(255, increment * 4, 0)\n F6 = Color(255, increment * 5, 0)\n G7 = Color(255, increment * 6, 0)\n H8 = Color(255, increment * 7, 0)\n#the for loop with i and the number of pixels cycles through all the pixels we have so that they each get called when a random number does\n for i in range (strip.numPixels()):\n threeQuarters = randrange(0, 4)\n if threeQuarters == 1 or threeQuarters == 2 or threeQuarters == 3 :\n#the above if statement and the threeQuarters variable makes it so that the pixels only recieve a color 75% of the time. This makes a flicker.\n shade = randrange(0,9)\n\n if shade == 1:\n strip.setPixelColor(i, A1)\n strip.show()\n elif shade == 2:\n strip.setPixelColor(i, B2)\n strip.show()\n elif shade == 3:\n strip.setPixelColor(i, C3)\n strip.show()\n elif shade == 4:\n strip.setPixelColor(i, D4)\n strip.show()\n elif shade == 5:\n strip.setPixelColor(i, E5)\n strip.show()\n elif shade == 6:\n strip.setPixelColor(i, F6)\n strip.show()\n elif shade == 7:\n strip.setPixelColor(i, G7)\n\t\t\t\tstrip.show()\n\t\t\telse: \n\t\t\t\tstrip.setPixelColor(i, H8)\n\t\t\t\tstrip.show()" ]
[ "0.609327", "0.60517883", "0.6042764", "0.5937301", "0.58709234", "0.57097524", "0.5626163", "0.56135774", "0.5605713", "0.5569856", "0.5565318", "0.55331594", "0.5474416", "0.54720265", "0.546944", "0.546139", "0.5431014", "0.5417215", "0.5389273", "0.5385319", "0.53823715", "0.53608334", "0.5350842", "0.5325013", "0.5324093", "0.53186107", "0.5307615", "0.5303014", "0.5300838", "0.5296238", "0.5292857", "0.5291221", "0.5282675", "0.52644855", "0.52548665", "0.5253392", "0.52530366", "0.5241206", "0.5239949", "0.5238053", "0.5229733", "0.5214106", "0.5202829", "0.5195585", "0.5184195", "0.5172646", "0.51605767", "0.5148059", "0.51400894", "0.5135868", "0.51358485", "0.51358145", "0.5132701", "0.51205426", "0.51172316", "0.5108857", "0.50945365", "0.5087174", "0.50836754", "0.5079301", "0.50784755", "0.50663245", "0.5059835", "0.505474", "0.5052357", "0.5051662", "0.5042399", "0.50404537", "0.5036776", "0.5036536", "0.50353616", "0.50246084", "0.5022891", "0.5021086", "0.5021034", "0.50131553", "0.5011978", "0.49950886", "0.49923852", "0.49839252", "0.4976915", "0.4969501", "0.4968737", "0.4968196", "0.49600044", "0.4956715", "0.49446705", "0.49427888", "0.49364802", "0.4929312", "0.49276537", "0.4926511", "0.4924106", "0.491814", "0.491698", "0.49169424", "0.49164906", "0.49164748", "0.4915689", "0.49103296", "0.49092796" ]
0.0
-1
one call per exposure/dither move
def test_do_apogee_science_1_pair_B_open(self): sopTester.updateModel('mcp', TestHelper.mcpState['apogee_science']) sopTester.updateModel('apogee', TestHelper.apogeeState['B_open']) self._do_apogee_science(3, 30, 0, 0, ditherPairs=1)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def opd_dither(cfg):\n \n setpoint_new = pi.getINDI('PLC.UBCSettings.PLSetpoint')\n file_number = pi.getINDI('NOMIC.CamInfo.FIndex') # Get initial file number\n offsets = np.array(cfg['nomic_dither_opd_pattern']) * 5.0 * 180.0 / np.pi # input in rad at 11um, but commandet offsets in deg in K band\n pi.setINDI('NOMIC.EditFITS.Keyword=spdthpos;Value=0.0;Comment=setpoint dither position (offset from nominal setpoint) in rad')\n while True:\n for i_step in range(0, len(cfg['nomic_dither_opd_pattern'])):\n file_number = file_number + cfg['nomic_dither_opd_ndits'][i_step]\n try:\n pi.evalINDI ('\"NOMIC.CamInfo.FIndex\" >= %d' % file_number, timeout=900) # wait for cfg['nomic_dither_opd_ndits'][i] new files to arrive\n except:\n info('OPD dither offset timed out (more than 15 min since last offset)')\n print(' or was interrupted.')\n print(' Make sure to restart OPD dither pattern.')\n return()\n setpoint_old = pi.getINDI('PLC.UBCSettings.PLSetpoint') # ask for current setpoint\n if (setpoint_old - setpoint_new) > 2.9: # if expected setpoint is more than ~0.01 rad (at 11um) different from current setpoint\n info('Setpoint has changed between two dither positions.')\n setpoint_new = setpoint_old + offsets[i_step] # determine new setpoint\n pi.setINDI('NOMIC.EditFITS.Keyword=spdthpos;Value=' + str(np.sum(offsets[:i_step+1])) + ';Comment=setpoint dither position (offset from nominal setpoint) in rad')\n pi.setINDI('PLC.PLSetpoint.PLSetpoint=' + str(setpoint_new) + ';forNAC=0') # send a dither offset", "def endexposureloop(self):\n self.max_exposures = self.current_exposure", "def do_pixels(self):\n self.do_yurt_pixels()\n self.do_petal_strip_pixels()\n self.do_vase_pixels()", "def run(self):\n openShutter = True\n actuateXed = False\n image_type = \"PPUMP\"\n\n wl = float(self.eo_config.get(\"PPUMP_WL\", 550))\n meas_flux = self.measured_flux(wl)\n seqno = 0\n for tokens in self.instructions:\n exptime = float(tokens[1])\n nframes = int(tokens[2])\n shifts = int(tokens[3])\n for iframe in range(nframes):\n self.image_clears()\n self.bias_image(seqno)\n self.take_image(seqno, exptime, openShutter, actuateXed,\n image_type)\n seqno += 1", "def pixelMove(*args, **kwargs)->None:\n pass", "def process(self, image):", "def update_image(self):\n if self.filenames:\n pos = self.slider.value()\n proj, flat, dark, theta = dx.read_aps_32id(self.filenames, proj=(pos, pos+1))\n if self.ffc_correction:\n image = proj[0,:,:].astype(np.float)/flat[0,:,:].astype(np.float)\n else:\n image = proj[0,:,:].astype(np.float)\n self.image_item.setImage(image)", "def warmup():\n print camera.CoolerOFF()\n camera.status.update()", "def punched(self):\n if not self.dizzy:\n self.dizzy = 1\n self.original = self.image\n Chimp.count_punch += 1", "def main():\n camera = picamera.PiCamera()\n camera.resolution = (RESOLUTIONX, RESOLUTIONY)\n camera.iso = 800\n time.sleep(2)\n while True:\n camera.capture('current-image.jpg')\n adapt_steering(navigation.get_xposition('current-image.jpg'))\n time.sleep(0.4)", "def __call__(self, frame_num):\n # propagate and set the density\n self.img.set_array(\n np.abs(self.quant_sys.propagate(10)) ** 2\n )\n return self.img,", "def continue_with_exposure(self):\r\n # Allocate space to give to scan_until_abort, and name the two\r\n # rows appropriately.\r\n self.data_pair = self.cam.get_new_array(n_images=2)\r\n self.pump_probe_data = self.data_pair[0]\r\n self.probe_only_data = self.data_pair[1]\r\n # Keep track of which image will be updated next\r\n self.next_data_has_pump = True\r\n\r\n # 
Tell self.thread what to do when the camera has new images\r\n self.cam.new_images.connect(self.send_new_images)\r\n\r\n # Get the current array of wavelengths from cam\r\n self.wavelen_arr = self.cam.get_wavelen_array()\r\n\r\n # Queue a call to cam.scan_until_abort\r\n self.startAcq.emit(self.data_pair)\r\n\r\n # Tell listeners (plotting widgets) to start displaying data too\r\n self.startDisplay.emit()", "def run_observation(self):\n\n self._generate_direct_image() # to calibrate x_ref and y_ref\n\n num_frames = len(self.exp_start_times)\n progress = Progress(num_frames)\n self.progess = progress\n\n progress_line = 'Generating frames 0/{} done'.format(num_frames)\n progress.print_status_line(progress_line)\n progress.progress_line = progress_line\n\n for i, start_time in enumerate(self.exp_start_times):\n filenum = i + 1\n self._generate_exposure(start_time, filenum)\n\n progress.increment()\n progress_line = 'Generating frames {}/{} done'.format(filenum,\n num_frames)\n progress.print_status_line(progress_line)\n\n # so it can be retreived by exposure_generator\n progress.progress_line = progress_line", "def __call__(self, img, target):\n if random.random() < 0.5:\n img = ImageEnhance.Brightness(img).enhance(0.5 + random.random())\n if random.random() < 0.5:\n img = ImageEnhance.Color(img).enhance(0.5 + random.random())\n if random.random() < 0.5:\n img = ImageEnhance.Contrast(img).enhance(0.5 + random.random())\n return img, target", "def do_stuff(self):\n self.create_tourism_raster()", "def sample_damaging(image):\r\n return crease_image(blotch_image(image, 100, True), 10, False)", "def grass_drass():", "def step(self,image):\r\n\r\n\t\tself.gray = image", "def punched(self):\n if not self.dizzy:\n self.dizzy= 1\n self.original= self.image", "def adjust_image_data(self):\r\n\r\n print('Adjusting image data: ')\r\n\r\n if self.removeFirstSequence: # used to remove the first trial from the sequence\r\n\r\n frames_per_rep = self.nFrames/self.nrepetitions\r\n\r\n self.imageData = self.imageData[frames_per_rep:, :, :]\r\n\r\n self.nFrames = self.imageData.shape[0]\r\n\r\n self.nrepetitions = int(self.nFrames/(self.period * self.framerate))\r\n\r\n self.times = np.arange(0, self.nFrames/self.framerate, 1.0/self.framerate)\r\n\r\n \r\n\r\n # first squeeze the image to 3d if it is 4d\r\n\r\n maxt = np.max(self.times) # find last image time\r\n\r\n sh = self.imageData.shape\r\n\r\n if len(sh) == 4:\r\n\r\n self.imageData = self.imageData.squeeze()\r\n\r\n sh = self.imageData.shape\r\n\r\n dt = np.mean(np.diff(self.times)) # get the mean dt\r\n\r\n n_Periods = int((maxt+dt)/self.period) # how many full periods in the image set - include the first?\r\n\r\n if self.nrepetitions > 0 and self.nrepetitions < n_Periods:\r\n\r\n n_Periods = self.nrepetitions\r\n\r\n n_PtsPerCycle = int(np.floor(self.period/dt)); # estimate image points in a stimulus cycle\r\n\r\n ndt = self.period/n_PtsPerCycle\r\n\r\n self.imageData = self.imageData[range(0, n_Periods*n_PtsPerCycle),:,:] # reduce to only what we need\r\n\r\n print (' Adjusted image info')\r\n\r\n print (\" # Periods: %d Pts/cycle: %d Cycle dt %8.4fs (%8.3fHz) Cycle: %7.4fs\" %(n_Periods, n_PtsPerCycle, ndt, 1.0/ndt, self.period))\r\n\r\n self.print_image_info()", "def _do_expose_loop(self, fitsfile, seconds):\n log.debug(f\"Starting expose loop with {self.max_exposures} exposures\")\n while (self.current_exposure is not None and \n self.current_exposure < self.max_exposures):\n self.current_exposure += 1\n self.Expose(fitsfile, seconds)\n while 
self.process and self.process.poll() is None:\n sleep(5)\n if not self.process or self.process.returncode != 0:\n break\n \n self.current_exposure = None\n self.max_exposures = None", "def on_run(self):\n self.set_illumination({'mode': 'breathe'})", "def _generate_exposure(self, expstart, number):\n\n index_number = number - 1 # for zero indexing\n\n filename = '{:04d}_raw.fits'.format(number)\n\n exp_gen = ExposureGenerator(self.detector, self.grism, self.NSAMP,\n self.SAMPSEQ, self.SUBARRAY,\n self.planet, filename, expstart)\n\n if not self.spatial_scan:\n self.sample_rate = 1 * u.year # high number reverts to read times\n\n _, sample_mid_points, sample_durations, read_index = \\\n exp_gen._gen_scanning_sample_times(self.sample_rate)\n\n time_array = (sample_mid_points + expstart).to(u.day)\n\n if self.transmission_spectroscopy:\n star_norm_flux = self.generate_lightcurves(time_array)\n planet_depths = 1 - star_norm_flux\n else:\n planet_depths = None\n\n # x shifts - linear shift with exposure, second exposure shifted by\n # x_shifts, direct image and first exp will match.\n x_ref = self._try_index(self.x_ref, index_number)\n y_ref = self._try_index(self.y_ref, index_number)\n sky_background = self._try_index(self.sky_background, index_number)\n\n # X and Y Shifts\n x_ref += self.x_shifts * index_number\n y_ref += self.y_shifts * index_number\n x_jitter = self.x_jitter\n y_jitter = self.y_jitter\n\n if self._visit_trend:\n scale_factor = self._visit_trend.get_scale_factor(index_number)\n else:\n scale_factor = None\n\n if self.spatial_scan:\n exp_frame = exp_gen.scanning_frame(\n x_ref, y_ref, x_jitter, y_jitter,\n self.wl, self.stellar_flux, planet_depths,\n self.scan_speed, self.sample_rate, sample_mid_points,\n sample_durations, read_index, ssv_generator=self.ssv_gen,\n noise_mean=self.noise_mean, noise_std=self.noise_std,\n add_flat=self.add_flat, add_dark=self.add_dark,\n scale_factor=scale_factor, sky_background=sky_background,\n cosmic_rate=self.cosmic_rate,\n add_gain_variations=self.add_gain_variations,\n add_non_linear=self.add_non_linear,\n clip_values_det_limits=self.clip_values_det_limits,\n add_read_noise=self.add_read_noise,\n add_stellar_noise=self.add_stellar_noise,\n add_initial_bias=self.add_initial_bias,\n progress_bar=self.progess,\n threads=self.threads\n )\n else:\n exp_frame = exp_gen.staring_frame(\n x_ref, y_ref, x_jitter, y_jitter,\n self.wl, self.stellar_flux, planet_depths,\n sample_mid_points, sample_durations, read_index,\n noise_mean=self.noise_mean, noise_std=self.noise_std,\n add_flat=self.add_flat, add_dark=self.add_dark,\n scale_factor=scale_factor, sky_background=sky_background,\n cosmic_rate=self.cosmic_rate,\n add_gain_variations=self.add_gain_variations,\n add_non_linear=self.add_non_linear,\n clip_values_det_limits=self.clip_values_det_limits,\n add_read_noise=self.add_read_noise,\n add_stellar_noise=self.add_stellar_noise,\n add_initial_bias=self.add_initial_bias,\n progress_bar=self.progess,\n threads=self.threads\n )\n\n exp_frame.generate_fits(self.outdir, filename, ldcoeffs=self.ldcoeffs)\n\n return exp_frame", "def process_walked_event(self, event):\n sprite = self.sprites[event.id][1]\n old_point = self.sprites[event.id][0]\n self.sprites[event.id][0] = event.point\n self.img[event.point.y, event.point.x] = self.sprite_colors[sprite]\n self.img[old_point.y, old_point.x] = self.sprite_colors[\"Background\"]", "def main():\n folder = \"D:\\\\Noam10\\\\Documents\\\\Documents\\\\dither 2\"\n filename = \"kirigiri\"\n filetype = \".jpg\"\n 
input_file = folder + \"\\\\\" + filename + filetype\n for palette in paletteDict.keys():\n output_file = folder + \"\\\\\" + filename + \"(\" + palette + \").bmp\"\n Dither(input_file, output=output_file, palette=paletteDict[palette])\n print(output_file)", "def analysis_dFF_map(self):\r\n\r\n \r\n\r\n print ('Starting dF/F analysis:')\r\n\r\n self.print_image_info()\r\n\r\n # smoothwin = int(self.imageData.shape[1]/8.)\r\n\r\n # get the average image and the average of the whole image over time\r\n\r\n avgimg = np.mean(self.imageData, axis=0) # get mean image for reference later: average across all time\r\n\r\n \r\n\r\n mpl.figure(99)\r\n\r\n mpl.imshow(avgimg, vmin=0, vmax=np.max(np.max(avgimg, axis=0), axis=0))\r\n\r\n # self.meanimagevalue = np.mean(np.mean(avgimg, axis=1), axis=0)\r\n\r\n # self.stdimg = np.std(self.imageData, axis= 0) # and standard deviation\r\n\r\n imgdatasm = scipy.ndimage.filters.gaussian_filter(self.imageData,[0,2,2],order=0,output=None,mode='reflect',cval=0.0,truncate=4.0)\r\n # field correction: smooth the average image, subtract it from the imagedata, then add back the mean value\r\n avgimgsm = scipy.ndimage.filters.gaussian_filter(avgimg, 2, order=0, output=None, mode='reflect', cval=0.0, truncate=4.0)\r\n\r\n # avgimgsm = scipy.ndimage.filters.gaussian_filter(avgimg, smoothwin, order=0, output=None, mode='reflect', cval=0.0, truncate=4.0)\r\n\r\n #self.imageData = (self.imageData-avgimgsm)+ self.meanimagevalue\r\n\r\n mpl.figure(98)\r\n mpl.imshow(avgimgsm,vmin=0, vmax=np.max(np.max(avgimgsm, axis=0), axis=0))\r\n mpl.figure(97)\r\n mpl.imshow(np.mean(imgdatasm,axis=0))\r\n self.n_times = self.timebase\r\n\r\n periodsize = int(self.period*self.framerate)\r\n print('periodsize: ',periodsize)\r\n\r\n # windowsize = int(self.freqperiod*self.framerate) # window size for every response\r\n\r\n # r = range(0, self.imageData.shape[0], windowsize)\r\n\r\n sig = np.reshape(imgdatasm, (self.nrepetitions, periodsize, \r\n\r\n self.imageData.shape[1], self.imageData.shape[2]), order='C')\r\n\r\n delresp=np.zeros([19,256,256])\r\n repback = np.mean(sig[:,1:4,:,:],axis=1)\r\n resp = np.mean(sig[:,5:9,:,:],axis=1)\r\n for counter in range(19):\r\n delresp[counter,:,:]=(resp[counter,:,:]-repback[counter,:,:])/repback[counter,:,:]\r\n quot=np.mean(delresp,axis=0)\r\n quot=-quot\r\n print ('shape of quot: ', np.shape(quot))\r\n # quot=(resp-repback)/repback\r\n # quot[quot>0]=0\r\n # quot=-1000*quot\r\n\r\n mpl.figure(7)\r\n mpl.imshow(quot,cmap=mpl.cm.binary)\r\n mpl.colorbar()\r\n\r\n quotsm = scipy.ndimage.filters.gaussian_filter(quot, 3, order=0, output=None, mode='reflect', cval=0.0, truncate=4.0)\r\n mpl.figure(8)\r\n mpl.imshow(quotsm,cmap=mpl.cm.binary)\r\n mpl.colorbar()\r\n \r\n # bl = np.mean(sig[:, range(0, sig.shape[1], windowsize), :, :], axis=0)\r\n\r\n # bl = scipy.ndimage.filters.gaussian_filter(bl, smoothwin, order=0, output=None, mode='reflect', cval=0.0, truncate=4.0)\r\n\r\n\r\n\r\n # print (' windowsize: ', windowsize)\r\n\r\n # print (' periodsize: ', periodsize)\r\n # mc = matplotlib.cm\r\n\r\n # only use sequential maps here\r\n\r\n # clist = [mc.Reds, mc.YlOrBr, mc.Oranges, mc.Greens, mc.GnBu, mc.Blues, mc.RdPu, mc.Purples,mc.Reds,mc.Greens,mc.Blues,mc.Reds,mc.Reds,mc.Reds,mc.Reds]\r\n # clist2 = ['red', 'orange', 'yellow', 'green', 'blue', 'indigo', 'violet', 'black','red','purple','green','blue','red','red','red','red']\r\n\r\n cs = {}\r\n\r\n # sigd = np.zeros((bl.shape[0], sig.shape[2], sig.shape[3]))\r\n# \r\n # localmax = {}\r\n\r\n # sigmax 
= 0.\r\n# \r\n # kernel = np.ones((5, 5))\r\n\r\n # psf = kernel / np.sum(kernel)\r\n\r\n # compute dF/F, and get maximum over all frequencies\r\n\r\n print (' sig shape: ', sig.shape)\r\n\r\n # print (' bl shape: ', bl.shape)\r\n\r\n # smax = np.zeros(bl.shape[0])\r\n\r\n # for i in range(bl.shape[0]):\r\n\r\n # sigd[i] = (np.mean(np.max(sig[:,range(i*windowsize, i*windowsize+windowsize),:,:], axis=0), axis=0) - bl[i,:,:])/bl[i,:,:]\r\n\r\n # sigd[i] = sigd[i]**2.0\r\n\r\n # smooth\r\n\r\n #sigd[i] = scipy.ndimage.filters.gaussian_filter(sigd[i], 1., order=0, output=None, mode='reflect', cval=0.0, truncate=4.0)\r\n\r\n # deconvolve\r\n\r\n # sigd[i] = restoration.richardson_lucy(sigd[i], psf, 5)\r\n\r\n# sm = sigd[i].max().max()\r\n\r\n# if sm > sigmax:\r\n\r\n# sigmax = sm\r\n\r\n# smax[i] = sm\r\n\r\n# print( ' i, sm: ', i, sm)\r\n\r\n# # now process for display\r\n\r\n# print (' sigd shape: ', sigd.shape)\r\n\r\n# wdat = np.mean(sig, axis=0)\r\n\r\n# wds = wdat.shape\r\n\r\n# print('wdat shape: ', wds)\r\n\r\n# # print (range(int(wds[1]/2.), int(3.*wds[1]/4.)), range(int(wds[2]/2.), int(3.*wds[2]/4.)))\r\n\r\n# print( 'reduced shape: ', wdat[:,range(int(wds[1]/2.),int(3.*wds[1]/4.)),:][:,:,range(int(wds[2]/2.), int(3.*wds[2]/4.))].shape)\r\n\r\n# wp = wdat[:,range(int(wds[1]/2.),int(3.*wds[1]/4.)),:][:,:,range(int(wds[2]/2.), int(3.*wds[2]/4.))]\r\n\r\n# wp = np.mean(np.mean(wdat, axis=1), axis=1)\r\n\r\n# mpl.figure(1)\r\n\r\n# mpl.plot(np.linspace(0., len(wp)*1./self.framerate, num=len(wp)), wp)\r\n\r\n\r\n\r\n# mpl.figure(2)\r\n\r\n# for i in range(sigd.shape[0]):\r\n\r\n# sigd[i][sigd[i] < self.threshold*sigmax] = 0.\r\n\r\n# # find center of mass of areas above threshold\r\n\r\n# # mass = sigd[i].copy()\r\n\r\n# # mass[sigd[i] > 0.] = 1.\r\n\r\n# # structuring_element = [[0,1,0],[1,1,1],[0,1,0]]\r\n\r\n# # segmentation, segments = scipy.ndimage.label(mass, structuring_element)\r\n\r\n# # coords = scipy.ndimage.center_of_mass(sigd[i], segmentation, range(1,segments+1))\r\n\r\n# # xcoords = np.array([x[1] for x in coords])\r\n\r\n# # ycoords = np.array([x[0] for x in coords])\r\n\r\n# # cs[i] = (xcoords, ycoords)\r\n\r\n\r\n\r\n# # Calculating local maxima\r\n\r\n# lm = skif.peak_local_max(sigd[i], min_distance=2, threshold_rel=0.25, exclude_border=False, \r\n\r\n# indices=True, num_peaks=10, footprint=None, labels=None)\r\n\r\n# localmax[i] = [(m[0], m[1], sigd[i][(m[0], m[1])]) for m in lm]\r\n\r\n# # print ('i, local max: ',i, localmax)\r\n\r\n# mpl.subplot(5,5,i+1)\r\n# print ('shape of sigd: ',[np.shape(sigd),i])\r\n\r\n# imga1 = mpl.imshow(sigd[i], cmap=clist[i], vmin=0, origin='lower')\r\n\r\n# if len(localmax[i]) > 0:\r\n\r\n# max_fr = np.max([m[2] for m in localmax[i]])\r\n\r\n# else:\r\n\r\n# continue\r\n\r\n# scattersize = 30.\r\n\r\n# for k, lm in enumerate(localmax[i]):\r\n\r\n# mpl.scatter(lm[1], lm[0], marker='o', c=clist2[i], edgecolors='k',\r\n\r\n# s=scattersize*lm[2]/max_fr, linewidths=0.125, alpha=0.5)\r\n\r\n# mpl.subplot(6,5,i+15+1)\r\n\r\n# wr = range(i*windowsize, i*windowsize+windowsize)\r\n\r\n# # print (' wr: len, min max: ', len(wr), min(wr), max(wr))\r\n\r\n# wmax = 0.\r\n\r\n# for lmax in localmax[i]: # was xcoords\r\n\r\n# wave = wdat[wr, lmax[0],lmax[1]]\r\n\r\n# wdff = (wave-wave[0])/wave[0]\r\n\r\n# if np.max(wdff) > wmax:\r\n\r\n# wmax = np.max(wdff)\r\n\r\n# mpl.plot(np.linspace(0., len(wave)*1./self.framerate, num=len(wave)),\r\n\r\n# wdff, color=clist2[i])\r\n\r\n# mpl.ylim(-0.1*wmax, wmax)\r\n\r\n# fig = mpl.figure(3)\r\n\r\n# for i in 
range(sigd.shape[0]):\r\n\r\n# if len(localmax[i]) == 0:\r\n\r\n# continue\r\n\r\n# max_fr = np.max([m[2] for m in localmax[i]])\r\n\r\n# for lm in localmax[i]:\r\n\r\n# mpl.scatter(lm[1], lm[0], marker='o', c=clist2[i], \r\n\r\n# s=scattersize*lm[2]/max_fr, alpha=0.5, edgecolors='k')\r\n\r\n# mpl.ylim(0, sigd.shape[2])\r\n\r\n# mpl.xlim(0, sigd.shape[1])\r\n\r\n# mpl.axis('equal')\r\n\r\n mpl.show()\r\n\r\n print (' DF/F analysis finished.\\n')", "def expose_test(self):\n with self.lock:\n self.dark = 1\n self.tstart = time.time()\n self.timestamp = time.strftime(\"%Y-%m-%dT%H:%M:%S\", time.localtime(self.tstart))\n imagesize = (self.expArea[3] - self.expArea[1],\n self.expArea[2] - self.expArea[0])\n self.data = np.ones(shape=imagesize, dtype=np.uint16)\n self.tend = time.time()", "def __init__(self, inifile, dry_run, output):\n\n config = ConfigParser()\n config.read(inifile)\n sequence = config['dithersequence']\n\n # Set up the output.\n self._output = output\n\n # Set up the file type and exposure sequence.\n self._location = sequence['location']\n self._filetype = sequence['filetype']\n self._date = sequence['date']\n self._exposures = [int(e) for e in sequence['exposures'].split()]\n\n if 'coordinates' not in config:\n raise ValueError('no coordinates set for dither!')\n \n coords = config['coordinates']\n self._dithertype = coords['dithertype']\n \n self._wcs = fits.getdata(coords['wcsfile'], 2)\n self._wcs = self._wcs[np.argsort(self._wcs['mjd_obs'])]\n self._central_exposure = int(sequence['centralexposure'])\n\n if coords['dithertype'] == 'telescope':\n fadir = coords['fiberassigndir']\n self._ditherfa = fits.getdata(os.path.join(\n fadir, 'fiberassign-%s.fits' % coords['ditheredtilenum']))\n self._unditherfa = fits.getdata(os.path.join(\n fadir, 'fiberassign-%s.fits' % coords['unditheredtilenum']))\n expnum = [int(fn.split('-')[1]) for fn in self._wcs['filename']]\n centralind = expnum.index(self._central_exposure)\n self._central_wcs = self._wcs[centralind]\n\n # Set the Tile ID for the output metadata.\n self._tileid = coords['unditheredtilenum']\n else:\n raise ValueError('not implemented')\n\n # Extract the list of exposures on disk.\n self._exposure_files = self._getfilenames()\n\n if not dry_run:\n # Construct fiber output.\n self._exposure_table = self._buildtable()", "def phot_aperture(input_file):\n #set the original directory\n original_path = os.getcwd()\n save_path = input_file['save_path']\n planet = input_file['exoplanet']\n #radii = np.arange(input_file['apertures'][0],input_file['apertures'][1],0.1)\n radii = np.array(input_file['apertures'])\n #change to save data reduction directory\n os.chdir(save_path)\n if not os.path.exists('phot_results'):\n os.makedirs('phot_results')\n tempo = time.time()\n print 'Starting aperture photometry'\n print 'Saving results on: '+save_path+'/phot_results/'\n \n #check the number of objects to make the photometry\n N_obj = len(input_file['pxpositions'])/2.\n print 'Number of objects = ',N_obj\n positions = [] #create the positions variable (X,Y) in pixels unit on the CCD\n for i in range(len(input_file['pxpositions'])):\n if i % 2 == 0: #if the number is a even (or not a odd), the turple is created\n positions.append((input_file['pxpositions'][i],input_file['pxpositions'][i+1]))\n print 'Radius from ',radii[0],' to ',radii[-1],'\\n'\n \n skysection = input_file['skysection']\n skysection[0] = int(skysection[0])\n skysection[1] = int(skysection[1])\n \n images = sorted(glob.glob('AB'+planet+'*.fits'))\n for radius in radii:\n 
flux_data = []\n for i in range(len(images)):\n im = fits.getdata(images[i],header=False)\n im = array(im,dtype='Float64')\n \n # ERROR\n #Traceback (most recent call last):\n # File \"ExoTRed.py\", line 105, in <module>\n # exotred.phot_aperture(input_file)\n # File \"./sources/ExoTRed_core.py\", line 637, in phot_aperture \n # File \"/home/walter/bin/anaconda3/envs/iraf27/lib/python2.7/site-packages/photutils/background/background_2d.py\", line 329, in __init__\n # self._calc_bkg_bkgrms()\n # File \"/home/walter/bin/anaconda3/envs/iraf27/lib/python2.7/site-packages/photutils/background/background_2d.py\", line 686, in _calc_bkg_bkgrms\n # bkg = self._interpolate_meshes(self._bkg1d)\n # File \"/home/walter/bin/anaconda3/envs/iraf27/lib/python2.7/site-packages/photutils/background/background_2d.py\", line 575, in _interpolate_meshes\n # f = ShepardIDWInterpolator(yx, data)\n # File \"/home/walter/bin/anaconda3/envs/iraf27/lib/python2.7/site-packages/photutils/utils/interpolation.py\", line 138, in __init__\n # raise ValueError('The number of values must match the number '\n # ValueError: The number of values must match the number of coordinates.\n\n # bkg = background.background_2d.Background2D(im,tuple(skysection))\n # bkg_data = bkg.background\n # bkg_rms = bkg.background_rms\n\n # phot_table = aperture_photometry(im - bkg_data, CircularAperture(positions, radius),\n # error=bkg_rms, method ='center')#,effective_gain=float(input_file['gain']))\n ####### SUBSTITUTE ROUTINE\n window = 100\n sky_size = im.shape\n sky_mean = float(np.median(im[int(skysection[1]-window):int(skysection[1]+window),int(skysection[0]-window):int(skysection[0]+window)]))\n bkg = np.random.poisson(sky_mean,sky_size)\n apertures = CircularAperture(positions, radius)\n phot_table = aperture_photometry(im, apertures, error=bkg)\n #######\n phot_table_flux = np.array([]) #saving results of aperture photometry\n for j in range(len(phot_table['aperture_sum'])):\n phot_table_flux = np.concatenate((phot_table_flux,np.array([phot_table['aperture_sum'][j]])),axis=0)\n phot_table_flux = np.concatenate((phot_table_flux,np.array([phot_table['aperture_sum_err'][j]])),axis=0)\n flux = np.concatenate((phot_table_flux,np.array([images[i]])),axis=0)\n # flux = [phot_table['aperture_sum'][0], phot_table['aperture_sum'][1],phot_table['aperture_sum_err'][0],\n # phot_table['aperture_sum_err'][1],images[i]]\n flux_data.append(flux)\n flux_data = DataFrame(flux_data)#,columns=['hoststar','refstar','hoststar_err','refstar_err','image'])\n flux_data.to_csv('./phot_results/'+planet+'_flux_radius_'+str(radius)+'.csv',index=False)\n use.update_progress((float(np.where(radii == radius)[0])+1.)/len(radii))\n print 'Time total = ',abs(time.time()-tempo)/60.,' minutes'\n os.chdir(original_path)", "def motion_extraction():\n # iterate through frames\n global frame_height, frame_width\n global limb_coords, init_coords\n frame_count = 0\n has_frames, frame = capture.read()\n\n while has_frames:\n img_out = frame.copy()\n img_out = insert_padding(img_out, 14*14, 12*14)\n\n if frame_count == 0:\n # change global values of height and width\n frame_height = frame_height + 14*14*2\n frame_width = frame_width + 12*14*2\n get_start_positions(img_out)\n img_out2 = segment_red(img_out, 200, 130)\n #erode(img_out2, 4, 6)\n remove_artifacts(img_out2)\n #enhance_contrast(img_out2)\n\n if frame_count > 0:\n get_motion(prev_frame, img_out2, frame_count)\n\n prev_frame = img_out2.copy()\n frame_count += 1\n has_frames, frame = capture.read()", "def main():\n 
test_image = load_image()\n\n pixelate_image(\n normalize_image(test_image)\n )\n pass", "def image_reduction(self, FF_name, BF_name, DF_name):\n print '--------------------------------------------------------------------- image_reduction'\n start_time = time.time() # Take time\n\n # Construct 3D cube of light-frames and flat-frames with dim(number, x-dim, y-dim):\n FF_files = sort(glob.glob('{}{}*'.format(self.path, FF_name)))\n BF_files = sort(glob.glob('{}{}*'.format(self.path, BF_name)))\n DF_files = sort(glob.glob('{}{}*'.format(self.path, DF_name)))\n\n # Load images into an array:\n if self.imgtype=='fits':\n FF_i = array([pyfits.getdata(str(files)) for files in FF_files])\n BF_i = array([pyfits.getdata(str(files)) for files in BF_files])\n DF_i = array([pyfits.getdata(str(files)) for files in DF_files])\n if self.imgtype=='rgb' : # Combine RGB:\n FF_i = sum(array([asarray(Image.open(str(files))) for files in FF_files]), axis=3)\n BF_i = sum(array([asarray(Image.open(str(files))) for files in BF_files]), axis=3)\n DF_i = sum(array([asarray(Image.open(str(files))) for files in DF_files]), axis=3) \n \n # Median correction images:\n FF = median(FF_i, axis=0) \n BF = median(BF_i, axis=0)\n DF = median(DF_i, axis=0)\n \n # Median correction images:\n BF = median(BF_i, axis=0) # Master bias \n # Correct for bias:\n FF_m_b = median(FF_i-BF, axis=0)\n FF_i_b = FF_i - BF\n DF_b_m = median(DF_i-BF, axis=0) # Dark Current\n # Scale dark to science frames:\n hdulist = fits.open('{}'.format(str(DF_files[0])))\n t_exp_DF = hdulist[0].header['EXPTIME'] # Exposure time (CHANGE HEADER NAME!!!)\n DF = self.t_exp_LF/t_exp_DF * DF_b_m # Master dark \n # Correct flats:\n FF = median(FF_i_b - DF, axis=0) # Master flat\n # Final correction:\n CF_i = (self.LF_i - BF - DF)/FF # Calibrated science images \n print 'Filter done in time: %0.5f s' %(time.time()-start_time)\n \n # Plot if you like:\n if self.plot==1:\n FITS(FF, 'linear', 3); plt.show() \n FITS(BF, 'linear', 3); plt.show()\n FITS(DF, 'linear', 3); plt.show() \n FITS(self.LF_i[0], 'linear', 2); plt.show()\n FITS(CF_i[0], 'linear', 2); plt.show()\n \n # Save if you like:\n if self.save==1:\n [imsave('{}CF_%02d.{}'.format(self.path, self.imgtype) %i, CF_i[i]) for i in range(self.n)]", "def __call__(self, rgbd_img_batch, color_id):\n color = vis.id2color[color_id]\n lower, upper = np.array(hsv_range[color])\n #print('debug: rgbd_img_batch shape: ', rgbd_img_batch.shape)\n\n color_imgs, depth_imgs = rgbd_img_batch[:, :3, :, :], rgbd_img_batch[:, 3, :, :]\n color_imgs = color_imgs.permute(0, 2, 3, 1) # NHWC\n color_imgs_np = color_imgs.cpu().numpy()\n\n n = color_imgs.shape[0]\n\n\n pcds = []\n for idx in range(n):\n depth_img = depth_imgs[idx]\n hsv_img = cv2.cvtColor(color_imgs_np[idx].astype('uint8'), cv2.COLOR_RGB2HSV)\n mask = cv2.inRange(hsv_img, lower, upper)\n mask = torch.from_numpy(mask)\n if self.use_cuda:\n mask = mask.cuda()\n mask_index = mask > 0\n \n #print('debug: {} points detected'.format(mask_index.sum().item()))\n if mask_index.sum() > 0:\n x_pcd = self.x_pix * depth_img / self.camera_f\n y_pcd = self.y_pix * depth_img / self.camera_f\n\n # apply mask\n x_pcd = -x_pcd[mask_index]\n y_pcd = -y_pcd[mask_index]\n z_pcd = -depth_img[mask_index]\n pcd = torch.stack([x_pcd, y_pcd, z_pcd], dim=0)\n pcd = self.camera_mat.mm(pcd) + self.camera_pos\n pcd = pcd.transpose(0, 1)\n pcd = sample_pcd(pcd, self.num_points)\n else:\n \"\"\"no points detected\"\"\"\n print('Debug: pix2pcd warning: no point detected')\n pcd = 
get_random_pcd(self.num_points)\n\n pcds.append(pcd)\n pcds = torch.stack(pcds, dim=0)\n return pcds", "def adjust(self, image):\n ...", "def generate_movie(filename, x_size=640, y_size=360, numframes=150, dpi=100):\n global timeflag\n timeflag = 1\n\n # Functions for red, green, and blue channels - where the magic happens!\n red_function = build_random_function(7, 9)\n green_function = build_random_function(7, 9)\n blue_function = build_random_function(7, 9)\n print \"red_function:\\t\" + str(red_function)\n print \"green_function:\\t\" + str(green_function)\n print \"blue_function:\\t\" + str(blue_function)\n\n for n in range(1, numframes+1):\n # Create image and loop over all pixels\n im = Image.new(\"RGB\", (x_size, y_size))\n pixels = im.load()\n for i in range(x_size):\n for j in range(y_size):\n x = remap_interval(i, 0, x_size, -1, 1)\n y = remap_interval(j, 0, y_size, -1, 1)\n t = remap_interval(n, 0, numframes, -1, 1)\n pixels[i, j] = (\n color_map(evaluate_random_function(red_function, x, y, t)),\n color_map(evaluate_random_function(green_function, x, y, t)),\n color_map(evaluate_random_function(blue_function, x, y, t))\n )\n im.save(\"movie_images/\"+'%03d'%n+\".png\")\n\n os.system(\"echo 'yes'|avconv -r 24 -i movie_images/%03d.png -vb 20M myart.mp4\")\n\n \"\"\"fig = plt.figure()\n ax = fig.add_subplot(111)\n ax.set_aspect('equal')\n ax.get_xaxis().set_visible(False)\n ax.get_yaxis().set_visible(False)\n\n im = Image.new(\"RGB\", (x_size, y_size))\n\n def update_img(n):\n # Functions for red, green, and blue channels - where the magic happens!\n red_function = build_random_function(7, 9)\n green_function = build_random_function(7, 9)\n blue_function = build_random_function(7, 9)\n\n # Create image and loop over all pixels\n im = Image.new(\"RGB\", (x_size, y_size))\n pixels = im.load()\n for i in range(x_size):\n for j in range(y_size):\n x = remap_interval(i, 0, x_size, -1, 1)\n y = remap_interval(j, 0, y_size, -1, 1)\n pixels[i, j] = (\n color_map(evaluate_random_function(red_function, x, y, n)),\n color_map(evaluate_random_function(green_function, x, y, n)),\n color_map(evaluate_random_function(blue_function, x, y, n))\n )\n im.save(\"test.png\")\n return im\n ani = animation.FuncAnimation(fig, update_img, numframes, interval=24) #TODO: FIX THIS\n writer = animation.writers['avconv'](fps=24)\n\n ani.save(filename, writer=writer, dpi=dpi)\"\"\"", "def timeWarp(destExposure, srcExposure, warpingControl):\n startTime = time.time()\n for nIter in range(1, MaxIter + 1):\n goodPix = afwMath.warpExposure(\n destExposure, srcExposure, warpingControl)\n endTime = time.time()\n if endTime - startTime > MaxTime:\n break\n\n return (endTime - startTime, nIter, goodPix)", "def __call__(self, images, targets):\n pass", "def prepare_output(self):\n self.output = np.copy(self.image)\n self._highlight_qr_codes()\n self._write_overlay_info()", "def Advance():\n warp.step()", "def doDrizSeparate(self, pars):\n \n # Start by applying input parameters to redefine\n # the output frame as necessary\n for p in self.assoc.parlist:\n\n # First do some cleaning up, in case you are restarting...\n fileutil.removeFile(p['outsingle'])\n if (p['outsweight'] != ''):\n fileutil.removeFile(p['outsweight'])\n\n # NB DO NOT USE \"tophat\" unless pixfrac is sufficiently\n # large (> sqrt(2))\n \n p['fillval'] = pars['fillval']\n \n # Pass in the new wt_scale value\n p['wt_scl'] = pars['wt_scl']\n\n if (p['single_driz_mask'] == None and self.static_mask != None):\n p['single_driz_mask'] = 
self.static_mask.getMask(p['image'].signature())\n \n # 'in_units' will always be counts given that the input is converted to 'electrons' in call cases\n p['in_units'] = 'counts'\n \n print(\"\\ndrizzle data='\"+p['data']+\"' outdata='\"+p['outsingle']+\"' outweig='\"+p['outsweight']+\n \"' in_mask='static_mask\"+\"' kernel='\"+p['kernel']+\n \"' outnx=\"+str(p['outnx'])+\" outny=\"+str(p['outny'])+\" xsh=\"+str(p['xsh'])+\" ysh=\"+str(p['ysh'])+\n \" scale=\"+str(p['scale'])+\" pixfrac=\"+str(p['pixfrac'])+\" rot=\"+str(p['rot'])+\n \" coeffs='\"+p['coeffs']+\"' wt_scl='\"+str(p['wt_scl'])+\"' align='center' shft_fr='output' shft_un='output'\"+\n \" out_un='\"+p['units']+\"' expkey='\"+\"EXPTIME\"+\"' fillval='\"+str(p['fillval'])+\"'\"+\n \" xgeoim='\"+p['xgeoim']+\"' ygeoim='\"+p['ygeoim']+\"'\\n\")\n\n # Perform separate drizzling now that all parameters have been setup...\n self.assoc.run(single=True,save=True,build=False)\n\n # Now that we are done with the static mask, delete it...\n del self.static_mask", "def extract(self, files):\n for i in range(len(files)):\n print(files[i])\n img = cv2.imread('{}/{}'.format('{}/{}/{}'.format(DIR_2DST_Mask, self.patient, self.plan), files[i]), 0)\n\n \"\"\"\n Find the indices of array elements that are non-zero, i.e,\n find the pixels' positions that represents the respiratory\n functions (pixels in the respiratory function are brighter).\n \"\"\"\n color_pts = np.argwhere(img > 70)\n\n \"\"\"\n Sorts the pixels according to their x coordenate.\n Obs: np.argwhere inverts x and y, it's like (y, x), because of it,\n the parameter of itemgetter is 1 (to get x coordinate)\n \"\"\"\n lcolor_pts = sorted(color_pts.tolist(), key=itemgetter(1))\n\n \"\"\"\n If there is no pixel representing the respiratory function\n (i.e., lighter pixel) it creates an empty image (without any\n respiratory function)\n \"\"\"\n if len(lcolor_pts) == 0:\n diaphragmatic_lvl = np.zeros((256, 50, 3), np.uint8)\n\n cv2.imwrite('{}/{}/{}/{}'.format(\n DIR_2DST_Diaphragm, patient, plan, files[i]), diaphragmatic_lvl)\n\n # file = open(\n # '{}/{}/{}/points.txt'.format(DIR_2DST_Diaphragm, self.patient, self.plan), 'a')\n # file.write(\"{}:{}\\n\".format(files[i], []))\n # file.close()\n\n continue\n\n # Reverse the coordinates and store the result in lordered_pts list\n lordered_pts = []\n for j in range(len(lcolor_pts)):\n lordered_pts.append(lcolor_pts[j][::-1])\n\n \"\"\"\n Convert pixels coordinates into a tuples and check which column\n has pixels that corresponding to diaphragmatic level\n Obs. 
There are some columns that doesnt have any pixel that\n correpond to diaphragmatic level.\n \"\"\"\n # Columns that have a pixel corresponding diaphragmatic level\n lcolumn_available = []\n for j in range(len(lordered_pts)):\n lordered_pts[j] = tuple(lordered_pts[j])\n lcolumn_available.append(lordered_pts[j][0])\n lcolumn_available = list(set(lcolumn_available))\n # print(\"Ordered points: \", lordered_pts)\n # print(\"Columns available: \", lcolumn_available)\n\n \"\"\"\n If there is not enough columns to build a respiratory pattern,\n create a blank image\n \"\"\"\n if len(lcolumn_available) < 20:\n diaphragmatic_lvl = np.zeros((256, 50, 3), np.uint8)\n cv2.imwrite('{}/{}/{}/{}'.format(\n DIR_2DST_Diaphragm, patient, plan, files[i]), diaphragmatic_lvl)\n continue\n\n \"\"\"\n If there are no pixel that corresponding diaphragmatic level in the\n first column, assign to it the value of the second y coordinate\n \"\"\"\n if lcolumn_available[0] is not 0:\n y = max(\n [x for x in lordered_pts if x[0] == lcolumn_available[0]],\n key=itemgetter(1))[1]\n lordered_pts.insert(0, (0, y))\n lcolumn_available.insert(0, 0)\n\n \"\"\"\n If there are no pixel that corresponding diaphragmatic level in the\n last column, assign to it the value of the penultimate y coordinate\n available\n \"\"\"\n if lcolumn_available[-1] is not 49:\n lordered_pts.append(\n (49, lordered_pts[len(lcolumn_available)][1]))\n lcolumn_available.append(49)\n\n \"\"\"\n Get the biggest y value in each column that represents the\n diaphragmatic level\n \"\"\"\n column = 0\n lcolumn = []\n ldiaphragm_pts = []\n for j in range(50):\n # Get the column's points\n lcolumn = [x for x in lordered_pts if x[0] == column]\n # print('{}: {}'.format(j, lcolumn))\n\n if len(lcolumn) > 0:\n ldiaphragm_pts.append(\n max(lcolumn, key=itemgetter(1))) # Get the biggest y\n else:\n # Get the y value from the previous column\n lcolumn_available.insert(column, column)\n ldiaphragm_pts.append((column, ldiaphragm_pts[-1][1]))\n column += 1\n lcolumn = []\n\n # Draw diaphragmatic level\n diaphragmatic_lvl = np.zeros((256, 50, 3), np.uint8)\n j = 0\n while(j < len(lcolumn_available) - 1):\n cv2.line(\n diaphragmatic_lvl,\n ldiaphragm_pts[j], ldiaphragm_pts[j + 1],\n (0, 0, 255), 1)\n j = j + 1\n\n lcolumn_available = []\n\n print(\"Diaphragmatic's points: \", ldiaphragm_pts)\n cv2.imshow('Diaphragmatic level', diaphragmatic_lvl)\n cv2.waitKey(0)\n cv2.destroyAllWindows()\n cv2.imwrite('{}/{}/{}/{}'.format(\n DIR_2DST_Diaphragm, patient, plan, files[i]), diaphragmatic_lvl)\n\n # file = open('{}/{}/{}/points.txt'.format(DIR_2DST_Diaphragm, self.patient, self.plan), 'a')\n # file.write(\"{}:{}\\n\".format(files[i], ldiaphragm_pts))\n # file.close()\n\n # return ldiaphragm_pts", "def sweep_image_model():\n for c1 in [4, 8, 16]:\n for c2 in [2, 4]:\n for c3 in [2, 4]:\n for c4 in [1, 2]:\n flags = flag_reader.read_flag()\n print(c1)\n flags.channel_list = c1 * np.array([1, c2, c2*c3, c2*c3*c4])\n print('channel list = ', flags.channel_list)\n flags.last_dim = flags.channel_list[-1]\n flags.model_name = flags.data_set + '_channel_' + str(flags.channel_list).replace('[','').replace(']','').replace(' ','_') + \\\n '_dim_last_' + str(flags.last_dim) + '_ind_' + str(flags.comp_ind) + \\\n '_lr_{}_decay_{}_reg_{}_bs_{}'.format(flags.lr, flags.lr_decay_rate, flags.reg_scale, flags.batch_size)\n print(flags.model_name)\n training_from_flag(flags)", "def pre_processing_image(img):\n\n #print(img.shape)\n # apply gamma correction and show the images\n #adjusted = 
adjust_gamma(img, gamma=0.65)\n\n adjusted = exposure.adjust_gamma(img, gamma=1.65)\n #print(adjusted.shape)\n\n # log transform of image\n\n logarithmic_corrected = exposure.adjust_log(adjusted, 1)\n #print(logarithmic_corrected.shape)\n\n # denoising\n #dst2 = cv2.fastNlMeansDenoisingColored(logarithmic_corrected, None, 10, 10, 7, 21)\n #print(dst2.shape)\n dst2 = logarithmic_corrected\n return dst2", "def exposure(frameType, expTime):\n\n blobEvent.clear() \n\n # set the specified frame type\n if frameType.lower() == 'light':\n ccd_frame[0].s = PyIndi.ISS_ON\n ccd_frame[1].s = PyIndi.ISS_OFF\n ccd_frame[2].s = PyIndi.ISS_OFF\n ccd_frame[3].s = PyIndi.ISS_OFF \n indiclient.sendNewSwitch(ccd_frame)\n elif frameType.lower() == 'bias':\n ccd_frame[0].s = PyIndi.ISS_OFF\n ccd_frame[1].s = PyIndi.ISS_ON\n ccd_frame[2].s = PyIndi.ISS_OFF\n ccd_frame[3].s = PyIndi.ISS_OFF \n indiclient.sendNewSwitch(ccd_frame)\n elif frameType.lower() == 'dark':\n ccd_frame[0].s = PyIndi.ISS_OFF\n ccd_frame[1].s = PyIndi.ISS_OFF\n ccd_frame[2].s = PyIndi.ISS_ON\n ccd_frame[3].s = PyIndi.ISS_OFF \n indiclient.sendNewSwitch(ccd_frame)\n elif frameType.lower() == 'flat':\n ccd_frame[0].s = PyIndi.ISS_OFF\n ccd_frame[1].s = PyIndi.ISS_OFF\n ccd_frame[2].s = PyIndi.ISS_OFF\n ccd_frame[3].s = PyIndi.ISS_ON \n indiclient.sendNewSwitch(ccd_frame)\n\n # set the value for the next exposure\n ccd_exposure[0].value=expTime\n\n indiclient.sendNewNumber(ccd_exposure)\n\n # wait for the exposure\n blobEvent.wait()\n\n for blob in ccd_ccd1:\n # pyindi-client adds a getblobdata() method to IBLOB item\n # for accessing the contents of the blob, which is a bytearray in Python\n image_data=blob.getblobdata()\n\n # write the byte array out to a FITS file\n global imgNum\n global imgName\n imgNum += 1\n fileName = fileDir+'raw-'+str(imgNum).zfill(8)+'.fits'\n f = open(fileName, 'wb')\n f.write(image_data)\n f.close()\n imgName = fileName\n \n return fileName", "def forward_test(self, img, img_metas, **kwargs):", "def main():\n original = SimpleImage('images/mt-rainier.jpg')\n original.show()\n reflected = make_reflected('images/mt-rainier.jpg')\n reflected.show()", "def run(self):\n\n # need to think about outpath\n\n # Make sure all files are here and okay...\n\n if not self.config.galfile_pixelized:\n raise ValueError(\"Code only runs with pixelized galfile.\")\n\n self.config.check_files(check_zredfile=True, check_bkgfile=True, check_bkgfile_components=True, check_parfile=True, check_zlambdafile=True)\n\n # Compute the border size\n\n self.config.border = self.config.compute_border()\n\n self.config.d.hpix = [self.pixel]\n self.config.d.nside = self.nside\n self.config.d.outbase = '%s_%d_%05d' % (self.config.outbase, self.nside, self.pixel)\n\n # Do the run\n self.config.start_file_logging()\n self.config.logger.info(\"Running redMaPPer on pixel %d\" % (self.pixel))\n\n firstpass = RunFirstPass(self.config)\n\n if not os.path.isfile(firstpass.filename):\n firstpass.run()\n firstpass.output(savemembers=False, withversion=False)\n else:\n self.config.logger.info(\"Firstpass file %s already present. Skipping...\" % (firstpass.filename))\n\n self.config.catfile = firstpass.filename\n\n # Clear out the firstpass memory\n del firstpass\n\n like = RunLikelihoods(self.config)\n\n if not os.path.isfile(like.filename):\n like.run()\n like.output(savemembers=False, withversion=False)\n else:\n self.config.logger.info(\"Likelihood file %s already present. 
Skipping...\" % (like.filename))\n\n self.config.catfile = like.filename\n\n # Clear out the likelihood memory\n del like\n\n perc = RunPercolation(self.config)\n\n if not os.path.isfile(perc.filename):\n perc.run()\n perc.output(savemembers=True, withversion=False)\n else:\n self.config.logger.info(\"Percolation file %s already present. Skipping...\" % (perc.filename))\n\n self.config.stop_file_logging()", "def use_effect(self):\n if self.preview_name in FILTERS:\n photo = Image.open(self.path.url[1:])\n preview = photo.filter(FILTERS.get(self.preview_name))\n preview.save(self.path.url[1:])", "def update(self):\r\n\r\n # Update the vision frames in the system\r\n self._system.update()\r\n\r\n # Create blank PIL images to hold the video streams\r\n layered = PIL.Image.new('RGBA', (400, 400))\r\n stacked = PIL.Image.new('RGBA', (200, 800))\r\n control = PIL.Image.new('RGBA', (600, 800))\r\n\r\n focalpoint = self._system[self._appString[\"device\"].get()].focalpoint()\r\n # print(focalpoint)\r\n\r\n # Get each vision key and vision for the selected device\r\n visionList = [(visionKey, vision) for visionKey, vision in self._system[self._appString[\"device\"].get()]]\r\n\r\n # Loop through each vision in the vision list\r\n for i, (visionKey, vision) in enumerate(visionList):\r\n\r\n # Grab the frames from the vision when it is \"curr\"\r\n frameList = [frame for frameKey, frame in vision if frameKey==self._appString[\"frame\"].get()]\r\n\r\n # Loop through each frame in the frame list\r\n for frame in frameList:\r\n\r\n # Get the properties and turn the image into RGBA\r\n ratio, size = vision.properties()\r\n rgbFrame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGBA)\r\n\r\n # print(rgbFrame.shape)\r\n width, height, channels = rgbFrame.shape\r\n\r\n # Paste the images together in layered\r\n\r\n imgFrame = PIL.Image.fromarray(cv2.resize(rgbFrame, (int(400 * ratio), int(400 * ratio))))\r\n layered.paste(imgFrame, (int(200 * (1 - ratio)), int(200 * (1 - ratio))))\r\n\r\n # layered.paste(imgFrame, (int(200 * (1 - ratio) + focalpoint[0] * (200 / width)), int(200 * (1 - ratio) - focalpoint[1] * (200 / height))))\r\n # layered.paste(imgFrame, (int(200 * (1 - ratio) + focalpoint[0] * (200 // width)), int(200 * (1 - ratio) - focalpoint[1] * (200 // height))))\r\n # layered.paste(imgFrame, (int(200 * (1 - ratio) + focalpoint[0] * (ratio ** -1)), int(200 * (1 - ratio) - focalpoint[1] * (ratio ** -1))))\r\n # layered.paste(imgFrame, (int(200 * (1 - ratio) + focalpoint[0] * (200/width) / ratio), int(200 * (1 - ratio) - focalpoint[1] * (200/height) / ratio)))\r\n # layered.paste(imgFrame, (int(200 * (1 - ratio) + focalpoint[0] * (200 / width)), int(200 * (1 - ratio) - focalpoint[1] * (200 / height))))\r\n # layered.paste(imgFrame, (int(200 * (1 - ratio) + focalpoint[0] * (ratio ** -1) / 200), int(200 * (1 - ratio) - focalpoint[1] * (ratio ** -1) / 200)))\r\n # layered.paste(imgFrame, (int(200 * (1 - ratio) + focalpoint[0] * (400//width * (1- ratio))), int(200 * (1 - ratio) - focalpoint[1] * (400//height * (1 - ratio)))))\r\n\r\n # Paste the images together in stacked\r\n imgFrame = PIL.Image.fromarray(cv2.resize(rgbFrame, (200, 200)))\r\n stacked.paste(imgFrame, (0, 200 * i))\r\n\r\n # Add the stacked image to the canvas\r\n self._pilFrames[\"stacked\"] = PIL.ImageTk.PhotoImage(image=stacked)\r\n self._appCanvas[\"stacked\"].create_image(100, 0, image=self._pilFrames[\"stacked\"], anchor=tkinter.NW)\r\n\r\n # Add the layered image to the canvas\r\n self._pilFrames[\"layered\"] = 
PIL.ImageTk.PhotoImage(image=layered)\r\n self._appCanvas[\"layered\"].create_image(0, 0, image=self._pilFrames[\"layered\"], anchor=tkinter.NW)\r\n\r\n # Add the control image to the canvas\r\n imgFrame = cv2.cvtColor(self._system[self._appString[\"device\"].get()][self._appString[\"vision\"].get()][self._appString[\"frame\"].get()], cv2.COLOR_BGR2RGBA)\r\n control = PIL.Image.fromarray(cv2.resize(imgFrame, (600, 600)))\r\n self._pilFrames[\"control\"] = PIL.ImageTk.PhotoImage(image=control)\r\n self._appCanvas[\"control\"].create_image(100, 90, image=self._pilFrames[\"control\"], anchor=tkinter.NW)\r\n\r\n # Continue to update with a delay of 15\r\n self.after(15, self.update)", "def update(self):\n if (self.j + self.step >= self.image.shape[0]) and (self.i + self.step >= self.image.shape[1]):\n self.no_more_crops = True\n elif self.i + self.step >= self.image.shape[1]:\n self.i = 0\n self.j += self.step\n else:\n self.i += self.step", "def transform(self, previousimage):", "def step(self, amt=1):\n \n # For checking if all the animations have their framse looked at\n #activewormind = [i for i, x in enumerate(self._idlelist) if x == False]\n #print \"Worm {} at {:5g}\".format(activewormind, 1000*(time.time() - starttime))\n # save times activated for each worm \n [self.timedata[i].append(1000*(time.time() - starttime)) for i, x in enumerate(self._idlelist) if x == False]\n \n #self._led.buffer = [0] * 480\n self._led.pixheights = [-100] * self._led.numLEDs\n #print type(self._led.buffer)\n for ledcopy in self._ledcopies:\n # self._led.buffer = map(ixor, self._led.buffer, ledcopy.buffer)\n # use pixheights but assume all buffers same size\n # print ledcopy.driver[0].pixheights\n for pix in range(self._led.numLEDs):\n #for ledcopy in self._ledcopies:\n if self._led.pixheights[pix] == ledcopy.driver[0].pixheights[pix]:\n for i in range(3):\n self._led.buffer[3*pix + i] ^= ledcopy.buffer[3*pix + i]\n elif self._led.pixheights[pix] < ledcopy.driver[0].pixheights[pix]:\n for i in range(3):\n self._led.buffer[3*pix + i] = ledcopy.buffer[3*pix + i]\n self._led.pixheights[pix] = ledcopy.driver[0].pixheights[pix] \n self._step += 1", "def process(image):\n pass", "def cleanup(self):\n self.subpixel, self.pixel = self.stepup(self.subpixel, self.pixel, AxisDistance.pixelsize)\n self.pixel, self.tile = self.stepup(self.pixel, self.tile, AxisDistance.tilesize)", "def process_died_event(self, event):\n old_point = self.sprites[event.id][0]\n self.img[old_point.y, old_point.x] = self.sprite_colors[\"Background\"]\n del self.sprites[event.id]", "def aperphot(fn, timekey=None, pos=[0,0], dap=[2,4,6], mask=None, verbose=False, nanval=999, resamp=None, retfull=False):\n # 2009-09-14 10:49 IJC: Created\n # 2010-01-15 14:20 IJC: Added numpy \"_string\" check\n # 2011-12-29 12:01 IJMC: Added peak pixel values to photometry report.\n # 2012-01-25 11:26 IJMC: Adding \"resamp\" option -- thanks to\n # K. Stevenson and J. Harrington of UCF for\n # the suggestion.\n # 2012-02-26 11:53 IJMC: Now return 'ntarg' and 'nsky' -- number of pixels used.\n # 2012-06-07 08:27 IJMC: 'peak' values are now corrected for the\n # resampling factor.\n # 2012-07-03 10:35 IJMC: Fixed a key bug: frames were not\n # correctly background-subtracted when\n # applying partial-pixel resampling.\n # 2012-10-19 13:41 IJMC: Documented 'retfull' option; changed default.\n # 2013-03-20 09:21 IJMC: More error-checking for saving header\n # keywords. Thanks to A. 
Weigel @\n # ETH-Zurich for catching this!\n\n from numpy import meshgrid, median,isfinite,sort,ndarray,string_\n import numpy as np\n import pyfits\n #from analysis import fixval\n from os import path\n from scipy import interpolate\n\n thisobs = phot()\n x0, y0 = pos\n dap_targ, dap_skyinner, dap_skyouter = dap\n if resamp is None or resamp<1:\n resamp = 1\n else:\n resamp = float(resamp)\n \n # Determine size:\n if isinstance(fn,str):\n nx = pyfits.getval(fn, 'NAXIS1')\n ny = pyfits.getval(fn, 'NAXIS2')\n elif isinstance(fn,ndarray):\n nx,ny = fn.shape\n\n nx0, ny0 = nx, ny\n nx = ((nx - 1)*resamp + 1.) # Avoid resampling at pixel locations\n ny = ((ny - 1)*resamp + 1.) # outside the original boundaries.\n\n # Generate or load masks:\n if mask==None:\n xx,yy = meshgrid(np.arange(ny)/resamp, np.arange(nx)/resamp)\n mask_targ = makemask(xx, yy, (x0, y0, dap_targ))\n mask_s1 = makemask(xx, yy, (x0,y0, dap_skyinner))\n mask_s2 = makemask(xx, yy, (x0,y0, dap_skyouter))\n mask_sky = mask_s2 - mask_s1\n else:\n mask_targ = mask==1\n mask_sky = mask==2\n if resamp>1:\n print \"In aperphot, resamp>1 and user-specified mask passed in... beware!\"\n\n # Load data frame:\n thisobs = phot()\n if isinstance(fn,ndarray):\n frame = fn\n elif isinstance(fn, str) or isinstance(fn,string_):\n if not path.isfile(fn):\n print \"file %s not found! exiting...\" % fn\n return thisobs\n frame = pyfits.getdata(fn)\n fixval(frame, nanval)\n\n # Resample data frame\n if resamp>1:\n frame0 = frame.copy()\n xx0 = range(nx0)\n yy0 = range(ny0)\n x1,y1 = np.arange(nx)/resamp, np.arange(ny)/resamp\n rectspline = interpolate.fitpack2.RectBivariateSpline(xx0, yy0, frame0, kx=1, ky=1, s=0)\n frame = rectspline(x1, y1)\n\n #from pylab import *\n #pdb.set_trace()\n # Measure background and aperture photometry\n thisbg, thisebg = estbg(frame, mask=mask_sky, plotalot=verbose, rout=[3,99])\n thisphot = (mask_targ*(frame - thisbg)).sum() /resamp/resamp\n peak = frame.max()\n peak_targ = (mask_targ * frame).max()\n peak_annulus = (mask_sky * frame).max()\n\n thisobs.bg=thisbg\n thisobs.ebg=thisebg\n thisobs.bgstr='phot.estbg: SDOM on bg histogram mean & dispersion after outlier rejection'\n thisobs.phot=thisphot\n thisobs.photstr='by-hand background-subtracted aperture photometry'\n thisobs.ntarg = mask_targ.sum()/resamp/resamp\n thisobs.nsky = mask_sky.sum()/resamp/resamp\n\n thisobs.peak = peak\n thisobs.peak_targ = peak_targ\n thisobs.peak_annulus = peak_annulus\n thisobs.peakstr = 'peak pixel value in frame'\n thisobs.peak_targstr = 'peak pixel value in target aperture'\n thisobs.peak_annulusstr = 'peak pixel value in sky annulus'\n thisobs.position = pos\n thisobs.positionstr = 'user-specified, zero-indexed pixel coordinates.'\n if isinstance(fn, str):\n header = pyfits.getheader(fn)\n if not timekey==None:\n if timekey in header: \n thisobs.time=header['timekey']\n thisobs.timestr='heliocentric modified julian date'\n if 'object' in header: thisobs.object = header['object']\n if 'exptime' in header: thisobs.exptime = header['exptime']\n thisobs.aper = dap\n thisobs.aperstr = 'target, inner, outer aperture diameters, in pixels.'\n thisobs.filename=fn\n thisobs.resamp = resamp\n if retfull:\n thisobs.mask_targ = mask_targ\n thisobs.mask_sky = mask_sky\n thisobs.frame = frame\n\n if verbose:\n from pylab import figure, colorbar\n from nsdata import imshow\n figure(); imshow(frame*mask_targ); colorbar()\n figure(); imshow(frame*mask_sky); colorbar()\n\n return thisobs", "def start(self):\n self.frame = 0\n 
self._init_level(1)\n self.reward = 0\n self.pcontinue = 1\n self.ghost_speed = self.ghost_speed_init\n return self._make_image(), self.reward, self.pcontinue", "def hook_func(module, input, output):\n image_name = get_image_name_for_hook(module)\n data = output.clone().detach()\n data = data.permute(1, 0, 2, 3)\n vutil.save_image(data, image_name, pad_value=0.5)", "def augment(self, image):\n pass", "def post_process(self):\n\t\ti_s = 0\n\t\ti_e = 0\n\t\tif self.trans_t_dict[0][1] == 0:\n\t\t\tif len(self.noise_itv) == 0:\n\t\t\t\tself.trans_t_dict[0][1] = self.fake_start_offset\n\t\t\telse:\n\t\t\t\tself.trans_t_dict[0][1] = self.noise_itv[0][1] # start_offset\n\t\t\tself.trans_t_dict[0][2] = 0.1\n\t\tif self.trans_t_dict[len(self.trans_t_dict)-1][1] == 0:\n\t\t\tif len(self.noise_itv) == 0:\n\t\t\t\tself.trans_t_dict[len(self.trans_t_dict)-1][1] = self.fake_end_offset\n\t\t\telse:\n\t\t\t\tself.trans_t_dict[len(self.trans_t_dict)-1][1] = self.noise_itv[-1][0] # end_offset\n\t\t\tself.trans_t_dict[len(self.trans_t_dict)-1][2] = 0.1\n\n\t\twhile i_s < len(self.trans_t_dict):\n\t\t\twhile i_s < len(self.trans_t_dict) and self.trans_t_dict[i_s][1] != 0:\n\t\t\t\ti_s += 1\n\t\t\tif i_s == len(self.trans_t_dict):\n\t\t\t\ti_e = len(self.trans_t_dict)\n\t\t\tif i_s < len(self.trans_t_dict):\n\t\t\t\ti_s -= 1\n\t\t\t\ti_e = i_s + 1\n\t\t\t\twhile i_e < len(self.trans_t_dict) and self.trans_t_dict[i_e][1] == 0:\n\t\t\t\t\ti_e += 1\n\t\t\t\tif i_e == len(self.trans_t_dict):\n\t\t\t\t\tbreak\n\n\t\t\t\t# incorperate the noise inverval\n\t\t\t\ts_time = self.trans_t_dict[i_s][1]\n\t\t\t\te_time = self.trans_t_dict[i_e][1]\n\t\t\t\t\"\"\"\n\t\t\t\tfor ts in self.noise_itv:\n\t\t\t\t\tif len(ts) == 2:\t\t\t\t\t\t\n\t\t\t\t\t\ttime1 = ts[0]\n\t\t\t\t\t\ttime2 = ts[1]\n\t\t\t\t\t\tif s_time < time1 and time2 < e_time:\n\t\t\t\t\t\t\te_time = min(e_time, time1)\n\t\t\t\t\telse:\n\t\t\t\t\t\ttime0 = ts[0]\n\t\t\t\t\t\tif s_time < time0 and time0 < e_time:\n\t\t\t\t\t\t\te_time = min(e_time, time0)\n\t\t\t\t\"\"\"\n\t\t\t\tchar_len = 0\n\t\t\t\tfor i in range(i_s, i_e):\n\t\t\t\t\tchar_len += len(self.trans_t_dict[i][0])\n\t\t\t\t# ratio = float(self.trans_t_dict[i_e][1]-self.trans_t_dict[i_s][1]) / float(char_len)\n\t\t\t\tratio = float(e_time - s_time) / float(char_len)\n\t\t\t\tchar_len = 0\n\t\t\t\t# s_time = self.trans_t_dict[i_s][1]\n\t\t\t\tfor i in range(i_s+1, i_e):\n\t\t\t\t\tchar_len += len(self.trans_t_dict[i-1][0])\n\t\t\t\t\tself.trans_t_dict[i][1] = s_time + char_len * ratio\n\t\t\t\t\tself.trans_t_dict[i][2] = len(self.trans_t_dict[i][0]) * ratio\n\t\t\ti_s = i_e", "def apply_filter(self, image):\n pass", "def preprocess(args):\n \n # Set up options\n src = args.src\n dest = args.dest\n collect_path = args.collect_path\n formats = args.formats\n ref_img_path = args.ref_img_path\n width = args.width\n debug = args.debug\n if debug:\n print args.__dict__\n # Make necessary directories if there is not.\n if not os.path.exists(dest):\n os.mkdir(dest)\n if not os.path.exists(collect_path):\n os.mkdir(collect_path)\n\n # Open referce image and trying to find the face in it.\n try:\n ref_img_origin = Image.open(os.path.abspath(ref_img_path))\n except IOError as e:\n print \"[IOError] Can't open the reference imgae: {}\".format(ref_img_path)\n print \"[Info] Terminating....\"\n return 1\n\n face_ref_coor, degree_ref = segment_tools.faces_positions(ref_img_origin)\n \n # Only one face is allowed in referece image. 
Raise error if it isn't.\n # Crop the origin image to get the face image.\n if face_ref_coor.shape[0] > 1:\n raise MultiFaceError(\"Detect multiple faces in reference image. There should be only one face.\")\n face_ref = segment_tools.crop_img(ref_img_origin, face_ref_coor[0], offset = True)\n\n # Adjust that image to make eyes lie on horizontal line.\n try:\n eye_angle = face_align_tools.eyes_horizon_angle(face_ref)\n except segment_tools.NotDetectedError:\n print \"[NotDetectedError] This reference image is not good enough. The program can't make the eyes horizontal.\"\n print \"[NotDetectedError] Pleas use another reference image.\"\n print \"Terminating....\"\n return 1\n\n total_degree = eye_angle + degree_ref\n img_ref_rotated = ref_img_origin.rotate(total_degree, resample = Image.CUBIC)\n face_ref_coor, _ = segment_tools.faces_positions(img_ref_rotated)\n face_ref = segment_tools.crop_img(img_ref_rotated, face_ref_coor[0], offset = True)\n \n # Resize the reference face to desired witdh (but preserve the width/heigh ratio.)\n ref_width, ref_heigh = face_ref.size\n face_ref = face_ref.resize((width, ref_heigh*width/ref_width))\n if debug:\n face_ref.show()\n \n ref_file_name = os.path.basename(ref_img_path)\n face_ref.save(os.path.join(os.path.abspath(dest), \"ref_\" + ref_file_name))\n print \"[Info] Complete preprocess of reference image.\"\n\n # Walk through the source directory.\n print \"[Info] Start processing files in {src}.\".format(src = os.path.abspath(src))\n for rel_path, dir_names, file_names in os.walk(os.path.abspath(src)):\n for filename in file_names:\n if np.any(map(filename.endswith, formats)):\n file_path = os.path.join(os.path.abspath(rel_path), filename)\n print \"[Info] Start processing {file_path}.\".format(file_path = file_path)\n try:\n target_img_origin = Image.open(file_path)\n except IOError as e:\n print \"[IOError] Can not open {}\".format(file_path)\n print \"[Info] Passing this image.\"\n continue\n \n # Try to find faces in target image. If don't, copy it to collection directory.\n try:\n faces_target_coors, degree_target = segment_tools.faces_positions(target_img_origin)\n except segment_tools.NotDetectedError as e:\n print \"[NotDetectedError] Does not find any face in {filename}. Collect it into {collect_path}\".format(filename = filename, collect_path = collect_path)\n target_img_origin.save(os.path.join(os.path.abspath(collect_path), filename))\n continue # Brake loop for not finding any face in the picture.\n\n # Adjust all found faces to make them just.\n target_img_rotated = target_img_origin.rotate(degree_target, resample = Image.CUBIC)\n for face_coor in faces_target_coors:\n temp_img = segment_tools.crop_img(target_img_rotated, face_coor, offset=True)\n try:\n eyes_degree = face_align_tools.eyes_horizon_angle(temp_img)\n except segment_tools.NotDetectedError:\n eyes_degree = 0\n face_target = temp_img.rotate(eyes_degree)\n temp_file_name = random_prefix() + filename\n if debug:\n face_target.show()\n face_target.save(os.path.join(os.path.abspath(dest), temp_file_name))\n temp_aligned_file_name = \"aligned_\" + temp_file_name\n try:\n face_target_aligned = face_align_tools.face_align(face_ref, face_target)\n face_target_aligned.save(os.path.join(os.path.abspath(dest), temp_aligned_file_name))\n except segment_tools.NotDetectedError:\n print \"[AlignError] Can't align face. 
Moving to {collection}.\".format(collection = collect_path)\n face_target.save(os.path.join(os.path.abspath(collect_path), \"not_aligned_\" + temp_file_name))\n print \"[Info] Saving {}\".format(os.path.join(os.path.abspath(collect_path), \"not_aligned_\" + temp_file_name))\n continue\n masked_target_img = segment_tools.mask_img(target_img_rotated, faces_target_coors)\n\n if debug:\n masked_target_img.show()\n masked_target_img.save(\"masked.jpg\")\n \n try:\n while True:\n temp_face_coors, temp_degree = segment_tools.faces_positions(masked_target_img)\n temp_img = masked_target_img.rotate(temp_degree, resample = Image.CUBIC)\n if debug:\n print \"temp_face_coors\", temp_face_coors\n print \"[Info] Multiple faces are found in {file_path}\".format(file_path = file_path)\n for face_coor in temp_face_coors:\n temp_face = segment_tools.crop_img(temp_img, face_coor, offset = True)\n eye_angle = face_align_tools.eyes_horizon_angle(temp_face)\n face_target = temp_face.rotate(eye_angle, resample = Image.CUBIC)\n if debug:\n face_target.show()\n face_target_aligned = face_align_tools.face_align(face_ref, face_target)\n temp_file_name = random_prefix() + filename\n temp_aligned_file_name = \"aligned_\" + temp_file_name\n print \"[Info] Sucessful aligned {}\".format(temp_file_name)\n if debug:\n masked_target_img.show()\n except segment_tools.NotDetectedError:\n file_path = os.path.join(os.path.abspath(rel_path), filename)\n print \"[Info] Complete searching faces in {file_path}\".format(file_path = file_path)", "def test_rolling_before_analysis(self):\n cheese = TomoCheese.from_demo_images()\n cheese.analyze()\n original_roi_1 = copy.copy(cheese.module.rois[\"1\"].pixel_value)\n for img in cheese.dicom_stack:\n img.roll(direction=\"x\", amount=20)\n cheese.analyze()\n new_roi_1 = cheese.module.rois[\"1\"].pixel_value\n assert math.isclose(original_roi_1, new_roi_1, abs_tol=3)", "def test_do_manga_dither_after_sequence(self):\n sopTester.updateModel('mcp', TestHelper.mcpState['boss_science'])\n dither = 'N'\n cmdState = self.actorState.doMangaSequence\n cmdState.reinitialize(self.cmd)\n cmdState.count = 1\n cmdState.dithers = 'NSE'\n cmdState.reset_ditherSeq()\n self.cmd.verbose = False\n masterThread.do_apogeemanga_sequence(self.cmd, cmdState, myGlobals.actorState)\n self.cmd.reset()\n self.cmd.verbose = self.verbose\n self._do_manga_dither(4, 28, 0, 0, dither=dither)", "def augmenter(x, y):\n # Note that we only use fliprots along axis=(1,2), i.e. 
the yx axis\n # as 3D microscopy acquisitions are usually not axially symmetric\n x, y = random_fliprot(x, y, axis=(1, 2))\n x = random_intensity_change(x)\n return x, y", "def go():\n ##########\n #\n # MB19284\n #\n ##########\n\n ##########\n # Kp-band reduction\n ##########\n\n target = 'mb19284'\n sci_files = ['i200822_a011{0:03d}_flip'.format(ii) for ii in range(2, 5+1)]\n sci_files += ['i200822_a012{0:03d}_flip'.format(ii) for ii in range(2, 25+1)]\n sky_files = ['i200822_a018{0:03d}_flip'.format(ii) for ii in range(2, 6+1)]\n refSrc = [917.75, 1033.5] # This is the target\n # Alternative star to try (bright star to bottom of target): [1015, 581.9]\n \n sky.makesky(sky_files, target, 'kp_tdOpen', instrument=osiris)\n data.clean(sci_files, target, 'kp_tdOpen', refSrc, refSrc, field=target, instrument=osiris)\n data.calcStrehl(sci_files, 'kp_tdOpen', field=target, instrument=osiris)\n data.combine(sci_files, 'kp_tdOpen', epoch, field=target,\n trim=0, weight='strehl', submaps=3, instrument=osiris)\n\n ##########\n #\n # KB200101\n #\n ##########\n\n ##########\n # Kp-band reduction\n ##########\n\n # -- If you have more than one position angle, make sure to\n # clean them seperatly.\n # -- Strehl and Ref src should be the pixel coordinates of a bright\n # (but non saturated) source in the first exposure of sci_files.\n # -- If you use the OSIRIS image, you must include the full filename in the list. \n target = 'kb200101'\n sci_files = ['i200822_a014{0:03d}_flip'.format(ii) for ii in range(2, 28+1)]\n sci_files += ['i200822_a015{0:03d}_flip'.format(ii) for ii in range(2, 5+1)]\n sci_files += ['i200822_a016{0:03d}_flip'.format(ii) for ii in range(2, 5+1)]\n sky_files = ['i200822_a017{0:03d}_flip'.format(ii) for ii in range(2, 6+1)]\n refSrc = [975, 1006] # This is the target\n # Alternative star to try (bright star to right of target): [1158, 994]\n \n sky.makesky(sky_files, target, 'kp_tdOpen', instrument=osiris)\n data.clean(sci_files, target, 'kp_tdOpen', refSrc, refSrc, field=target, instrument=osiris)\n data.calcStrehl(sci_files, 'kp_tdOpen', field=target, instrument=osiris)\n data.combine(sci_files, 'kp_tdOpen', epoch, field=target,\n trim=1, weight='strehl', submaps=3, instrument=osiris)", "def main():\n NAME = os.path.basename(__file__).split(\".\")[0]\n\n with its.device.ItsSession() as cam:\n\n props = cam.get_camera_properties()\n\n white_level = float(props['android.sensor.info.whiteLevel'])\n black_levels = props['android.sensor.blackLevelPattern']\n idxs = its.image.get_canonical_cfa_order(props)\n black_levels = [black_levels[i] for i in idxs]\n\n # Expose for the scene with min sensitivity\n sens_min, sens_max = props['android.sensor.info.sensitivityRange']\n s_ae,e_ae,awb_gains,awb_ccm,_ = cam.do_3a(get_results=True)\n s_e_prod = s_ae * e_ae\n\n # Make the image brighter since the script looks at linear Bayer\n # raw patches rather than gamma-encoded YUV patches (and the AE\n # probably under-exposes a little for this use-case).\n s_e_prod *= 2\n\n # Capture raw frames across the full sensitivity range.\n NUM_SENS_STEPS = 15\n sens_step = int((sens_max - sens_min - 1) / float(NUM_SENS_STEPS))\n reqs = []\n sens = []\n for s in range(sens_min, sens_max, sens_step):\n e = int(s_e_prod / float(s))\n req = its.objects.manual_capture_request(s, e)\n req[\"android.colorCorrection.transform\"] = \\\n its.objects.float_to_rational(awb_ccm)\n req[\"android.colorCorrection.gains\"] = awb_gains\n reqs.append(req)\n sens.append(s)\n\n caps = cam.do_capture(reqs, cam.CAP_RAW)\n\n # 
A list of the (x,y) coords of the center pixel of a collection of\n # patches of a color checker chart. Each patch should be uniform,\n # however the actual color doesn't matter. Note that the coords are\n # relative to the *converted* RGB image, which is 1/2 x 1/2 of the\n # full size; convert back to full.\n img = its.image.convert_capture_to_rgb_image(caps[0], props=props)\n patches = its.image.get_color_checker_chart_patches(img, NAME+\"_debug\")\n patches = [(2*x,2*y) for (x,y) in sum(patches,[])]\n\n lines = []\n for (s,cap) in zip(sens,caps):\n # For each capture, compute the mean value in each patch, for each\n # Bayer plane; discard patches where pixels are close to clamped.\n # Also compute the variance.\n CLAMP_THRESH = 0.2\n planes = its.image.convert_capture_to_planes(cap, props)\n points = []\n for i,plane in enumerate(planes):\n plane = (plane * white_level - black_levels[i]) / (\n white_level - black_levels[i])\n for j,(x,y) in enumerate(patches):\n tile = plane[y/2-16:y/2+16:,x/2-16:x/2+16:,::]\n mean = its.image.compute_image_means(tile)[0]\n var = its.image.compute_image_variances(tile)[0]\n if (mean > CLAMP_THRESH and mean < 1.0-CLAMP_THRESH):\n # Each point is a (mean,variance) tuple for a patch;\n # for a given ISO, there should be a linear\n # relationship between these values.\n points.append((mean,var))\n\n # Fit a line to the points, with a line equation: y = mx + b.\n # This line is the relationship between mean and variance (i.e.)\n # between signal level and noise, for this particular sensor.\n # In the DNG noise model, the gradient (m) is \"S\", and the offset\n # (b) is \"O\".\n points.sort()\n xs = [x for (x,y) in points]\n ys = [y for (x,y) in points]\n m,b = numpy.polyfit(xs, ys, 1)\n lines.append((s,m,b))\n print s, \"->\", m, b\n\n # TODO: Clean up these checks (which currently fail in some cases).\n # Some sanity checks:\n # * Noise levels should increase with brightness.\n # * Extrapolating to a black image, the noise should be positive.\n # Basically, the \"b\" value should correspnd to the read noise,\n # which is the noise level if the sensor was operating in zero\n # light.\n #assert(m > 0)\n #assert(b >= 0)\n\n # Draw a plot.\n pylab.plot(xs, ys, 'r')\n pylab.plot([0,xs[-1]],[b,m*xs[-1]+b],'b')\n matplotlib.pyplot.savefig(\"%s_plot_mean_vs_variance.png\" % (NAME))\n\n # Now fit a line across the (m,b) line parameters for each sensitivity.\n # The gradient (m) params are fit to the \"S\" line, and the offset (b)\n # params are fit to the \"O\" line, both as a function of sensitivity.\n gains = [d[0] for d in lines]\n Ss = [d[1] for d in lines]\n Os = [d[2] for d in lines]\n mS,bS = numpy.polyfit(gains, Ss, 1)\n mO,bO = numpy.polyfit(gains, Os, 1)\n\n # Plot curve \"O\" as 10x, so it fits in the same scale as curve \"S\".\n pylab.plot(gains, [10*o for o in Os], 'r')\n pylab.plot([gains[0],gains[-1]],\n [10*mO*gains[0]+10*bO, 10*mO*gains[-1]+10*bO], 'b')\n pylab.plot(gains, Ss, 'r')\n pylab.plot([gains[0],gains[-1]], [mS*gains[0]+bS, mS*gains[-1]+bS], 'b')\n matplotlib.pyplot.savefig(\"%s_plot_S_O.png\" % (NAME))\n\n print \"\"\"\n /* Generated test code to dump a table of data for external validation\n * of the noise model parameters.\n */\n #include <stdio.h>\n #include <assert.h>\n double compute_noise_model_entry_S(int sens);\n double compute_noise_model_entry_O(int sens);\n int main(void) {\n int sens;\n for (sens = %d; sens <= %d; sens += 100) {\n double o = compute_noise_model_entry_O(sens);\n double s = compute_noise_model_entry_S(sens);\n 
printf(\"%%d,%%lf,%%lf\\\\n\", sens, o, s);\n }\n return 0;\n }\n\n /* Generated functions to map a given sensitivity to the O and S noise\n * model parameters in the DNG noise model.\n */\n double compute_noise_model_entry_S(int sens) {\n double s = %e * sens + %e;\n return s < 0.0 ? 0.0 : s;\n }\n double compute_noise_model_entry_O(int sens) {\n double o = %e * sens + %e;\n return o < 0.0 ? 0.0 : o;\n }\n \"\"\"%(sens_min,sens_max,mS,bS,mO,bO)", "def prepro(I):\n# I = env.reset() # Use this to verify, whats happening\n# plt.imshow(I)\n I = I[35:195] # crop and keep only the play area\n I = I[::2,::2,0] # downsample by factor of 2, take every second row and column, and take only \"R\" component out of RGB image\n I[I == 144] = 0 # erase background (background type 1)\n I[I == 109] = 0 # erase background (background type 2)\n I[I != 0] = 1 # everything else (but paddles, ball) just set to 1\n return I.astype(np.float).ravel() # convert to 1D array and return", "def forward(self, x): \n pal1_sources = list()\n pal2_sources = list()\n loc_pal1 = list()\n conf_pal1 = list()\n loc_pal2 = list()\n conf_pal2 = list()\n\n # apply vgg up to conv3_3 relu\n for k in range(16):\n x = self.vgg[k](x)\n\n of1 = x\n s = self.L2Normof1(of1)\n pal1_sources.append(s)\n \n # apply vgg up to conv4_3 relu\n for k in range(16, 23):\n x = self.vgg[k](x)\n\n of2 = x\n s = self.L2Normof2(of2)\n pal1_sources.append(s)\n\n # apply vgg up to conv5_3 relu\n for k in range(23, 30):\n x = self.vgg[k](x)\n of3 = x\n s = self.L2Normof3(of3)\n pal1_sources.append(s)\n\n # apply vgg up to fc7\n for k in range(30, len(self.vgg)):\n x = self.vgg[k](x)\n of4 = x\n pal1_sources.append(of4)\n \n # apply extra layers and cache source layer outputs\n for k in range(2):\n x = F.relu(self.extras[k](x), inplace=True)\n of5 = x\n pal1_sources.append(of5)\n for k in range(2, 4):\n x = F.relu(self.extras[k](x), inplace=True)\n of6 = x\n pal1_sources.append(of6)\n\n ## fpn module\n \"\"\"\n lfpn6 = self.fpn_topdown6(of6)\n lfpn5 = self._upsample_product(self.fpn_topdown5(of6), self.fpn_latlayer5(of5))\n lfpn4 = self._upsample_product(self.fpn_topdown4(of5), self.fpn_latlayer4(of4))\n lfpn3 = self._upsample_product(self.fpn_topdown3(of4), self.fpn_latlayer3(of3))\n lfpn2 = self._upsample_product(self.fpn_topdown2(of3), self.fpn_latlayer2(of2))\n lfpn1 = self._upsample_product(self.fpn_topdown1(of2), self.fpn_latlayer1(of1))\n\n\n ef1 = self.fpn_fem3_3(lfpn1)\n ef1 = self.L2Normef1(ef1)\n ef2 = self.fpn_fem4_3(lfpn2)\n ef2 = self.L2Normef2(ef2)\n ef3 = self.fpn_fem5_3(lfpn3)\n ef3 = self.L2Normef3(ef3)\n\n ef4 = self.fpn_fem7(lfpn4)\n ef5 = self.fpn_fem6_2(lfpn5)\n ef6 = self.fpn_fem7_2(lfpn6)\n \"\"\"\n\n conv7 = F.relu(self.fpn_topdown[0](of6), inplace=True)\n x = F.relu(self.fpn_topdown[1](conv7), inplace=True)\n conv6 = F.relu(self._upsample_product(x, self.fpn_latlayer[0](of5)), inplace=True)\n\n x = F.relu(self.fpn_topdown[2](conv6), inplace=True)\n convfc7_2 = F.relu(self._upsample_product(x, self.fpn_latlayer[1](of4)), inplace=True)\n\n x = F.relu(self.fpn_topdown[3](convfc7_2), inplace=True)\n conv5 = F.relu(self._upsample_product(x, self.fpn_latlayer[2](of3)), inplace=True)\n\n x = F.relu(self.fpn_topdown[4](conv5), inplace=True)\n conv4 = F.relu(self._upsample_product(x, self.fpn_latlayer[3](of2)), inplace=True)\n\n x = F.relu(self.fpn_topdown[5](conv4), inplace=True)\n conv3 = F.relu(self._upsample_product(x, self.fpn_latlayer[4](of1)), inplace=True)\n\n ef1 = self.fpn_fem[0](conv3)\n ef1 = self.L2Normef1(ef1)\n ef2 = 
self.fpn_fem[1](conv4)\n ef2 = self.L2Normef2(ef2)\n ef3 = self.fpn_fem[2](conv5)\n ef3 = self.L2Normef3(ef3)\n ef4 = self.fpn_fem[3](convfc7_2)\n ef5 = self.fpn_fem[4](conv6)\n ef6 = self.fpn_fem[5](conv7)\n\n pal2_sources = (ef1, ef2, ef3, ef4, ef5, ef6)\n\n ## first shot \n for (x, l, c) in zip(pal1_sources, self.loc_pal1, self.conf_pal1):\n loc_pal1.append(l(x).permute(0, 2, 3, 1).contiguous())\n conf_pal1.append(c(x).permute(0, 2, 3, 1).contiguous())\n \n ## second shot\n for (x, l, c) in zip(pal2_sources, self.loc_pal2, self.conf_pal2):\n loc_pal2.append(l(x).permute(0, 2, 3, 1).contiguous())\n conf_pal2.append(c(x).permute(0, 2, 3, 1).contiguous())\n\n # first shot\n loc_pal1 = torch.cat([o.view(o.size(0), -1) for o in loc_pal1], 1)\n conf_pal1 = torch.cat([o.view(o.size(0), -1) for o in conf_pal1], 1)\n \n # second shot\n loc_pal2 = torch.cat([o.view(o.size(0), -1) for o in loc_pal2], 1)\n conf_pal2 = torch.cat([o.view(o.size(0), -1) for o in conf_pal2], 1)\n\n if self.phase == 'test':\n # 测试时, 仅使用shot2 的输出\n output = self.detect(\n loc_pal2.view(loc_pal2.size(0), -1, 4),\n self.softmax(conf_pal2.view(conf_pal2.size(0), -1,\n self.num_classes)), # conf preds\n )\n else:\n ## 训练时,使用shot1 和 shot2 的输出\n output = (\n loc_pal1.view(loc_pal1.size(0), -1, 4),\n conf_pal1.view(conf_pal1.size(0), -1, self.num_classes),\n loc_pal2.view(loc_pal2.size(0), -1, 4),\n conf_pal2.view(conf_pal2.size(0), -1, self.num_classes))\n return output", "def forward_dummy(self, img):\n output = self.generator(img)\n return output", "def noiseReduction(self):\n pass", "def __getitem__(self, index):\n A_path = self.A_paths[index % self.A_size] # make sure index is within then range\n #if self.opt.serial_batches: # make sure index is within then range\n \n\n A_img = Image.open(A_path).convert('L')\n \n A = self.transform_A(A_img)\n # B20 = self.transform_B(B20_img)\n #B2 = self.transform_B(B2_img)\n\n\n index_B50 = index % self.B50_size\n B50_path = self.B50_paths[index_B50]\n B50_img = Image.open(B50_path).convert('L')\n B50 = self.transform_B(B50_img)\n\n\n\n index_B100 = index % self.B100_size\n B100_path = self.B100_paths[index_B100]\n B100_img = Image.open(B100_path).convert('L')\n B100 = self.transform_B(B100_img)\n\n index_B150 = index % self.B150_size\n B150_path = self.B150_paths[index_B150]\n B150_img = Image.open(B150_path).convert('L')\n B150 = self.transform_B(B150_img)\n\n\n \n\n index_m0 = 0\n m0_path = self.m0_paths[index_m0]\n m0_img = Image.open(m0_path).convert('L')\n m0 = self.transform_B(m0_img)\n \n index_m50 = 0\n m50_path = self.m50_paths[index_m50]\n m50_img = Image.open(m50_path).convert('L')\n m50 = self.transform_B(m50_img)\n\n index_m100 = 0\n m100_path = self.m100_paths[index_m100]\n m100_img = Image.open(m100_path).convert('L')\n m100 = self.transform_B(m100_img)\n\n index_m150 = 0\n m150_path = self.m150_paths[index_m150]\n m150_img = Image.open(m150_path).convert('L')\n m150 = self.transform_B(m150_img)\n\n\n\n return {'A': A, 'B50': B50,'B100': B100, 'B150': B150, 'A_paths': A_path, 'B50_paths': B50_path,'B100_paths': B100_path, 'B150_paths': B150_path, 'm0':m0, 'm50':m50,'m100':m100, 'm150':m150}", "def Resampler(name):\n\n def resample_average(path, dsquery, dstile, image_format):\n for i in range(1, dstile.RasterCount+1):\n res = gdal.RegenerateOverview(dsquery.GetRasterBand(i), dstile.GetRasterBand(i), \"average\")\n if res != 0:\n raise ImageOutputException(\"RegenerateOverview() failed with error %d\" % res)\n\n gdal_write(path, dstile, image_format)\n\n def 
resample_antialias(path, dsquery, dstile, image_format):\n querysize = dsquery.RasterXSize\n tilesize = dstile.RasterXSize\n\n array = numpy.zeros((querysize, querysize, 4), numpy.uint8)\n for i in range(dstile.RasterCount):\n array[:,:,i] = gdalarray.BandReadAsArray(dsquery.GetRasterBand(i+1), 0, 0, querysize, querysize)\n im = Image.fromarray(array, 'RGBA') # Always four bands\n im1 = im.resize((tilesize,tilesize), Image.ANTIALIAS)\n\n if os.path.exists(path):\n im0 = Image.open(path)\n im1 = Image.composite(im1, im0, im1)\n\n ensure_dir_exists(path)\n\n if image_format == \"JPEG\":\n im1.save(path, image_format, quality=jpeg_quality)\n else:\n im1.save(path, image_format)\n\n\n if name == \"average\":\n return resample_average\n elif name == \"antialias\":\n return resample_antialias\n\n resampling_methods = {\n \"near\" : gdal.GRA_NearestNeighbour,\n \"bilinear\" : gdal.GRA_Bilinear,\n \"cubic\" : gdal.GRA_Cubic,\n \"cubicspline\" : gdal.GRA_CubicSpline,\n \"lanczos\" : gdal.GRA_Lanczos\n }\n\n resampling_method = resampling_methods[name]\n\n def resample_gdal(path, dsquery, dstile, image_format):\n querysize = dsquery.RasterXSize\n tilesize = dstile.RasterXSize\n\n dsquery.SetGeoTransform( (0.0, tilesize / float(querysize), 0.0, 0.0, 0.0, tilesize / float(querysize)) )\n dstile.SetGeoTransform( (0.0, 1.0, 0.0, 0.0, 0.0, 1.0) )\n\n res = gdal.ReprojectImage(dsquery, dstile, None, None, resampling_method)\n if res != 0:\n raise ImageOutputException(\"ReprojectImage() failed with error %d\" % res)\n\n gdal_write(path, dstile, image_format)\n\n return resample_gdal", "def exposure():\n def r(x):\n return x/6e4\n\n def w(x):\n return int(x*6e4)\n return r, w", "def process_frame(self, downsize):\n # if (not hasattr(downsize,'shape')) and (not hasattr(downsize,'len')):\n # downsize = np.array(downsize)\n\n if type(downsize) != np.ndarray:\n raise TypeError\n\n if not downsize.any():\n raise ValueError\n\n if self.pre_resize:\n downsize = cv2.resize(downsize, (0, 0), fx=self.resize_factor, fy=self.resize_factor)\n\n self.frame_history.append(downsize)\n\n # Remove no longer needed frames from memory\n self.frame_history = self.frame_history[-(self.LMC_rec_depth):]\n downsize = signal.lfilter(self.b, self.a, self.frame_history, axis=0)[-1]\n\n # Center surround antagonism kernel applied.\n\n downsize = cv2.filter2D(downsize, -1, self.CSKernel)\n\n # RTC filter.\n u_pos = deepcopy(downsize)\n u_neg = deepcopy(downsize)\n u_pos[u_pos < 0] = 0\n u_neg[u_neg > 0] = 0\n u_neg = -u_neg\n\n # On first step, instead of computing just save the images.\n if self.t == self.T0:\n self.v_pos_prev = deepcopy(u_pos)\n self.v_neg_prev = deepcopy(u_neg)\n self.u_pos_prev = deepcopy(u_pos)\n self.u_neg_prev = deepcopy(u_neg)\n\n # Do everything for pos == ON.\n tau_pos = u_pos - self.u_pos_prev\n tau_pos[tau_pos >= 0] = 0.001\n tau_pos[tau_pos < 0] = 0.1\n mult_pos = self.rtc_exp(self.dt, tau_pos)\n v_pos = -(mult_pos - 1) * u_pos + mult_pos * self.v_pos_prev\n self.v_pos_prev = deepcopy(v_pos)\n\n # Do everything for neg == OFF.\n tau_neg = u_neg - self.u_neg_prev\n tau_neg[tau_neg >= 0] = 0.001\n tau_neg[tau_neg < 0] = 0.1\n mult_neg = self.rtc_exp(self.dt, tau_neg)\n v_neg = -(mult_neg - 1) * u_neg + mult_neg * self.v_neg_prev\n self.v_neg_prev = deepcopy(v_neg)\n\n # keep track of previous u.\n self.u_pos_prev = deepcopy(u_pos)\n self.u_neg_prev = deepcopy(u_neg)\n\n # Subtract v from u to give the output of each channel.\n out_pos = u_pos - v_pos\n out_neg = u_neg - v_neg\n\n # Now apply yet another 
filter to both parts.\n out_pos = cv2.filter2D(out_pos, -1, self.H_filter)\n out_neg = cv2.filter2D(out_neg, -1, self.H_filter)\n out_pos[out_pos < 0] = 0\n out_neg[out_neg < 0] = 0\n\n if self.t == self.T0:\n self.out_neg_prev = deepcopy(out_neg)\n\n # Delay off channel.\n out_neg = signal.lfilter(self.b1, self.a1, [self.out_neg_prev, out_neg], axis=0)[-1]\n self.out_neg_prev = out_neg\n downsize = out_neg * out_pos\n\n # Show image.\n downsize *= self.gain\n downsize = np.tanh(downsize)\n\n # Threshold.\n downsize[downsize < self.threshold] = 0\n\n if not self.pre_resize:\n downsize = cv2.resize(downsize, (0, 0), fx=self.resize_factor, fy=self.resize_factor)\n\n self.t += self.dt\n\n return downsize", "def manipulations(path):\r\n\r\n print (\"\\n Working on %s\\n\" %(path))\r\n\r\n # Creates a folder with the results for the current image\r\n if not os.path.exists(\"Results\\\\%s\" %(path)):\r\n os.makedirs(\"Results\\\\%s\" %(path))\r\n\r\n # The variations made of the image\r\n func.pixelImage(path, 10, 10)\r\n func.animate(path)\r\n func.colorScale(path, 0)\r\n func.colorScale(path, 1)\r\n func.colorScale(path, 2)\r\n func.scan(path, 280)\r\n func.greyImage(path)\r\n func.colorSteps(path, 1)\r\n func.inverted(path)", "def generate_images_pred(self, inputs, outputs):\n for scale in self.scales:\n disp = outputs[(\"disp\", scale)]\n disp = F.interpolate(\n disp, [self.height, self.width], mode=\"bilinear\", align_corners=False)\n source_scale = 0\n\n _, depth = disp_to_depth(disp, self.min_depth, self.max_depth)\n\n outputs[(\"depth\", 0, scale)] = depth\n\n for i, frame_id in enumerate(self.frame_ids[1:]):\n\n T = outputs[(\"cam_T_cam\", 0, frame_id)]\n\n # from the authors of https://arxiv.org/abs/1712.00175\n # mean-normalized inverse depth from [62] to discourage shrinking of the estimated depth\n\n axisangle = outputs[(\"axisangle\", 0, frame_id)]\n translation = outputs[(\"translation\", 0, frame_id)]\n\n inv_depth = 1 / depth\n mean_inv_depth = inv_depth.mean(3, True).mean(2, True)\n\n T = transformation_from_parameters(\n axisangle[:, 0], translation[:, 0] * mean_inv_depth[:, 0], frame_id < 0)\n\n cam_points = self.backproject_depth[source_scale](\n depth, inputs[(\"inv_K\", source_scale)])\n pix_coords = self.project_3d[source_scale](\n cam_points, inputs[(\"K\", source_scale)], T)\n\n outputs[(\"sample\", frame_id, scale)] = pix_coords\n\n outputs[(\"color\", frame_id, scale)] = F.grid_sample(\n inputs[(\"color\", frame_id, source_scale)],\n outputs[(\"sample\", frame_id, scale)],\n padding_mode=\"border\")\n\n outputs[(\"color_identity\", frame_id, scale)] = \\\n inputs[(\"color\", frame_id, source_scale)]", "def des_image(exptime=100,mag=None, Nstar=1,ccd=None,seeing=[0.9,0.,0.],npix=npix,zenith=0,filter='r', theta=0., phi=0,corrector='corrector',x=None,y=None,z=None,suband=None,regular=False,setbkg=True):\n gain = 0.21 # convert electrons to ADU\n zeropoint = 26.794176 # r band, from Nikolay\n \n if setbkg == False:\n skyphoton = 0.\n else:\n skyphoton = 8.460140*exptime\n bkg = skyphoton*gain\n res = genImgV(filename=None,Nstar=Nstar,ccd=ccd,seeing=seeing,npix=npix,zenith=zenith,filter=filter, theta=theta, phi=phi,corrector=corrector,x=x,y=y,z=z,suband=suband,regular=regular)\n if Nstar == 1:\n objectphoton = exptime*10**(0.4*(zeropoint - mag))\n psf = res[0][0][4:].reshape(npix,npix)\n img = (psf * objectphoton + skyphoton)*gain\n img = rebin(img,(40,40))\n psf = rebin(psf,(40,40))\n img = img + add_imageNoise(img)\n if Nstar > 1:\n img = []\n psf = []\n magg = []\n for 
i in range(Nstar):\n if type(mag) == list:\n rng = abs(mag[1] - mag[0])\n magi = min(mag)+np.random.rand()*rng\n objectphoton = exptime*10**(0.4*(zeropoint - magi))\n psfi = res[0][i][4:].reshape(npix,npix)\n imgi = (psfi * objectphoton + skyphoton)*gain\n imgi = imgi+np.sqrt(imgi)\n imgi = rebin(imgi,(40,40))\n psfi = rebin(psfi,(40,40))\n imgi = imgi + add_imageNoise(imgi)\n img.append(imgi)\n psf.append(psfi)\n magg.append(magi)\n return img,bkg,psf,magg", "def main():\n logging.basicConfig() # create a baseconfiguration s.t. we cann now log \n cycle = 0\n while True:\n\n logging.info(f\"{time.now()} - Start cycle {cycle}\") # changed from print to info \n do_unstable_magick(cycle)\n logging.info(f\"{time.nos()} - Finished cycle {cycle}\")", "def _aperture_to_photopoint_(self, *args, **kwargs):\n\n if not self.has_sky():\n return super(GALEX,self)._aperture_to_photopoint_( *args, **kwargs )\n\n datacps, bkgdcps = args\n datacounts = datacps[0]*self.exposuretime\n bkgdcounts = bkgdcps[0]*self.exposuretime\n \n # ------------------\n # - One Photopoint\n if len(np.atleast_1d(datacounts))==1:\n return get_photopoint(lbda=self.lbda,\n datacounts=np.atleast_1d(datacounts)[0],\n bkgdcounts=np.atleast_1d(bkgdcounts)[0],\n exptime=self.exposuretime,\n source=\"image\",mjd=self.mjd,\n zp=self.mab0,\n bandname=self.bandname,\n instrument_name=self.instrument_name)\n # -----------------------\n # - Several Photopoints\n return [get_photopoint(lbda=self.lbda,\n datacounts=datacounts_,\n bkgdcounts=bkgdcounts_,\n exptime=self.exposuretime,\n source=\"image\",mjd=self.mjd,\n zp=self.mab0,bandname=self.bandname,\n instrument_name=self.instrument_name)\n for datacounts_,bkgdcounts_ in zip(datacounts,bkgdcounts)]", "def features_combine():\n\n\n\t# PROCESSING AUDIO", "def process_image(image):\r\n image = random_brightness(image)\r\n image = crop_image(image)\r\n image = resize(image)\r\n return image", "def process_sound_map():\n pass", "def update_channels(imgs, msks, **settings):\n\n\tshp = imgs.shape\n\tnew_imgs = np.zeros((shp[0],shp[1],shp[2], settings['IN_CHANNEL_NO']))\n\tnew_msks = np.zeros((shp[0],shp[1],shp[2], settings['OUT_CHANNEL_NO']))\n\n\tif settings['MODE']==1:\n\t\tnew_imgs[:,:,:,0] = imgs[:,:,:,2] # flair\n\t\tnew_msks[:,:,:,0] = msks[:,:,:,0]+msks[:,:,:,1]+msks[:,:,:,2]+msks[:,:,:,3]\n\t\tprint('-'*10,' Whole tumor', '-'*10)\n\n\telif settings['MODE'] == 2:\n\t\t#core (non enhancing)\n\t\tnew_imgs[:,:,:,0] = imgs[:,:,:,0] # t1 post\n\t\tnew_msks[:,:,:,0] = msks[:,:,:,3]\n\t\tprint('-'*10,' Predicing enhancing tumor', '-'*10)\n\n\telif settings['MODE'] == 3:\n\t\t#core (non enhancing)\n\t\tnew_imgs[:,:,:,0] = imgs[:,:,:,1]# t2 post\n\t\tnew_msks[:,:,:,0] = msks[:,:,:,0]+msks[:,:,:,2]+msks[:,:,:,3] # active core\n\t\tprint('-'*10,' Predicing active Core', '-'*10)\n\n\telse:\n\t\tnew_msks[:,:,:,0] = msks[:,:,:,0]+msks[:,:,:,1]+msks[:,:,:,2]+msks[:,:,:,3]\n\n\treturn new_imgs.astype(np.float32), new_msks.astype(np.float32)", "def preprocess_images():\n \n # Set up the lists to collect the images and measurements\n images = []\n measurements = []\n \n # Set up the path to the data files \n data_sets_path = 'data'\n data_sets = [os.path.join(data_sets_path, i) for i\n in os.listdir(data_sets_path)]\n \n # Step through the data folders and collect the images\n # and the steering angles\n for data_set in data_sets:\n lines = []\n \n # Open up the csv file of image paths and steering angles\n with open(os.path.join(data_set,\n 'driving_log.csv')) as csvfile:\n reader = csv.reader(csvfile)\n 
for line in reader:\n lines.append(line)\n for line in lines:\n source_path = line[0]\n filename = source_path.split('\\\\')[-1]\n current_path = os.path.join(data_set, 'IMG',\n filename)\n \n # Import each image and change it to RGB\n BGR_image = cv2.imread(current_path)\n image = cv2.cvtColor(BGR_image, cv2.COLOR_BGR2RGB)\n rows, cols, depth = image.shape\n flipped_image = cv2.flip(image, 1)\n \n # Create a scaled version of the image\n scale = [0.9, 1.1]\n zoomfactor = random.choice(scale)\n scale_matrix = cv2.getRotationMatrix2D((cols/2, rows/2),\n 0, zoomfactor)\n scaled_image = cv2.warpAffine(image, scale_matrix,\n (cols, rows))\n\n # Append the images to the image list\n images.append(image)\n images.append(scaled_image)\n images.append(flipped_image)\n \n # Append the steering angle to the measurements list\n measurement = float(line[3])\n measurements.append(measurement)\n measurements.append(measurement)\n measurements.append(-1*measurement)\n \n return images, measurements", "def measure(self, imgage, previous=None):", "def run():\n scene = lm.scene_object()\n copy_latest_low()\n copy_latest_high()", "def main():\n cam = Realsense()\n # cam.access_intr_and_extr()\n profile = cam.pipeline.start(cam.config)\n depth_sensor = profile.get_device().first_depth_sensor()\n depth_scale = depth_sensor.get_depth_scale()\n align_to = rs.stream.color\n align = rs.align(align_to)\n\n objp = np.zeros((3*4,3), np.float32)\n objp[:,:2] = np.mgrid[0:4,0:3].T.reshape(-1,2)\n axis = np.float32([[1,0,0], [0,1,0], [0,0,-1]]).reshape(-1,3)\n # print(objp)\n\n try:\n while (True):\n # detect ArUco markers in RGB images\n frames = cam.pipeline.wait_for_frames()\n aligned_frames = align.process(frames)\n color_frame = aligned_frames.get_color_frame()\n color_image = np.asanyarray(color_frame.get_data()) \n frame = color_image\n font = cv2.FONT_HERSHEY_SIMPLEX\n corners, ids, rvecs, tvecs = cam.detect_markers_realsense(frame)\n \n if np.all(ids != None): # if markers are detected\n for i in range(0, ids.size):\n aruco.drawAxis(frame, cam.newcameramtx, cam.dist, rvecs[i],\n tvecs[i], 0.1) # Draw axis\n aruco.drawDetectedMarkers(frame, corners) # draw square around markers\n\n ###### DRAW ID #####\n strg = ''\n for i in range(0, ids.size):\n strg += str(ids[i][0])+', '\n\n cv2.putText(frame, \"Id: \" + strg, (0,25), font, 1, (0,255,0), 2,\n cv2.LINE_AA)\n\n\t ###### Output marker positions in camera frame ######\n \t # output tvec\n y0 = 60\n dy = 40\n for i in range(0, ids.size):\n y = y0 + i*dy\n cv2.putText(frame, str(tvecs[i][0]), (0, y), font, 1, (0,255,0),\n 2, cv2.LINE_AA)\n\n else:\n ##### DRAW \"NO IDS\" #####\n cv2.putText(frame, \"No Ids\", (0,64), font, 1, (0,255,0), 2,\n cv2.LINE_AA)\n\n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n ret, corners = cv2.findChessboardCorners(gray, (4,3), None)\n if ret == True:\n corners2 = cv2.cornerSubPix(gray, corners,(11,11), (-1,-1),\n cam.criteria)\n corners2 = corners2[::-1]\n # print(corners2)\n # print(objp)\n frame = cv2.drawChessboardCorners(frame, (4,3), corners2, ret)\n # Find the rotation and translation vectors.\n _, rvecs, tvecs = cv2.solvePnP(objp, corners2, cam.newcameramtx,\n cam.dist)\n rot, _ = cv2.Rodrigues(rvecs)\n # print(rot)\n # project 3D points to image plane\n imgpts, jac = cv2.projectPoints(axis, rvecs, tvecs,\n cam.newcameramtx, cam.dist)\n frame = draw(frame, corners2, imgpts)\n\n # Display the resulting frame\n cv2.imshow('frame',frame)\n cv2.waitKey(5)\n\n # When everything done, release the capture\n 
cv2.destroyAllWindows()\n\n finally:\n cam.pipeline.stop()", "def update_anim(frame, self):\n self.step()\n self.im.set_data(self.array)\n self.im2.set_data(self.array2)", "def augment():\n print(\"augmenting......\")\n path1 = '../trainp1/'\n path2 = '../trainp2/'\n # path of pair1 and pair2 similar to img & mask task for segmentation\n p = Augmentor.Pipeline(path1) # pair1\n p.ground_truth(path2) # pair2\n p.rotate(probability=0.3, max_left_rotation=3, max_right_rotation=3) \n p.flip_left_right(probability=0.2) \n p.random_distortion(0.5, 2, 2, 2)\n p.zoom(probability=0.5, min_factor=0.95, max_factor=1.05)\n p.process()", "def main():\n NAME = os.path.basename(__file__).split(\".\")[0]\n\n # Pass/fail thresholds\n MIN_AVG_FRAME_DELTA = 30 # at least 30ms delta between frames\n MAX_VAR_FRAME_DELTA = 0.01 # variance of frame deltas\n MAX_FRAME_DELTA_JITTER = 0.3 # max ms gap from the average frame delta\n\n with its.device.ItsSession() as cam:\n props = cam.get_camera_properties()\n if not its.caps.manual_sensor(props):\n print \"Test skipped\"\n return\n\n req, fmt = its.objects.get_fastest_manual_capture_settings(props)\n caps = cam.do_capture([req]*50, [fmt])\n\n # Print out the millisecond delta between the start of each exposure\n tstamps = [c['metadata']['android.sensor.timestamp'] for c in caps]\n deltas = [tstamps[i]-tstamps[i-1] for i in range(1,len(tstamps))]\n deltas_ms = [d/1000000.0 for d in deltas]\n avg = sum(deltas_ms) / len(deltas_ms)\n var = sum([d*d for d in deltas_ms]) / len(deltas_ms) - avg * avg\n range0 = min(deltas_ms) - avg\n range1 = max(deltas_ms) - avg\n print \"Average:\", avg\n print \"Variance:\", var\n print \"Jitter range:\", range0, \"to\", range1\n\n # Draw a plot.\n pylab.plot(range(len(deltas_ms)), deltas_ms)\n matplotlib.pyplot.savefig(\"%s_deltas.png\" % (NAME))\n\n # Test for pass/fail.\n assert(avg > MIN_AVG_FRAME_DELTA)\n assert(var < MAX_VAR_FRAME_DELTA)\n assert(abs(range0) < MAX_FRAME_DELTA_JITTER)\n assert(abs(range1) < MAX_FRAME_DELTA_JITTER)", "def expose(self):\n\n ## Determine type of exposure (exp, series, stack)\n exptype = str(self.exptypeComboBox.currentText())\n mode = self.modedict[exptype]\n\n ## Get exposure parameters\n if mode == \"bias\":\n exptime = 0.0\n else:\n exptime = self.exptimeSpinBox.value()\n imcount = self.imstackSpinBox.value()\n seqnum = self.imnumSpinBox.value()\n mintime = self.minexpSpinBox.value()\n maxtime = self.maxexpSpinBox.value()\n step = self.tstepSpinBox.value()\n\n ## Determine filter kwargs\n if self.filterToggleButton.isChecked():\n kwargs = {'filter_name' : str(self.filterComboBox.currentText())}\n else:\n kwargs = {'monowl' : self.monoSpinBox.value()}\n\n if self.testimCheckBox.isChecked():\n title = 'test'\n else:\n title = str(self.imtitleLineEdit.text())\n\n ## Build filepath\n filepath = os.path.join(str(self.imfilenameLineEdit.text()),title)\n \n ## Check if single exposure\n if exptype in [\"Exposure\", \"Dark\", \"Bias\"]:\n\n ## Perform exposure\n self.logger.info(\"Starting {0}s {1} image.\".format(exptime, exptype))\n self.image_start.emit(1)\n\n try:\n filename = exposure.im_acq(mode, filepath, exptime, seqnum, **kwargs)\n self.image_taken.emit(1)\n except subprocess.CalledProcessError:\n self.logger.exception(\"Error in executable {0}_acq. Image not taken.\".format(mode))\n except OSError:\n self.logger.exception(\"Executable {0}_acq not found. Image not taken\".format(mode))\n except IOError:\n self.logger.exception(\"File already exits. 
Image not taken.\")\n else:\n self.logger.info(\"Exposure {0} finished successfully.\".format(filename))\n self.seqnum_inc.emit(seqnum)\n subprocess.Popen(['ds9', '-mosaicimage', 'iraf', filename,\n '-zoom', 'to', 'fit', '-cmap', 'b'])\n\n ## Check if a stack of exposures of same type\n elif exptype in [\"Exposure Stack\", \"Dark Stack\", \"Bias Stack\"]:\n\n total = seqnum + imcount\n self.logger.info(\"Starting {0}s {1} stack.\".format(exptime, exptype))\n self.image_start.emit(imcount)\n\n try:\n for i in range(seqnum, total):\n self.logger.info(\"Starting image {0} of {1}.\".format(i+1-seqnum, imcount))\n filename = exposure.im_acq(mode, filepath, exptime, i, **kwargs)\n self.logger.info(\"Exposure {0} finished successfully.\".format(filename))\n self.image_taken.emit(i+1-seqnum)\n self.seqnum_inc.emit(i)\n except subprocess.CalledProcessError:\n self.logger.exception(\"Error in executable {0}_acq. Image not taken.\".format(mode))\n except OSError:\n self.logger.exception(\"Executable {0}_acq not found. Image not taken.\".format(mode))\n except IOError:\n self.logger.exception(\"File already exists. Image not taken.\")\n else:\n self.logger.info(\"Exposure stack finished successfully.\")\n subprocess.Popen(['ds9', '-mosaicimage', 'iraf', filename, '-zoom', 'to', 'fit', '-cmap', 'b'])\n \n ## Check if a series of exposures of increase exposure time\n elif exptype in [\"Exposure Series\", \"Dark Series\"]:\n\n ## Parameter checks\n if mintime > maxtime:\n self.logger.warning(\"Minimum time must be less than Maximum time. Series not started.\")\n return\n elif step <= 0:\n self.logger.warning(\"Time step must be greater than 0. Series not started.\")\n return\n\n ## Construct array of exposure times\n t = mintime\n time_array = []\n while t <= maxtime:\n time_array.append(t)\n t += step\n \n ## Perform series\n self.logger.info(\"Starting {0} series with mintime {1}, maxtime {2}, and step {3}.\".format(exptype, mintime, maxtime, step))\n self.image_start.emit(len(time_array))\n \n try:\n for i, time in enumerate(time_array):\n self.logger.info(\"Starting {0}s {1} image.\".format(time, mode))\n filename = exposure.im_acq(mode, filepath, time, seqnum, **kwargs)\n self.logger.info(\"Exposure {0} finished successfully.\".format(filename))\n self.image_taken.emit(i+1)\n except subprocess.CalledProcessError:\n self.logger.exception(\"Error in executable {0}_acq. Image not taken.\".format(mode))\n except OSError:\n self.logger.exception(\"Executable {0}_acq not found. Image not taken.\".format(mode))\n except IOError:\n self.logger.exception(\"File already exists. 
Image not taken.\")\n else:\n self.logger.info(\"Exposure series finished successfully.\")\n subprocess.Popen(['ds9', '-mosaicimage', 'iraf', filename, '-zoom', 'to', 'fit', '-cmap', 'b'])\n self.seqnum_inc.emit(seqnum)", "def problem2():\n\n data = loaddata(\"data/bayerdata.npy\")\n r, g, b = separatechannels(data)\n\n img = assembleimage(r, g, b)\n display_image(img)\n\n img_interpolated = interpolate(r, g, b)\n display_image(img_interpolated)", "def upscale_main_side(output_img):\n #Fixing pixel which y % 2 != 1\n outputx, outputy = output_img.size\n for oy in range(0, outputy-1, 2):\n for ox in range(1, outputx, 2):\n pixel1 = output_img.getpixel((ox-1, oy))\n p1 = pixel1[0]\n p2 = pixel1[1]\n p3 = pixel1[2]\n if ox == outputx-1 :\n output_img.putpixel((ox, oy), (p1, p2, p3))\n else:\n pixel2 = output_img.getpixel((ox+1, oy))\n P1 = pixel2[0]\n P2 = pixel2[1]\n P3 = pixel2[2]\n output_img.putpixel((ox, oy), (int((p1+P1)/2), int((p2+P2)/2), int((p3+P3)/2)))\n #print(f'pixel:{ox, oy} output:{output_img.getpixel((ox,oy))}')\n #Fixing pixel which y % 2 == 1\n for oy in range(1, outputy-1, 2):\n for ox in range(0, outputx):\n pixel1 = output_img.getpixel((ox, oy-1))\n p1 = pixel1[0]\n p2 = pixel1[1]\n p3 = pixel1[2]\n if oy == outputx:\n output_img.putpixel((ox, oy), (p1, p2, p3))\n break\n else:\n pixel2 = output_img.getpixel((ox, oy+1))\n P1 = pixel2[0]\n P2 = pixel2[1]\n P3 = pixel2[2]\n output_img.putpixel((ox, oy), (int((p1+P1)/2), int((p2+P2)/2), int((p3+P3)/2)))\n #print(f'pixel:{ox, oy} output:{output_img.getpixel((ox,oy))}')\n #Save image \n result_img = output_path+'/output.'+working_img.format.lower()\n output_img.save(result_img)\n print('Upscale finished..!')\n output_img.show()", "def run(self):\n #calculate platescale of first input image\n try:\n det = np.linalg.det(wcs.WCS(self.datain[0].header).wcs.cd)\n pscale = np.sqrt(np.abs(det))*3600.\n except:\n try:\n det = np.linalg.det(wcs.WCS(self.datain[0].header).wcs.pc)\n pscale = np.sqrt(np.abs(det))*3600.\n except:\n pscale = self.datain[0].header['PIXSCAL']\n #filtering out images which are too far away from the others\n #passing images added to a list of (image, WCS) tuples\n '''\n image_centers = []\n for f in self.datain:\n image_centers.append((f.header['CRVAL1'], f.header['CRVAL2']))\n filtered_datain = []\n dist_list = [[[0]*(len(image_centers)-1)]*len(image_centers)]\n for i in range(len(image_centers)):\n for j in range(len(image_centers)-1):\n dist_list[i][j+1] = np.sqrt((image_)**2+()**2)\n '''\n #calculations necessary for updating wcs information\n px = []\n py = []\n \n #in order to avoid NaN interactions, creating weight map\n weights=[]\n for f in self.datain:\n weights.append((np.where(np.isnan(f.image) == True, 0, 1)))\n \n for f in self.datain:\n px.extend(wcs.WCS(f.header).calc_footprint()[:,0])\n py.extend(wcs.WCS(f.header).calc_footprint()[:,1])\n x0 = (max(px)+min(px))/2.\n y0 = (max(py)+min(py))/2.\n sx = (max(px)-min(px))*np.cos(y0/180*np.pi) # arcsec\n sy = (max(py)-min(py)) # arcsec\n size = (sx*3600+self.getarg('pad')*2, sy*3600+self.getarg('pad')*2)\n xpix = size[0]//pscale\n ypix = size[1]//pscale\n cdelt = [pscale/3600.]*2\n \n #create self.dataout and give it a copy of an input's header\n self.dataout = DataFits(config = self.config)\n self.dataout.header = self.datain[0].header.copy()\n \n #update header wcs information\n self.log.info('Creating new WCS header')\n \n self.dataout.header['CRPIX1'] = xpix/2\n self.dataout.header['CRPIX2'] = ypix/2\n self.dataout.header['CRVAL1'] = x0\n 
self.dataout.header['CRVAL2'] = y0\n self.dataout.header['CD1_1'] = -cdelt[0]\n self.dataout.header['CD1_2'] = self.dataout.header['CD2_1'] = 0.\n self.dataout.header['CD2_2'] = cdelt[1]\n self.dataout.header['NAXIS1'] = int(xpix)\n self.dataout.header['NAXIS2'] = int(ypix)\n self.dataout.header['CTYPE1'] = 'RA---TAN-SIP'\n self.dataout.header['CTYPE2'] = 'DEC--TAN-SIP'\n self.dataout.header['RADESYS'] = 'ICRS'\n self.dataout.header['EQUINOX'] = 2000\n self.dataout.header['LATPOLE'] = self.datain[0].header['CRVAL2']\n self.dataout.header['LONPOLE'] = 180\n self.dataout.header['PIXASEC'] = pscale\n \n theta_rad = np.deg2rad(self.getarg('outangle'))\n rot_matrix = np.array([[np.cos(theta_rad), -np.sin(theta_rad)], \n [np.sin(theta_rad), np.cos(theta_rad)]])\n rot_cd = np.dot(rot_matrix, np.array([[self.dataout.header['CD1_1'], 0.],[0., self.dataout.header['CD2_2']]]))\n for i in [0,1]:\n for j in [0,1]:\n self.dataout.header['CD{0:d}_{1:d}'.format(i+1, j+1)] = rot_cd[i,j]\n \n #check drizzle arguments\n if self.getarg('kernel') == 'smoothing':\n kernel = 'lanczos3'\n elif self.getarg('kernel') in ['square', 'point', 'gaussian', 'tophat']:\n kernel = self.getarg('kernel')\n else:\n self.log.error('Kernel name not recognized, using default')\n kernel = 'square'\n if self.getarg('drizzleweights') == 'uniform':\n driz_wt = ''\n elif self.getarg('drizzleweights') in ['exptime', 'expsq']:\n driz_wt = self.getarg('drizzleweights')\n else:\n self.log.error('Drizzle weighting not recognized, using default')\n driz_wt = ''\n \n #create drizzle object and add input images\n fullwcs = wcs.WCS(self.dataout.header)\n self.log.info('Starting drizzle')\n driz = drz.Drizzle(outwcs = fullwcs, pixfrac=self.getarg('pixfrac'), \\\n kernel=kernel, fillval='10000', wt_scl=driz_wt)\n for i,f in enumerate(self.datain):\n self.log.info('Adding %s to drizzle stack' % f.filename)\n driz.add_image(f.imgdata[0], wcs.WCS(f.header), inwht=weights[i])\n \n try:\n fillval=float(self.getarg('fillval'))\n except:\n fillval=np.nan\n self.log.error('Fillvalue not recognized or missing, using default')\n \n #creates output fits file from drizzle output\n self.dataout.imageset(np.where(driz.outsci == 10000, fillval, driz.outsci))\n self.dataout.imageset(driz.outwht,'OutWeight', self.dataout.header)\n self.dataout.filename = self.datain[0].filename\n\n #add history\n self.dataout.setheadval('HISTORY','Coadd: %d files combined with %s kernel, pixfrac %f at %f times resolution' \\\n % (len(self.datain), kernel, self.getarg('pixfrac'), self.getarg('resolution')))", "def run(self, src, dest):\n self.logger.debug('Start the blurring. 
src=\"%s\", dest=\"%s\"', src, dest)\n create_frames(src, self.work['frames'])\n data = self.analyze()\n\n for frame_no, frame in enumerate(self.work.files('frames')):\n basename = os.path.basename(frame)\n areas = []\n for values in data:\n for sector in values['sectors']:\n if frame_no in range(sector[0]-self.offset, sector[1]+self.offset):\n areas.append(values['area'])\n break\n if areas:\n self.blur.blur_image(frame, areas, os.path.join(self.work['cleaned'], basename))\n else:\n copyfile(frame, os.path.join(self.work['cleaned'], basename))\n save_frames(self.work['cleaned'], dest)", "def camera(self):\n self.spectrum = self.spectrum", "def degrade_image(im, psf, downsample, shift_range):\n\n shift = np.random.randint(shift_range[0], shift_range[1], (1, 2))[0]\n\n # Add shift\n im = fourier_shift(np.fft.fftn(im), shift)\n im = np.fft.ifftn(im).real\n\n # Blur and downsample\n im = convolve2d(im, psf)\n im = downscale_local_mean(im, (downsample, downsample))\n\n return im", "def testSmallSrc(self):\n fromWcs = afwGeom.makeSkyWcs(\n crpix=lsst.geom.Point2D(0, 0),\n crval=lsst.geom.SpherePoint(359, 0, lsst.geom.degrees),\n cdMatrix=afwGeom.makeCdMatrix(scale=1.0e-8*lsst.geom.degrees),\n )\n fromExp = afwImage.ExposureF(afwImage.MaskedImageF(1, 1), fromWcs)\n\n toWcs = afwGeom.makeSkyWcs(\n crpix=lsst.geom.Point2D(0, 0),\n crval=lsst.geom.SpherePoint(358, 0, lsst.geom.degrees),\n cdMatrix=afwGeom.makeCdMatrix(scale=1.1e-8*lsst.geom.degrees),\n )\n toExp = afwImage.ExposureF(afwImage.MaskedImageF(10, 10), toWcs)\n\n warpControl = afwMath.WarpingControl(\"lanczos3\")\n # if a bug described in ticket #2441 is present, this will raise an\n # exception:\n numGoodPix = afwMath.warpExposure(toExp, fromExp, warpControl)\n self.assertEqual(numGoodPix, 0)\n self.assertTrue(np.all(np.isnan(toExp.image.array)))\n self.assertTrue(np.all(np.isinf(toExp.variance.array)))\n noDataBitMask = afwImage.Mask.getPlaneBitMask(\"NO_DATA\")\n self.assertTrue(np.all(toExp.mask.array == noDataBitMask))", "def _image(self):\n print(\"imaging\")\n self.images.append(self.device_control.image())\n yield", "def dither_img(img,num_pxl,bw_threshold=128):\n\n img_copy = np.copy(img)\n img_copy[img_copy >= bw_threshold] = 255\n img_copy[img_copy < bw_threshold] = 0\n\n h = img_copy.shape[0]\n w = img_copy.shape[1]\n\n coordinates_0 = np.where(img_copy == 0)\n coordinates_0 = tuple(zip(coordinates_0[0],coordinates_0[1]))\n\n coordinates_255 = np.where(img_copy == 255)\n coordinates_255 = tuple(zip(coordinates_255[0],coordinates_255[1]))\n\n if num_pxl == 0:\n return img_copy\n\n selected_coordinates_0 = random.sample(coordinates_0,min(num_pxl,min(len(coordinates_0),len(coordinates_255))))\n selected_coordinates_255 = random.sample(coordinates_255,min(num_pxl,min(len(coordinates_0),len(coordinates_255))))\n\n selected_coordinates_0 = tuple(zip(*selected_coordinates_0))\n selected_coordinates_255 = tuple(zip(*selected_coordinates_255))\n\n img_copy[selected_coordinates_0[0],selected_coordinates_0[1]] = 255\n img_copy[selected_coordinates_255[0],selected_coordinates_255[1]] = 0\n\n return img_copy" ]
[ "0.60497373", "0.5954956", "0.588953", "0.58547735", "0.5736053", "0.5712237", "0.56600004", "0.5645142", "0.56387717", "0.5601205", "0.56006426", "0.5587478", "0.5586383", "0.5524406", "0.55159074", "0.5505001", "0.5496937", "0.5476632", "0.54593825", "0.5451838", "0.54492474", "0.5445616", "0.5441269", "0.54300416", "0.5424851", "0.54109013", "0.5403154", "0.5366931", "0.53665185", "0.53582674", "0.5353076", "0.5335535", "0.53159547", "0.53137016", "0.5298819", "0.52911645", "0.5286996", "0.52816683", "0.5265932", "0.52646303", "0.524895", "0.52478915", "0.5239897", "0.5229337", "0.52285933", "0.5226021", "0.5219829", "0.52040786", "0.52027154", "0.5186683", "0.5183898", "0.51808286", "0.51775485", "0.51773673", "0.51721567", "0.5171474", "0.5170893", "0.5163017", "0.51604587", "0.5159254", "0.51551074", "0.51451784", "0.5144641", "0.5138974", "0.5132032", "0.51265526", "0.5120042", "0.51152587", "0.5106123", "0.5103409", "0.5101557", "0.5097954", "0.50907975", "0.50907815", "0.50862676", "0.50842017", "0.50824827", "0.50785106", "0.507828", "0.5078229", "0.5068843", "0.5066857", "0.5063911", "0.5063681", "0.50634927", "0.5062824", "0.5062676", "0.5062385", "0.5060525", "0.5053911", "0.50452757", "0.50397307", "0.50318706", "0.5025127", "0.502506", "0.5024688", "0.50231785", "0.50212985", "0.5015748", "0.50137407", "0.5010108" ]
0.0
-1
Tests if expTime is set to 500s after a double length exposure.
def test_do_apogee_science_500s_after_1000s_cart7(self): sopTester.updateModel('platedb', TestHelper.platedbState['apogeeLead1000sCart7']) self._update_cart(7, 'APOGEE') cmdState = self.actorState.doApogeeScience cmdState.reinitialize(self.cmd) self.assertEqual(cmdState.expTime, 1000) self.assertEqual(cmdState.keywords['expTime'], 1000) sopTester.updateModel('platedb', TestHelper.platedbState['apogeeLeadCart7']) self._update_cart(7, 'APOGEE') cmdState = self.actorState.doApogeeScience cmdState.reinitialize(self.cmd) self.assertEqual(cmdState.expTime, 500) self.assertEqual(cmdState.keywords['expTime'], 500)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def exp(self, exposure_time):\n print(f\"exp: {exposure_time}\")\n self.device_control.exposure = exposure_time\n yield", "def exposuretime(self) -> ErrorValue:\n return ErrorValue(self._data['ExpTime'], self._data.setdefault('ExpTimeError',0.0))", "def exptime(et=0.02):\n if et < 0.02:\n et = 0.02\n logger.error('Exposure time less than 0.02 seconds specified, using 0.02.')\n print camera.exptime(et)\n camera.status.update()", "def setExposureTime(self, cmd, expTime):\n\n pass", "def setExpTime(self, exptime):\n with self.lock:\n self.exptime = exptime", "def time_to_fire(self):\n return(self.frequency < (time.time() - self.last_fired))", "def _validate_exp(self):\n now = timegm(datetime.utcnow().utctimetuple())\n\n if self.authtype == 'jwt':\n if not hasattr(self, 'token'):\n # I pass here only one time, when I request a token\n self.token = None\n return True\n payload = jwt.decode(self.token, verify=False)\n try:\n exp = int(payload['exp'])\n except ValueError:\n raise jwt.DecodeError('Expiration Time claim (exp) must be an'\n ' integer.')\n\n if exp < now:\n # raise jwt.ExpiredSignatureError('Signature has expired')\n return False\n else:\n self.s.auth = JWTAuth(self.token)\n return True\n else:\n return True", "def validate_exp(self, now, leeway):\n if 'exp' in self:\n exp = self['exp']\n if not _validate_numeric_time(exp):\n raise InvalidClaimError('exp')\n if exp < (now - leeway):\n raise ExpiredTokenError()", "def is_time(self) -> bool:\n return self.times > 1", "def set_exptime(self, exptime):\n exptime = u.Quantity(exptime, unit=u.s)\n if not np.isscalar(exptime):\n raise TypeError('Exposure time must be an integer, a float or an Astropy Unit object')\n if exptime.value <= 0:\n raise ValueError('Exposure time can not be zero or negative')\n self.exptime = exptime.value\n try:\n if self.cycle < self.exptime:\n warnings.warn('Exposure time ({:0.4f} seconds) higher than Cycle time ({:0.4f} seconds)'.\n format(self.exptime, self.cycle))\n except:\n pass", "def check_time():\n times = get_times()\n time_difference = abs((times['local'] - times['target']).total_seconds())\n return time_difference < post_time_tol_seconds", "def time_is_out(self):\n return self.get_simulation_time() > self.config.max_time", "def verify_event_timing(self, event, item):\n return True", "def __exp2_changed_callback(self, ndx):\n if self.recording_sequence:\n self.record_sequence()\n self.exp2_radio.setChecked(True)\n self.exp2_ifi_select.setCurrentIndex(0)\n self.camera.set_exposure(ndx, 0)\n self.rec_seq_button.setEnabled(False)\n et = np.int(np.round(self.camera.actual_exposure_time_ms))\n self.write_to_log('Exposure %d ms' % et)", "def _badness(self, time):\n return (time - self.expected_time)**2", "def check_lightcurve_time(light_curve, exposure_time, frame_time):\n logger = logging.getLogger('mirage.seed_image.tso.check_lightcurve_time')\n\n times = copy.deepcopy(light_curve[\"times\"].value)\n fluxes = copy.deepcopy(light_curve[\"fluxes\"].value)\n time_units = light_curve[\"times\"].unit\n flux_units = light_curve[\"fluxes\"].unit\n adjusted = False\n\n # Remove elements where time < 0.\n if np.min(times) < 0.:\n positive_times = times >= 0.\n times = times[positive_times]\n fluxes = fluxes[positive_times]\n adjusted = True\n\n # If the times begin at values significantly > 0,\n # then add entries to bring the start back to time = 0\n if np.min(times) > 0.:\n logger.info((\"Lightcurve time values do not start at zero. 
Prepending an entry with time=0 \"\n \"and flux = 1.\"))\n times = np.insert(times, 0, 0.)\n fluxes = np.insert(fluxes, 0, 1.)\n adjusted = True\n\n # If the ending time is less than the exposure's total\n # observation time, then add entries with flux=1\n if np.max(times) < exposure_time:\n logger.info((\"Lightcurve time values extend only to {} seconds. This is not long enough \"\n \"to cover the entire exposure time of {} seconds. Extending to cover the full \"\n \"exposure time with flux = 1.\".format(np.max(times), exposure_time)))\n times = np.append(times, exposure_time + 5 * frame_time)\n fluxes = np.append(fluxes, 1.)\n adjusted = True\n\n if adjusted:\n light_curve[\"times\"] = times * time_units\n light_curve[\"fluxes\"] = fluxes * flux_units\n\n return light_curve", "def is_timeout(self) -> bool:\n return self.runtime.timeout <= 0.0", "def after(self, time2):\r\n return self.to_seconds() > time2.to_seconds()", "def __exp1_changed_callback(self, ndx):\n if self.recording_sequence:\n self.record_sequence()\n self.exp1_radio.setChecked(True)\n self.exp1_ifi_select.setCurrentIndex(0)\n self.camera.set_exposure(ndx, 0)\n self.rec_seq_button.setEnabled(False)\n et = np.int(np.round(self.camera.actual_exposure_time_ms))\n self.write_to_log('Exposure %d ms' % et)", "def check_attack(self):\n now = time.time() * 1000\n if self.prev_time is None:\n return True\n else:\n next_time = self.prev_time + self.get_recharge\n if now >= next_time:\n return True\n else:\n return False", "def is_dropped(upd_time, time_before):\n if (upd_time - time_before) / float(Config.BOUNDARY) >= 1.5:\n return True\n return False", "def is_expired(self):\n if not self.is_signed:\n return True\n return int(self._token_claims.get(self.__class__.exp_claim, 0)) < int(\n time.time()\n )", "def exceeded(self):\r\n return int(time.time()) - self.start_time >= self.length", "def epsilon(self, length: int, time: int) -> float:\n return (self.beta ** (1.0 / (length / 2))) ** time", "def timedout(self):\n\n return self.duration() > self.check.timeout", "def evaluate(self, time) -> float:\n ...", "def haveTime(self):\n if self.timeout is None:\n return True\n return time.time() <= self._stop", "def test_EXPIRY_DURATION(self):\n self.assertIsInstance(constants.EXPIRY_DURATION, int,\n \"constants.EXPIRY_DURATION must be an integer.\")", "def after(self, time2):\n return self.to_seconds() > time2.to_seconds()", "def exposuretime(self):\n _, = self.exposuretimes\n return _", "def is_expired(self):\n return int(time.time()) - self.time > self.interval", "def _verify_timeout(self, doc):\n expires = doc['expires']\n if expires == 0:\n return False\n if expires >= self._time():\n return False\n return True", "def spike_duration(self, spike_duration):\n\n self._spike_duration = spike_duration", "def evaluateTime(self, *args):\n return _osgAnimation.Motion_evaluateTime(self, *args)", "def overtime(self):\n if self._overtime != '':\n return True\n return False", "def check_time(self, m, s):\r\n if m*60 + s > 5400:\r\n self.unit.s = 0\r\n self.unit.m = 90\r\n return\r\n if s < 0:\r\n s = 0\r\n if m < 0:\r\n m = 0\r\n self.unit.s = s\r\n self.unit.m = m", "def test_expand_data_1500_correct_len():\n # TODO: should it round up to allow last snippet of time?\n exp = expand_data(log, 1500)\n assert len(exp) == (log['end'].iloc[-1] / 1500)", "def test_timestep(self):\n class Mock(object):\n def __init__(self):\n self.t = 0.0\n self.dt = None\n\n def evolve(self1, t, dt):\n if self1.dt is not None:\n self.assertAlmostEqual(self1.dt, dt)\n 
else:\n self1.dt = dt\n\n self.assertAlmostEqual(self1.t, t)\n\n self1.t += self1.dt\n\n t_max = 10.0\n dt = 0.2\n\n G = Mock()\n simulation.Simulation(G, dt=dt).run(t_max)\n self.assertAlmostEqual(G.dt, dt)", "def check_observation(self):\n last_obs_time = self.get_obs_time()\n last_obs_time_tz = last_obs_time.astimezone(\n ZoneInfo(self._config['db']['DisplayTimezone']))\n time_diff = datetime.now(tz=last_obs_time.tzinfo) - last_obs_time\n\n if int(time_diff.total_seconds() / 60) > \\\n int(self._config['observation']['Timeout']):\n if self._state['email_sent'] == 'False':\n if send_email(self._config['email'],\n 'env-logger: observation inactivity warning',\n 'No observations have been received in the env-logger '\n 'backend after {} (timeout {} minutes). Please check for '\n 'possible problems.'.format(last_obs_time_tz.isoformat(),\n self._config['observation']['Timeout'])):\n self._state['email_sent'] = 'True'\n else:\n self._state['email_sent'] = 'False'\n elif self._state['email_sent'] == 'True':\n send_email(self._config['email'],\n 'env-logger: observation received',\n 'An observation has been received at '\n f'{last_obs_time_tz.isoformat()}.')\n self._state['email_sent'] = 'False'", "def excitationPulse(self, time, power):\n t = time * ns + self.step # Should center at one step before 0\n if self.step <= 200 * ps: # resolution warrants modelling the pulse\n width = 200.0 * ps # self.step\n\n if t < width * 10: # Only evaulate when the value is significant\n amp = power / (width * sqrt_2pi) # normalized amplitude\n value = amp * np.exp(-1.0 * (t) * (t) / (2 * width * width))\n value = value\n else:\n value = 0.0\n else: # impulsive limit, just dump all the excitons in at t=0\n # if time >= 0 - self.step/2 and time < 0 + self.step/2:\n if t > -0.5 * self.step and t <= 0.5 * self.step:\n value = power / self.step\n else:\n value = 0.0\n return (value*self.step)", "def validity_by_time(self):\n conn = psycopg2.connect(self.conn)\n permissable_maximum_age_secs = 600 # 600s = 10mins\n query = \"SELECT time FROM steve_sense_sensor_logs ORDER BY time DESC LIMIT 1\"\n cur = conn.cursor()\n cur.execute(query)\n queryResult = cur.fetchall()\n age_seconds = (datetime.datetime.now(\n timezone.utc) - queryResult[0][0]).seconds\n cur.close()\n conn.close()\n if age_seconds > permissable_maximum_age_secs:\n print(\"Check Sensor, last sample is \"+str(age_seconds)+\" old\")\n return False\n else:\n return True", "def _matchTime(self, time: float):\n return self._comparator['Time'] < time", "def tune_exposure_time(camera, target, initial_exptime, min_exptime=0, max_exptime=None,\n max_steps=5, tolerance=0.1, cutout_size=256, bias=None, **kwargs):\n camera.logger.info(f\"Tuning exposure time for {camera}.\")\n\n images_dir = camera.get_config(\"directories.images\", None)\n if images_dir:\n images_dir = os.path.join(images_dir, \"temp\")\n os.makedirs(images_dir, exist_ok=True)\n\n # Parse quantities\n initial_exptime = get_quantity_value(initial_exptime, \"second\") * u.second\n\n if min_exptime is not None:\n min_exptime = get_quantity_value(min_exptime, \"second\") * u.second\n if max_exptime is not None:\n max_exptime = get_quantity_value(max_exptime, \"second\") * u.second\n\n try:\n bit_depth = camera.bit_depth.to_value(\"bit\")\n except NotImplementedError:\n bit_depth = 16\n\n saturated_counts = 2 ** bit_depth\n\n prefix = images_dir if images_dir is None else images_dir + \"/\"\n with tempfile.NamedTemporaryFile(suffix=\".fits\", prefix=prefix, delete=False) as tf:\n\n exptime = 
initial_exptime\n\n for step in range(max_steps):\n\n # Check if exposure time is within valid range\n if (exptime == max_exptime) or (exptime == min_exptime):\n break\n\n # Get an image\n cutout = camera.get_cutout(exptime, tf.name, cutout_size, keep_file=False, **kwargs)\n cutout = cutout.astype(\"float32\")\n if bias is not None:\n cutout -= bias\n\n # Measure average counts\n normalised_counts = np.median(cutout) / saturated_counts\n\n camera.logger.debug(f\"Normalised counts for {exptime} exposure on {camera}:\"\n f\" {normalised_counts}\")\n\n # Check if tolerance condition is met\n if tolerance:\n if abs(normalised_counts - target) < tolerance:\n break\n\n # Update exposure time\n exptime = exptime * target / normalised_counts\n if max_exptime is not None:\n exptime = min(exptime, max_exptime)\n if min_exptime is not None:\n exptime = max(exptime, min_exptime)\n\n camera.logger.info(f\"Tuned exposure time for {camera}: {exptime}\")\n\n return exptime", "def _check_maxexp(np_type, maxexp):\n dt = np.dtype(np_type)\n np_type = dt.type\n two = np_type(2).reshape((1,)) # to avoid upcasting\n return (np.isfinite(two ** (maxexp - 1)) and\n not np.isfinite(two ** maxexp))", "def is_expired(self):\n\n if self._lifetime is not None and self._lifetime > 0:\n # 300 seconds waite is the tolerance !\n # The unit of lifetime is millisecond\n if (time.time() - self._create_date) * 1000 > self._lifetime + 300000:\n return True\n\n return False", "def isCalibrationExpired(self):\n return (getI1ProTimeUntilCalibrationExpire() < 200)", "def find_exposure_time(cam,targetIntensity=100,margin=5):\n from numpy import mean\n\n if targetIntensity < 0 or targetIntensity > 255:\n print(\"Invalid target intensity\")\n return 1\n minExposure = 0.01 # Smallest value in ms\n maxExposure = 80\n counter = 0\n\n # Calculate exposures at the different end\n Image = cam.grab_image(timeout='1s', copy=True,\n exposure_time=number_to_millisecond(minExposure))\n minIntensity = mean(Image)\n\n Image = cam.grab_image(timeout='1s', copy=True,\n exposure_time=number_to_millisecond(maxExposure))\n maxIntensity = mean(Image)\n\n midIntensity = 1\n while midIntensity < (targetIntensity - margin) or\\\n midIntensity > (targetIntensity+margin) and counter < 20:\n # Set exposure, take a picture and check how good it was\n counter = counter + 1\n\n midExposure = (maxExposure + minExposure) / 2\n Image = cam.grab_image(timeout='1s',\n copy=True,\n exposure_time=number_to_millisecond(midExposure))\n midIntensity = mean(Image)\n\n if midIntensity > targetIntensity: # Exposure time too short\n maxExposure = midExposure\n # maxIntensity = midIntensity\n else: # Exposure time too long\n minExposure = midExposure\n # minIntensity = midIntensity\n if counter == 100:\n print(\"WARNING: Find exposure function ran max number of iterations!\\\n No really suitable exposure setting found\")\n # Update the exposure time of the camera and return the target exposure\n cam.set_defaults(exposure_time=number_to_millisecond(midExposure))\n return midExposure#number_to_millisecond(midExposure)", "def GAME_TIME_ADVANCE(dt):", "def checkGracePeriodDuration(self):\n if (not self.isInGraceInvulnerability):\n return\n if (time.time()-self.gracePeriodStartTime > shipDamagedInvulerabilityGracePeriodLength):\n #if the grace period is over...\n self.disableGracePeriod()", "def valid(t):\n return float(t) > time.time()", "def test_upload_time(self):\n time_val = 1345427105 # just a realistic time val, ~ 8/19/12 6:45pm\n 
self.assertIsNone(self.model.last_uploaded(),\n \"Did not return None even though upload entry doesn't exist\")\n self.model.set_last_uploaded(time_val)\n self.assertEquals(time_val, self.model.last_uploaded(),\n \"Created initial val, but appears incorect\")\n self.model.set_last_uploaded(time_val + 10)\n self.assertEquals(time_val + 10, self.model.last_uploaded(),\n \"Updated value, but appears incorrect\")", "def important_event(time: int) -> bool:\n last_event = get_events(True)[0]\n try:\n time_event = int(last_event.split('\\n')[0].strip(\"'\"))\n except ValueError:\n time_event = int(last_event.split('\\n')[-1].strip(\"'\"))\n if time - time_event < 60:\n return 'gol' in last_event or 'cartão' in last_event\n return False", "def __luredWakeupTime(self, suitId):\n return self.__suitIsLured(suitId) and \\\n self.currentlyLuredSuits[suitId][0] > 0 and \\\n random.randint(0, 99) < \\\n self.currentlyLuredSuits[suitId][2]", "def is_exploring(self, step):\n return np.random.rand() < self._epsilon(step)", "def T_elapsed(T_amount: BlockHeight) -> bool:\n T_now = getCurrentBlockHeight()\n return T_now - T_init >= T_amount", "def _check_pulse(self):\n timedelta = time.time() - self.heartbeat\n update_delay = float(1/self.qbpm.frequency)\n time_to_update = False\n if timedelta > update_delay:\n time_to_update = True\n self.heartbeat = time.time()\n return time_to_update", "def tick(self):\n if time.time() - self.t > self.speed:\n self.t = time.time()\n return True\n else:\n return False", "def _is_noise(self, t):\n\t\tfor ts in self.noise_itv:\n\t\t\tif len(ts) == 2:\n\t\t\t\ttime1 = ts[0]\n\t\t\t\ttime2 = ts[1]\n\t\t\t\tif time1 <= t and t <= time2:\n\t\t\t\t\treturn True\n\t\t\telse:\n\t\t\t\tend_time = ts[0]\n\t\t\t\tif t >= end_time:\n\t\t\t\t\treturn True\n\t\treturn False", "def extended(self):\n if self.expires_at:\n return self.expires_at - self.issued_at > timedelta(days=30)\n return False", "def check_convergency(self):\n if self.vars['ema_trace'][self.vars['step']] <= self.settings[\"emaSpeedTol\"]:\n return True\n else:\n return False", "def set_exposure(self, exposure):\n self.logger.info(f'Setting exposure to {exposure}')\n self._driver.ExposureTime.SetValue(exposure)", "def expired(self):\n return rospy.get_rostime() - self.start_time > self.duration", "def timestep(self):\n self.survive()\n self.move()\n self.set_survival_probability(self.vital_var)\n self.reproduce()\n # self.save_frame()\n if self.time % self.save_state_frequency == 0:\n self.save_state(self.state_file.format(self.time))\n if self.time % self.save_stats_frequency == 0:\n self.write_stats()\n # self.connected_component()\n self.time += 1\n\n if self.pop_size > 0:\n return True\n else:\n return False", "def mood_duration_millis():\n return max(200, int(state.normal(750, 500)))", "def is_chunk_timeout(self, chunk_timeout): \n return time() - self._chunk_timeout_time > chunk_timeout", "def delta_time(self):\n delta_time = time.time() - self.time\n if delta_time >= 1.0 / self.target_fps:\n self.time = time.time()\n # end if\n return delta_time", "def test_duration_property(self):\n recording_dt = 0.1\n recording_shape = {\n 'no_timesteps': 1000,\n 'no_sweeps': 10,\n 'no_channels': 4,\n }\n expected_duration = recording_shape['no_timesteps'] * recording_dt\n test_rec = rt.Recording(\n np.zeros(\n [\n recording_shape['no_channels'],\n recording_shape['no_timesteps'],\n recording_shape['no_sweeps'],\n ]\n ),\n dt=recording_dt,\n )\n npt.assert_almost_equal(\n test_rec.duration,\n expected_duration,\n 
err_msg='Expected {} for `duration` property; got {} instead.'.format(\n expected_duration, test_rec.duration\n ),\n )", "def expired(self) -> bool:\n if not self.use_wts:\n return False\n\n return datetime.now() > self.expire", "def discrete_time(self):\n return bool(self._ll_tree_sequence.get_discrete_time())", "def checkAtFinalTime():\n global final_time\n if final_time <= current_second:\n return True\n return False", "def delete_expired(self):\n check_time = datetime.now()\n if self.can_expire and self.duration:\n exp_times = deepcopy(self.exp_times)\n for key in exp_times:\n if exp_times[key] < check_time:\n self.delete(key)", "def is_limited(self) -> bool:\n return self.__times > ActionState.UNLIMITED", "def is_delayed(self) -> bool:\n if self.periodic and self.is_attached():\n return self.runtime.cost > self.runtime.tasklet.tick\n\n return False", "def __sleep(self):\n if self.sleep_duration > 0:\n gevent.sleep(self.sleep_duration)\n else:\n self.__warn(\n f\"The average tick took longer than the set tick duration of {self.__tick_duration}. \"\n f\"Program is to heavy to run real time\")", "def exp(self) -> t.Optional[int]:\n return self.claims.get(\"exp\")", "def rate_limiting(cls):\n this_click_time = time.time()\n time_to_last_click = None\n if cls.last_click_time:\n time_to_last_click = this_click_time - cls.last_click_time\n cls.last_click_time = this_click_time\n return time_to_last_click and time_to_last_click < 0.7", "def test_time_supp_length_matches_no_timesteps(self):\n for no_timesteps in [5, 578, 993, 300072]:\n for dt in [0.1, 0.5, 3.0]:\n test_rec = rt.Recording(np.empty([6, no_timesteps, 1]), dt=dt)\n self.assertEqual(\n len(test_rec.time_supp),\n no_timesteps,\n 'Expected length of time_supp {} to match no_timesteps of '\n 'input {}.'.format(len(test_rec.time_supp), no_timesteps),\n )", "def timed_out(self):\n return self.enabled and (\n self.elapsed >= self.timeout\n or math.isclose(self.elapsed, self.timeout)\n )", "def check_timer(self, wanted_time):\n if time.time() - self.start_time >= wanted_time:\n return True\n return False", "def setHoldTime(self, channel, time, unitCode=0):\n # Ensure compliance with level boundary conditions\n\n resp = self.XAPCommand('GHOLD', channel, time, unitCode=unitCode)\n return float(resp)", "def ComputeTimeReward(self, currentTime, expectedTime):\r\n return (expectedTime - currentTime) * 1 if currentTime < expectedTime else (expectedTime - currentTime) * 1", "def __checkTimer(self):\n if self.__endTime is None:\n raise AssertionError('The end time had not been set.')\n if time.time() > self.__endTime:\n self._logError('Maximum Run Time Reached !!')\n raise _MaxRunTimeReachedError('')", "def _timeout_request(start_time, request_timeout):\n return int(round(time() * 1000)) - start_time >= request_timeout", "def test_time(self):\n M = simulation.StateMonitor(self.G, 'v')\n sim = simulation.Simulation(self.G, M, dt=self.dt)\n sim.run(self.t_max)\n self.assertTrue(np.allclose(M.t, np.arange(0, self.t_max, self.dt)))", "def isTimeRemaining(self):\n if self.run_type.startswith('timed'):\n time_since_start = (time.time() - self.start_times['run'])\n remaining_time = self.max_time * 60 - time_since_start\n if remaining_time < 0:\n return False\n else:\n return True", "def calculateDelayedEventTime(self, raw_time):\n return self.event_time", "def is_over(self, time):\n over = (not self.enable_loop()) and (time >= self.get_duration())\n return over", "def is_equidistant(self) -> bool:\n if len(self.time) < 3:\n return True\n return 
len(self.time.to_series().diff().dropna().unique()) == 1", "def explores(self, explores):\n\n self._explores = explores", "def fine_tune(self, duration = 2):\n\n with sr.Microphone() as source:\n self.recorder.adjust_for_ambient_noise(source, duration=duration)\n return self.recorder.energy_threshold", "def test_do_apogee_science_1000s_after_500s_cart7(self):\n\n sopTester.updateModel('platedb', TestHelper.platedbState['apogeeLeadCart7'])\n self._update_cart(7, 'APOGEE')\n cmdState = self.actorState.doApogeeScience\n cmdState.reinitialize(self.cmd)\n self.assertEqual(cmdState.expTime, 500)\n self.assertEqual(cmdState.keywords['expTime'], 500)\n\n sopTester.updateModel('platedb', TestHelper.platedbState['apogeeLead1000sCart7'])\n self._update_cart(7, 'APOGEE')\n cmdState = self.actorState.doApogeeScience\n cmdState.reinitialize(self.cmd)\n self.assertEqual(cmdState.expTime, 1000)\n self.assertEqual(cmdState.keywords['expTime'], 1000)", "def __check_total_duration(self, duration: int) -> bool:\n available_duration = N_EIGHTHS_PER_MEASURE * (self.n_measures - 1)\n return self.current_time_in_eighths + duration <= available_duration", "def Exposure(self, time):\r\n IS_EXPOSURE_CMD_SET_EXPOSURE = 12 #there is a whole list to implement\r\n TIME = DOUBLE(time)\r\n nSizeOfParam = 8\r\n CALL('Exposure', self, \r\n UINT(IS_EXPOSURE_CMD_SET_EXPOSURE), \r\n byref(TIME), \r\n UINT(nSizeOfParam))", "def test_custom_time(self):\n interval = 0.5\n M = simulation.StateMonitor(self.G, 'v', interval=interval)\n sim = simulation.Simulation(self.G, M, dt=self.dt)\n sim.run(self.t_max)\n self.assertTrue(np.allclose(M.t, np.arange(0, self.t_max, interval)))", "def simulate_until(self, stop_time, dt=None):\n while self.t < stop_time:\n self.step(dt)", "def isExpired(self):\n return True/False", "def get_timings(self):\n exp=lib.is_Exposure_d8(self.hcam,7)*1E-3\n frame_rate=lib.is_SetFrameRate(self.hcam,0x8000)\n return self.AcqTimes(exp,1./frame_rate)", "def test_exp():\n x = np.linspace(-3,3,13)\n\n default_use_numexpr = accel_math._USE_NUMEXPR\n\n accel_math._USE_NUMEXPR = True\n r1 = accel_math._exp(x)\n\n accel_math._USE_NUMEXPR = False\n r2 = accel_math._exp(x)\n\n np.testing.assert_almost_equal(r1,r2)\n\n accel_math._USE_NUMEXPR = default_use_numexpr", "def guard_time(self):\n return uniform(0, self.max_guard_time)", "def _about_to_expire(self, secret: Secret) -> bool:\n return secret.is_expired(datetime.now(UTC) + self.expiry_margin)" ]
[ "0.6508764", "0.6255665", "0.62178487", "0.615677", "0.57449126", "0.5718129", "0.562681", "0.560696", "0.5600589", "0.5502671", "0.54754275", "0.54733014", "0.5451332", "0.5331526", "0.529389", "0.52561027", "0.5224857", "0.5195893", "0.5194161", "0.51847905", "0.51787597", "0.51708823", "0.5164096", "0.516151", "0.51563543", "0.5128906", "0.5110004", "0.5107061", "0.5092745", "0.50908405", "0.50787985", "0.50677943", "0.506563", "0.5057097", "0.5045766", "0.5039357", "0.50273323", "0.50153506", "0.49983722", "0.49860993", "0.49858907", "0.49657658", "0.49502572", "0.49489132", "0.49486795", "0.49469966", "0.49450952", "0.4942598", "0.49225974", "0.4922355", "0.4920734", "0.4913474", "0.49049914", "0.490453", "0.49020043", "0.4900217", "0.48997605", "0.48916352", "0.48900992", "0.48845503", "0.48769727", "0.48690078", "0.4865471", "0.48605007", "0.48499867", "0.48462734", "0.48350704", "0.4830496", "0.48224762", "0.48178232", "0.48173565", "0.48164526", "0.4816118", "0.48138097", "0.47986743", "0.4787773", "0.47682443", "0.47654238", "0.47588697", "0.47537604", "0.475016", "0.47447678", "0.4740874", "0.4730716", "0.47162125", "0.47124946", "0.4702591", "0.47003716", "0.4698047", "0.46947765", "0.46946338", "0.4694088", "0.4692679", "0.46907756", "0.46888813", "0.4686828", "0.4686386", "0.46837866", "0.46777448", "0.4677685" ]
0.4705112
86
Tests if expTime is set to 1000s after a normal length exposure.
def test_do_apogee_science_1000s_after_500s_cart7(self): sopTester.updateModel('platedb', TestHelper.platedbState['apogeeLeadCart7']) self._update_cart(7, 'APOGEE') cmdState = self.actorState.doApogeeScience cmdState.reinitialize(self.cmd) self.assertEqual(cmdState.expTime, 500) self.assertEqual(cmdState.keywords['expTime'], 500) sopTester.updateModel('platedb', TestHelper.platedbState['apogeeLead1000sCart7']) self._update_cart(7, 'APOGEE') cmdState = self.actorState.doApogeeScience cmdState.reinitialize(self.cmd) self.assertEqual(cmdState.expTime, 1000) self.assertEqual(cmdState.keywords['expTime'], 1000)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def exp(self, exposure_time):\n print(f\"exp: {exposure_time}\")\n self.device_control.exposure = exposure_time\n yield", "def setExposureTime(self, cmd, expTime):\n\n pass", "def exposuretime(self) -> ErrorValue:\n return ErrorValue(self._data['ExpTime'], self._data.setdefault('ExpTimeError',0.0))", "def exptime(et=0.02):\n if et < 0.02:\n et = 0.02\n logger.error('Exposure time less than 0.02 seconds specified, using 0.02.')\n print camera.exptime(et)\n camera.status.update()", "def time_to_fire(self):\n return(self.frequency < (time.time() - self.last_fired))", "def is_time(self) -> bool:\n return self.times > 1", "def _validate_exp(self):\n now = timegm(datetime.utcnow().utctimetuple())\n\n if self.authtype == 'jwt':\n if not hasattr(self, 'token'):\n # I pass here only one time, when I request a token\n self.token = None\n return True\n payload = jwt.decode(self.token, verify=False)\n try:\n exp = int(payload['exp'])\n except ValueError:\n raise jwt.DecodeError('Expiration Time claim (exp) must be an'\n ' integer.')\n\n if exp < now:\n # raise jwt.ExpiredSignatureError('Signature has expired')\n return False\n else:\n self.s.auth = JWTAuth(self.token)\n return True\n else:\n return True", "def check_time():\n times = get_times()\n time_difference = abs((times['local'] - times['target']).total_seconds())\n return time_difference < post_time_tol_seconds", "def setExpTime(self, exptime):\n with self.lock:\n self.exptime = exptime", "def validate_exp(self, now, leeway):\n if 'exp' in self:\n exp = self['exp']\n if not _validate_numeric_time(exp):\n raise InvalidClaimError('exp')\n if exp < (now - leeway):\n raise ExpiredTokenError()", "def is_expired(self):\n if not self.is_signed:\n return True\n return int(self._token_claims.get(self.__class__.exp_claim, 0)) < int(\n time.time()\n )", "def set_exptime(self, exptime):\n exptime = u.Quantity(exptime, unit=u.s)\n if not np.isscalar(exptime):\n raise TypeError('Exposure time must be an integer, a float or an Astropy Unit object')\n if exptime.value <= 0:\n raise ValueError('Exposure time can not be zero or negative')\n self.exptime = exptime.value\n try:\n if self.cycle < self.exptime:\n warnings.warn('Exposure time ({:0.4f} seconds) higher than Cycle time ({:0.4f} seconds)'.\n format(self.exptime, self.cycle))\n except:\n pass", "def exceeded(self):\r\n return int(time.time()) - self.start_time >= self.length", "def time_is_out(self):\n return self.get_simulation_time() > self.config.max_time", "def realtime(self):\n return self._time is None", "def valid(t):\n return float(t) > time.time()", "def overtime(self):\n if self._overtime != '':\n return True\n return False", "def check_attack(self):\n now = time.time() * 1000\n if self.prev_time is None:\n return True\n else:\n next_time = self.prev_time + self.get_recharge\n if now >= next_time:\n return True\n else:\n return False", "def checkAtFinalTime():\n global final_time\n if final_time <= current_second:\n return True\n return False", "def haveTime(self):\n if self.timeout is None:\n return True\n return time.time() <= self._stop", "def _verify_timeout(self, doc):\n expires = doc['expires']\n if expires == 0:\n return False\n if expires >= self._time():\n return False\n return True", "def verify_event_timing(self, event, item):\n return True", "def is_expired(self):\n return int(time.time()) - self.time > self.interval", "def _badness(self, time):\n return (time - self.expected_time)**2", "def check_lightcurve_time(light_curve, exposure_time, frame_time):\n logger = 
logging.getLogger('mirage.seed_image.tso.check_lightcurve_time')\n\n times = copy.deepcopy(light_curve[\"times\"].value)\n fluxes = copy.deepcopy(light_curve[\"fluxes\"].value)\n time_units = light_curve[\"times\"].unit\n flux_units = light_curve[\"fluxes\"].unit\n adjusted = False\n\n # Remove elements where time < 0.\n if np.min(times) < 0.:\n positive_times = times >= 0.\n times = times[positive_times]\n fluxes = fluxes[positive_times]\n adjusted = True\n\n # If the times begin at values significantly > 0,\n # then add entries to bring the start back to time = 0\n if np.min(times) > 0.:\n logger.info((\"Lightcurve time values do not start at zero. Prepending an entry with time=0 \"\n \"and flux = 1.\"))\n times = np.insert(times, 0, 0.)\n fluxes = np.insert(fluxes, 0, 1.)\n adjusted = True\n\n # If the ending time is less than the exposure's total\n # observation time, then add entries with flux=1\n if np.max(times) < exposure_time:\n logger.info((\"Lightcurve time values extend only to {} seconds. This is not long enough \"\n \"to cover the entire exposure time of {} seconds. Extending to cover the full \"\n \"exposure time with flux = 1.\".format(np.max(times), exposure_time)))\n times = np.append(times, exposure_time + 5 * frame_time)\n fluxes = np.append(fluxes, 1.)\n adjusted = True\n\n if adjusted:\n light_curve[\"times\"] = times * time_units\n light_curve[\"fluxes\"] = fluxes * flux_units\n\n return light_curve", "def is_timeout(self) -> bool:\n return self.runtime.timeout <= 0.0", "def expired(self):\n return rospy.get_rostime() - self.start_time > self.duration", "def _check_maxexp(np_type, maxexp):\n dt = np.dtype(np_type)\n np_type = dt.type\n two = np_type(2).reshape((1,)) # to avoid upcasting\n return (np.isfinite(two ** (maxexp - 1)) and\n not np.isfinite(two ** maxexp))", "def tick(self):\n if time.time() - self.t > self.speed:\n self.t = time.time()\n return True\n else:\n return False", "def evaluate(self, time) -> float:\n ...", "def is_real_time(self):\n return time.time() - self.timestamp < self._DEADLINE_SEC", "def is_expired(self):\n\n if self._lifetime is not None and self._lifetime > 0:\n # 300 seconds waite is the tolerance !\n # The unit of lifetime is millisecond\n if (time.time() - self._create_date) * 1000 > self._lifetime + 300000:\n return True\n\n return False", "def _matchTime(self, time: float):\n return self._comparator['Time'] < time", "def evaluateTime(self, *args):\n return _osgAnimation.Motion_evaluateTime(self, *args)", "def check_time(self, m, s):\r\n if m*60 + s > 5400:\r\n self.unit.s = 0\r\n self.unit.m = 90\r\n return\r\n if s < 0:\r\n s = 0\r\n if m < 0:\r\n m = 0\r\n self.unit.s = s\r\n self.unit.m = m", "def discrete_time(self):\n return bool(self._ll_tree_sequence.get_discrete_time())", "def after(self, time2):\r\n return self.to_seconds() > time2.to_seconds()", "def test_upload_time(self):\n time_val = 1345427105 # just a realistic time val, ~ 8/19/12 6:45pm\n self.assertIsNone(self.model.last_uploaded(),\n \"Did not return None even though upload entry doesn't exist\")\n self.model.set_last_uploaded(time_val)\n self.assertEquals(time_val, self.model.last_uploaded(),\n \"Created initial val, but appears incorect\")\n self.model.set_last_uploaded(time_val + 10)\n self.assertEquals(time_val + 10, self.model.last_uploaded(),\n \"Updated value, but appears incorrect\")", "def validity_by_time(self):\n conn = psycopg2.connect(self.conn)\n permissable_maximum_age_secs = 600 # 600s = 10mins\n query = \"SELECT time FROM steve_sense_sensor_logs 
ORDER BY time DESC LIMIT 1\"\n cur = conn.cursor()\n cur.execute(query)\n queryResult = cur.fetchall()\n age_seconds = (datetime.datetime.now(\n timezone.utc) - queryResult[0][0]).seconds\n cur.close()\n conn.close()\n if age_seconds > permissable_maximum_age_secs:\n print(\"Check Sensor, last sample is \"+str(age_seconds)+\" old\")\n return False\n else:\n return True", "def is_sampling_for_minmax(self):\n return (self._level_change_time is not None) and \\\n (get_time() - self._level_change_time) < self._duration_in_sec", "def check_convergency(self):\n if self.vars['ema_trace'][self.vars['step']] <= self.settings[\"emaSpeedTol\"]:\n return True\n else:\n return False", "def extended(self):\n if self.expires_at:\n return self.expires_at - self.issued_at > timedelta(days=30)\n return False", "def valid(self):\n delta = datetime.datetime.now() - self._last_access\n return ((delta.seconds < 7200) and not self.is_empty()) or \\\n (delta.seconds < 60)", "def __luredWakeupTime(self, suitId):\n return self.__suitIsLured(suitId) and \\\n self.currentlyLuredSuits[suitId][0] > 0 and \\\n random.randint(0, 99) < \\\n self.currentlyLuredSuits[suitId][2]", "def epsilon(self, length: int, time: int) -> float:\n return (self.beta ** (1.0 / (length / 2))) ** time", "def __exp1_changed_callback(self, ndx):\n if self.recording_sequence:\n self.record_sequence()\n self.exp1_radio.setChecked(True)\n self.exp1_ifi_select.setCurrentIndex(0)\n self.camera.set_exposure(ndx, 0)\n self.rec_seq_button.setEnabled(False)\n et = np.int(np.round(self.camera.actual_exposure_time_ms))\n self.write_to_log('Exposure %d ms' % et)", "def isPeriodTimerEffective(self):\n return 1", "def is_dropped(upd_time, time_before):\n if (upd_time - time_before) / float(Config.BOUNDARY) >= 1.5:\n return True\n return False", "def test_exp():\n x = np.linspace(-3,3,13)\n\n default_use_numexpr = accel_math._USE_NUMEXPR\n\n accel_math._USE_NUMEXPR = True\n r1 = accel_math._exp(x)\n\n accel_math._USE_NUMEXPR = False\n r2 = accel_math._exp(x)\n\n np.testing.assert_almost_equal(r1,r2)\n\n accel_math._USE_NUMEXPR = default_use_numexpr", "def isTimeRemaining(self):\n if self.run_type.startswith('timed'):\n time_since_start = (time.time() - self.start_times['run'])\n remaining_time = self.max_time * 60 - time_since_start\n if remaining_time < 0:\n return False\n else:\n return True", "def _isBasisDuration(self):\n invdur = 1/self.getDurationNoDot()\n if invdur % 1 > 0:\n return False\n else:\n return True", "def __exp2_changed_callback(self, ndx):\n if self.recording_sequence:\n self.record_sequence()\n self.exp2_radio.setChecked(True)\n self.exp2_ifi_select.setCurrentIndex(0)\n self.camera.set_exposure(ndx, 0)\n self.rec_seq_button.setEnabled(False)\n et = np.int(np.round(self.camera.actual_exposure_time_ms))\n self.write_to_log('Exposure %d ms' % et)", "def exp(self) -> t.Optional[int]:\n return self.claims.get(\"exp\")", "def after(self, time2):\n return self.to_seconds() > time2.to_seconds()", "def test_EXPIRY_DURATION(self):\n self.assertIsInstance(constants.EXPIRY_DURATION, int,\n \"constants.EXPIRY_DURATION must be an integer.\")", "def timedout(self):\n\n return self.duration() > self.check.timeout", "def check_timer(self, wanted_time):\n if time.time() - self.start_time >= wanted_time:\n return True\n return False", "def test_expand_data_1500_correct_len():\n # TODO: should it round up to allow last snippet of time?\n exp = expand_data(log, 1500)\n assert len(exp) == (log['end'].iloc[-1] / 1500)", "def isCalibrationExpired(self):\n 
return (getI1ProTimeUntilCalibrationExpire() < 200)", "def hit(self):\n if self._power == 0 :\n return False\n self._power -= 1\n return True", "def important_event(time: int) -> bool:\n last_event = get_events(True)[0]\n try:\n time_event = int(last_event.split('\\n')[0].strip(\"'\"))\n except ValueError:\n time_event = int(last_event.split('\\n')[-1].strip(\"'\"))\n if time - time_event < 60:\n return 'gol' in last_event or 'cartão' in last_event\n return False", "def check_expiration(self, cur_time):\n\n\t\ttime_limit = 1000\n\t\ttime_elapsed = cur_time - self.time_created\n\n\t\t# Erase cache after an arbitrary amount of time\n\t\tif time_elapsed > time_limit:\n\t\t\tself.cache_expiration()", "def is_equidistant(self) -> bool:\n if len(self.time) < 3:\n return True\n return len(self.time.to_series().diff().dropna().unique()) == 1", "def delta_time(self):\n delta_time = time.time() - self.time\n if delta_time >= 1.0 / self.target_fps:\n self.time = time.time()\n # end if\n return delta_time", "def _check_bullet_cooldown(self):\n time_now = pygame.time.get_ticks()\n if time_now - self.last_bullet_fired >= self.bullet_cooldown:\n self.shoot_disabled = False", "def _check_goauth_expiration(self, expiry):\n now = int(time.time())\n time_left = int(expiry) - now\n # 10 days\n min_time_left = 60*60*24*10\n if time_left < min_time_left:\n return False\n else:\n return True", "def test_time_supp_length_matches_no_timesteps(self):\n for no_timesteps in [5, 578, 993, 300072]:\n for dt in [0.1, 0.5, 3.0]:\n test_rec = rt.Recording(np.empty([6, no_timesteps, 1]), dt=dt)\n self.assertEqual(\n len(test_rec.time_supp),\n no_timesteps,\n 'Expected length of time_supp {} to match no_timesteps of '\n 'input {}.'.format(len(test_rec.time_supp), no_timesteps),\n )", "def T_elapsed(T_amount: BlockHeight) -> bool:\n T_now = getCurrentBlockHeight()\n return T_now - T_init >= T_amount", "def exposuretime(self):\n _, = self.exposuretimes\n return _", "def delete_expired(self):\n check_time = datetime.now()\n if self.can_expire and self.duration:\n exp_times = deepcopy(self.exp_times)\n for key in exp_times:\n if exp_times[key] < check_time:\n self.delete(key)", "def ComputeTimeReward(self, currentTime, expectedTime):\r\n return (expectedTime - currentTime) * 1 if currentTime < expectedTime else (expectedTime - currentTime) * 1", "def is_limited(self) -> bool:\n return self.__times > ActionState.UNLIMITED", "def is_exploring(self, step):\n return np.random.rand() < self._epsilon(step)", "def test_duration_property(self):\n recording_dt = 0.1\n recording_shape = {\n 'no_timesteps': 1000,\n 'no_sweeps': 10,\n 'no_channels': 4,\n }\n expected_duration = recording_shape['no_timesteps'] * recording_dt\n test_rec = rt.Recording(\n np.zeros(\n [\n recording_shape['no_channels'],\n recording_shape['no_timesteps'],\n recording_shape['no_sweeps'],\n ]\n ),\n dt=recording_dt,\n )\n npt.assert_almost_equal(\n test_rec.duration,\n expected_duration,\n err_msg='Expected {} for `duration` property; got {} instead.'.format(\n expected_duration, test_rec.duration\n ),\n )", "def rate_limiting(cls):\n this_click_time = time.time()\n time_to_last_click = None\n if cls.last_click_time:\n time_to_last_click = this_click_time - cls.last_click_time\n cls.last_click_time = this_click_time\n return time_to_last_click and time_to_last_click < 0.7", "def isDefaultedAt(self, time):\n return time > self.getDefaultTime()", "def test_fields_effort_time_units_dictionary_success(self, _mock_check):\n field = EffortField(time_units={\"minute\": 
(\"minute\", \"minutes\")})\n\n errors = field.check()\n self.assertEqual(len(errors), 0)", "def excitationPulse(self, time, power):\n t = time * ns + self.step # Should center at one step before 0\n if self.step <= 200 * ps: # resolution warrants modelling the pulse\n width = 200.0 * ps # self.step\n\n if t < width * 10: # Only evaulate when the value is significant\n amp = power / (width * sqrt_2pi) # normalized amplitude\n value = amp * np.exp(-1.0 * (t) * (t) / (2 * width * width))\n value = value\n else:\n value = 0.0\n else: # impulsive limit, just dump all the excitons in at t=0\n # if time >= 0 - self.step/2 and time < 0 + self.step/2:\n if t > -0.5 * self.step and t <= 0.5 * self.step:\n value = power / self.step\n else:\n value = 0.0\n return (value*self.step)", "def metropolis ( delta ):\n\n import numpy as np\n \n exponent_guard = 75.0\n\n if delta > exponent_guard: # Too high, reject without evaluating\n return False\n elif delta < 0.0: # Downhill, accept without evaluating\n return True\n else:\n zeta = np.random.rand() # Uniform random number in range (0,1)\n return np.exp(-delta) > zeta # Metropolis test", "def is_expired(self):\n\n return time.time() * 1000 - self._refreshed_on > self._expire", "def is_expired(self):\n return timeutils.utcnow_ts() > self.expire_ts", "def is_expired(self) -> bool:\n return now() > self.expires", "def io_operation(self, time):\n self._io_time -= time\n if self._io_time <= 0:\n self._io = False\n return True\n return False", "def _is_noise(self, t):\n\t\tfor ts in self.noise_itv:\n\t\t\tif len(ts) == 2:\n\t\t\t\ttime1 = ts[0]\n\t\t\t\ttime2 = ts[1]\n\t\t\t\tif time1 <= t and t <= time2:\n\t\t\t\t\treturn True\n\t\t\telse:\n\t\t\t\tend_time = ts[0]\n\t\t\t\tif t >= end_time:\n\t\t\t\t\treturn True\n\t\treturn False", "def _long_range_normed_exposure_between_bounds(self, time1: float, time2: float) -> _VectorisedFloat:\n exposure = 0.\n for start, stop in self.exposed.presence_interval().boundaries():\n if stop < time1:\n continue\n elif start > time2:\n break\n elif start <= time1 and time2<= stop:\n exposure += self.concentration_model.normed_integrated_concentration(time1, time2)\n elif start <= time1 and stop < time2:\n exposure += self.concentration_model.normed_integrated_concentration(time1, stop)\n elif time1 < start and time2 <= stop:\n exposure += self.concentration_model.normed_integrated_concentration(start, time2)\n elif time1 <= start and stop < time2:\n exposure += self.concentration_model.normed_integrated_concentration(start, stop)\n return exposure", "def _timeout_request(start_time, request_timeout):\n return int(round(time() * 1000)) - start_time >= request_timeout", "def time_until_full(self):\n if ((datetime.datetime.utcnow() - self.initial_time).total_seconds() \n > UPDATE_PERIOD\n and self.space_decay_rate() < 0):\n secs_until_full = self.available_space() / -self.space_decay_rate() \n return datetime.timedelta(seconds=secs_until_full)", "def isFreeTimeExpired(self):\n assert self.notify.debugStateCall(self, 'loginFSM', 'gameFSM')\n if self.accountOldAuth:\n return 0\n\n # check for the config overrides\n # if true, free-time-expired takes precedence over unlimited-free-time\n if base.config.GetBool(\"free-time-expired\", 0):\n return 1\n if base.config.GetBool(\"unlimited-free-time\", 0):\n return 0\n\n # -1 == never expires (paid/exempt)\n if self.freeTimeExpiresAt == -1:\n return 0\n\n # 0 == expired\n if self.freeTimeExpiresAt == 0:\n return 1\n\n if self.freeTimeExpiresAt < -1:\n 
self.notify.warning('freeTimeExpiresAt is less than -1 (%s)' %\n self.freeTimeExpiresAt)\n\n # freeTimeExpiresAt is an epoch time\n # is it in the past?\n if self.freeTimeExpiresAt < time.time():\n return 1\n else:\n return 0", "def is_expired(self):\n if self.access_token is None:\n logging.debug('Access token not found')\n return True\n else:\n return (self.expiration <= datetime.now())", "def checkGracePeriodDuration(self):\n if (not self.isInGraceInvulnerability):\n return\n if (time.time()-self.gracePeriodStartTime > shipDamagedInvulerabilityGracePeriodLength):\n #if the grace period is over...\n self.disableGracePeriod()", "def _is_percent_of_time(percent_of_time):\n assert 0 <= percent_of_time\n assert percent_of_time <= 100\n random_number = random.uniform(0, 100)\n return random_number <= percent_of_time", "def _safe_limit_check(self):\n if self.rem == 40:\n self.time_start = time.time()\n elif time.time() - self.time_start >= 11:\n self.rem = 40\n self.time_start = time.time()\n elif self.rem <= 0:\n t = 11 - (time.time() - self.time_start)\n\n if t <= 0:\n self.rem = 40\n self.time_start = time.time()\n else:\n if self.policy == Limit.Sleep:\n time.sleep(t)\n elif self.policy == Limit.Ignore:\n return False\n\n self.rem -= 1\n return True", "def close_to_exceeding(self) -> bool:\n mean = self.current / self.num_cuts\n if self.max_frames is not None:\n return self.current + mean > self.max_frames\n if self.max_samples is not None:\n return self.current + mean > self.max_samples\n if self.max_duration is not None:\n return self.current + mean > self.max_duration\n return False", "def expired(self):\n return int(time.time()) > self.expires_at", "def __checkTimer(self):\n if self.__endTime is None:\n raise AssertionError('The end time had not been set.')\n if time.time() > self.__endTime:\n self._logError('Maximum Run Time Reached !!')\n raise _MaxRunTimeReachedError('')", "def tune_exposure_time(camera, target, initial_exptime, min_exptime=0, max_exptime=None,\n max_steps=5, tolerance=0.1, cutout_size=256, bias=None, **kwargs):\n camera.logger.info(f\"Tuning exposure time for {camera}.\")\n\n images_dir = camera.get_config(\"directories.images\", None)\n if images_dir:\n images_dir = os.path.join(images_dir, \"temp\")\n os.makedirs(images_dir, exist_ok=True)\n\n # Parse quantities\n initial_exptime = get_quantity_value(initial_exptime, \"second\") * u.second\n\n if min_exptime is not None:\n min_exptime = get_quantity_value(min_exptime, \"second\") * u.second\n if max_exptime is not None:\n max_exptime = get_quantity_value(max_exptime, \"second\") * u.second\n\n try:\n bit_depth = camera.bit_depth.to_value(\"bit\")\n except NotImplementedError:\n bit_depth = 16\n\n saturated_counts = 2 ** bit_depth\n\n prefix = images_dir if images_dir is None else images_dir + \"/\"\n with tempfile.NamedTemporaryFile(suffix=\".fits\", prefix=prefix, delete=False) as tf:\n\n exptime = initial_exptime\n\n for step in range(max_steps):\n\n # Check if exposure time is within valid range\n if (exptime == max_exptime) or (exptime == min_exptime):\n break\n\n # Get an image\n cutout = camera.get_cutout(exptime, tf.name, cutout_size, keep_file=False, **kwargs)\n cutout = cutout.astype(\"float32\")\n if bias is not None:\n cutout -= bias\n\n # Measure average counts\n normalised_counts = np.median(cutout) / saturated_counts\n\n camera.logger.debug(f\"Normalised counts for {exptime} exposure on {camera}:\"\n f\" {normalised_counts}\")\n\n # Check if tolerance condition is met\n if tolerance:\n if 
abs(normalised_counts - target) < tolerance:\n break\n\n # Update exposure time\n exptime = exptime * target / normalised_counts\n if max_exptime is not None:\n exptime = min(exptime, max_exptime)\n if min_exptime is not None:\n exptime = max(exptime, min_exptime)\n\n camera.logger.info(f\"Tuned exposure time for {camera}: {exptime}\")\n\n return exptime", "def _check_pulse(self):\n timedelta = time.time() - self.heartbeat\n update_delay = float(1/self.qbpm.frequency)\n time_to_update = False\n if timedelta > update_delay:\n time_to_update = True\n self.heartbeat = time.time()\n return time_to_update", "def testStretchedExpData(nTimes = 1000, beta = 0.5, sigma = 0.05, linear=False):\n\n if linear:\n Times = rangeLin(1.0e-3, 1.0e+3, nTimes) # in seconds\n else:\n Times = rangeLog(1.0e-3, 1.0e+3, nTimes) # in seconds\n\n Data = np.exp(-1.*(Times**beta))\n Data += sigma*np.random.randn( len(Data) )\n\n return Times, Data", "def expired(self) -> bool:\n if not self.use_wts:\n return False\n\n return datetime.now() > self.expire", "def is_over(self, time):\n over = (not self.enable_loop()) and (time >= self.get_duration())\n return over", "def sanity_check(self):\n res = True\n res = res and self.detected\n res = res and np.sum(self.diffs) < 30000 # experimental value\n return res" ]
[ "0.6262144", "0.6255858", "0.6163135", "0.61418873", "0.5915087", "0.5863674", "0.5856211", "0.57416385", "0.55517083", "0.5540617", "0.55379456", "0.5453827", "0.5453151", "0.54436797", "0.5430154", "0.5369838", "0.5325574", "0.5313959", "0.5313157", "0.53118265", "0.5288747", "0.52797335", "0.5260478", "0.5257042", "0.52416027", "0.51850665", "0.5179103", "0.5174016", "0.5171707", "0.51668525", "0.5157539", "0.51347667", "0.51273763", "0.50815827", "0.5079341", "0.5075893", "0.5067881", "0.5054975", "0.50516963", "0.504854", "0.5042227", "0.50301766", "0.5020313", "0.50202614", "0.501119", "0.5011094", "0.5005053", "0.5001828", "0.5001057", "0.49995127", "0.49883458", "0.49882838", "0.4983302", "0.49812782", "0.4975712", "0.49748138", "0.49719593", "0.4971577", "0.4961053", "0.4960821", "0.4955817", "0.4934189", "0.4920372", "0.49197683", "0.49140066", "0.49114564", "0.4909697", "0.4906464", "0.48975718", "0.48962536", "0.4889649", "0.48894712", "0.48854095", "0.48846924", "0.48666143", "0.4862617", "0.48583996", "0.4856517", "0.48520264", "0.48464453", "0.48449537", "0.48434305", "0.48421896", "0.4840959", "0.48378775", "0.48365378", "0.483607", "0.48342377", "0.48322853", "0.48285756", "0.48283836", "0.4827491", "0.48272526", "0.4824871", "0.4823129", "0.48221514", "0.4821971", "0.4820364", "0.48164126", "0.4811015", "0.47907817" ]
0.0
-1